/* Global registries shared across the HCI core.
 *
 * hci_cb_list holds the registered protocol callback blocks (struct hci_cb)
 * and is traversed under RCU by hci_cb_lookup(); the former
 * hci_cb_list_lock mutex has been removed.
 */
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;

/* Atomic helpers over hdev->dev_flags. */
#define hci_dev_set_flag(hdev, nr)     set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)   clear_bit((nr), (hdev)->dev_flags)
@@ -1949,68 +1948,103 @@ struct hci_cb {
1949
1948
1950
1949
	char *name;

	/* Optional filter: return true if this callback block wants to be
	 * notified about @conn.  Consulted by hci_cb_lookup() when building
	 * the per-event snapshot of callbacks to invoke.
	 */
	bool (*match) (struct hci_conn *conn);
	/* Connection established/failed notification (status 0 = success). */
	void (*connect_cfm) (struct hci_conn *conn, __u8 status);
	/* Connection torn down notification. */
	void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
	/* Authentication/encryption change; encrypt is 0x00 or 0x01. */
	void (*security_cfm) (struct hci_conn *conn, __u8 status,
			      __u8 encrypt);
	/* Link key refresh notification. */
	void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
	/* Role switch (master/slave) completion notification. */
	void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
1959
1959
1960
/* Snapshot the registered callbacks that match @conn.
 *
 * Walks the global hci_cb_list under the RCU read lock and, for every
 * entry whose ->match() accepts @conn, appends a heap-allocated copy to
 * @list.  Copying lets the caller invoke the callbacks (which may sleep)
 * after rcu_read_unlock(), which is why the old hci_cb_list_lock mutex
 * is no longer needed.
 *
 * kmalloc() uses GFP_ATOMIC because we are inside an RCU read-side
 * critical section and must not sleep.  On allocation failure the walk
 * stops early, silently skipping the remaining callbacks (best effort).
 * The caller owns every copy on @list and must kfree() each one.
 */
static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
{
	struct hci_cb *cb, *cpy;

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &hci_cb_list, list) {
		if (cb->match && cb->match(conn)) {
			cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
			if (!cpy)
				break;

			*cpy = *cb;
			INIT_LIST_HEAD(&cpy->list);
			list_add_rcu(&cpy->list, list);
		}
	}
	rcu_read_unlock();
}
1960
1979
/* Notify all interested protocol layers that a connection attempt on
 * @conn finished with @status, then run the per-connection hook.
 * Callback copies come from hci_cb_lookup() and are freed here.
 */
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *entry, *next;
	struct list_head cb_copies;

	INIT_LIST_HEAD(&cb_copies);
	hci_cb_lookup(conn, &cb_copies);

	/* _safe variant: each copy is released as soon as it has run. */
	list_for_each_entry_safe(entry, next, &cb_copies, list) {
		if (entry->connect_cfm)
			entry->connect_cfm(conn, status);
		kfree(entry);
	}

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}
1974
1996
1975
1997
/* Notify all interested protocol layers that @conn was disconnected for
 * @reason, then run the per-connection hook.  Callback copies come from
 * hci_cb_lookup() and are freed here.
 */
static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	struct hci_cb *entry, *next;
	struct list_head cb_copies;

	INIT_LIST_HEAD(&cb_copies);
	hci_cb_lookup(conn, &cb_copies);

	/* _safe variant: each copy is released as soon as it has run. */
	list_for_each_entry_safe(entry, next, &cb_copies, list) {
		if (entry->disconn_cfm)
			entry->disconn_cfm(conn, reason);
		kfree(entry);
	}

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}
1989
2014
1990
- static inline void hci_auth_cfm (struct hci_conn * conn , __u8 status )
2015
+ static inline void hci_security_cfm (struct hci_conn * conn , __u8 status ,
2016
+ __u8 encrypt )
1991
2017
{
1992
- struct hci_cb * cb ;
1993
- __u8 encrypt ;
1994
-
1995
- if (test_bit (HCI_CONN_ENCRYPT_PEND , & conn -> flags ))
1996
- return ;
2018
+ struct list_head list ;
2019
+ struct hci_cb * cb , * tmp ;
1997
2020
1998
- encrypt = test_bit (HCI_CONN_ENCRYPT , & conn -> flags ) ? 0x01 : 0x00 ;
2021
+ INIT_LIST_HEAD (& list );
2022
+ hci_cb_lookup (conn , & list );
1999
2023
2000
- mutex_lock (& hci_cb_list_lock );
2001
- list_for_each_entry (cb , & hci_cb_list , list ) {
2024
+ list_for_each_entry_safe (cb , tmp , & list , list ) {
2002
2025
if (cb -> security_cfm )
2003
2026
cb -> security_cfm (conn , status , encrypt );
2027
+ kfree (cb );
2004
2028
}
2005
- mutex_unlock (& hci_cb_list_lock );
2006
2029
2007
2030
if (conn -> security_cfm_cb )
2008
2031
conn -> security_cfm_cb (conn , status );
2009
2032
}
2010
2033
2034
+ static inline void hci_auth_cfm (struct hci_conn * conn , __u8 status )
2035
+ {
2036
+ __u8 encrypt ;
2037
+
2038
+ if (test_bit (HCI_CONN_ENCRYPT_PEND , & conn -> flags ))
2039
+ return ;
2040
+
2041
+ encrypt = test_bit (HCI_CONN_ENCRYPT , & conn -> flags ) ? 0x01 : 0x00 ;
2042
+
2043
+ hci_security_cfm (conn , status , encrypt );
2044
+ }
2045
+
2011
2046
static inline void hci_encrypt_cfm (struct hci_conn * conn , __u8 status )
2012
2047
{
2013
- struct hci_cb * cb ;
2014
2048
__u8 encrypt ;
2015
2049
2016
2050
if (conn -> state == BT_CONFIG ) {
@@ -2037,40 +2071,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
2037
2071
conn -> sec_level = conn -> pending_sec_level ;
2038
2072
}
2039
2073
2040
- mutex_lock (& hci_cb_list_lock );
2041
- list_for_each_entry (cb , & hci_cb_list , list ) {
2042
- if (cb -> security_cfm )
2043
- cb -> security_cfm (conn , status , encrypt );
2044
- }
2045
- mutex_unlock (& hci_cb_list_lock );
2046
-
2047
- if (conn -> security_cfm_cb )
2048
- conn -> security_cfm_cb (conn , status );
2074
+ hci_security_cfm (conn , status , encrypt );
2049
2075
}
2050
2076
2051
2077
/* Notify all interested protocol layers that the link key of @conn
 * changed (status 0 = success).  Callback copies come from
 * hci_cb_lookup() and are freed here.
 */
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *entry, *next;
	struct list_head cb_copies;

	INIT_LIST_HEAD(&cb_copies);
	hci_cb_lookup(conn, &cb_copies);

	/* _safe variant: each copy is released as soon as it has run. */
	list_for_each_entry_safe(entry, next, &cb_copies, list) {
		if (entry->key_change_cfm)
			entry->key_change_cfm(conn, status);
		kfree(entry);
	}
}
2062
2091
2063
2092
/* Notify all interested protocol layers that a role switch on @conn
 * completed with @status; @role is the resulting role.  Callback copies
 * come from hci_cb_lookup() and are freed here.
 */
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
				       __u8 role)
{
	struct hci_cb *entry, *next;
	struct list_head cb_copies;

	INIT_LIST_HEAD(&cb_copies);
	hci_cb_lookup(conn, &cb_copies);

	/* _safe variant: each copy is released as soon as it has run. */
	list_for_each_entry_safe(entry, next, &cb_copies, list) {
		if (entry->role_switch_cfm)
			entry->role_switch_cfm(conn, status, role);
		kfree(entry);
	}
}
2075
2107
2076
2108
static inline bool hci_bdaddr_is_rpa (bdaddr_t * bdaddr , u8 addr_type )
0 commit comments