@@ -23,8 +23,8 @@ struct ctl_table_header;
 
 #ifdef CONFIG_CGROUP_BPF
 
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
 
 DECLARE_PER_CPU(struct bpf_cgroup_storage *,
 		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
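
Note (not part of the patch): with the key split per attach type, the attach and detach paths are expected to toggle only the key for the type being wired up. A minimal sketch of that idea, assuming the usual jump-label helpers; the two helper names below are illustrative, not taken from this diff:

/* Illustrative sketch (hypothetical helpers): flip only the static key for
 * the attach type in question, leaving every other hook patched out.
 */
#include <linux/jump_label.h>
#include <linux/bpf-cgroup.h>

static void cgroup_bpf_key_get(enum bpf_attach_type type)
{
	/* Enables cgroup_bpf_enabled(type) for this type only. */
	static_branch_inc(&cgroup_bpf_enabled_key[type]);
}

static void cgroup_bpf_key_put(enum bpf_attach_type type)
{
	static_branch_dec(&cgroup_bpf_enabled_key[type]);
}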
@@ -185,7 +185,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
 		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
 						    BPF_CGROUP_INET_INGRESS); \
 	\
@@ -195,7 +195,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+	if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
 		typeof(sk) __sk = sk_to_full_sk(sk); \
 		if (sk_fullsock(__sk)) \
 			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
@@ -207,7 +207,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_SK_PROG(sk, type) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) { \
+	if (cgroup_bpf_enabled(type)) { \
 		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
 	} \
 	__ret; \
@@ -228,7 +228,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(type)) \
 		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
 							  NULL); \
 	__ret; \
@@ -237,7 +237,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) { \
+	if (cgroup_bpf_enabled(type)) { \
 		lock_sock(sk); \
 		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
 							  t_ctx); \
@@ -252,8 +252,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
 
-#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
-					    sk->sk_prot->pre_connect)
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+	((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
+	  cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
+	 (sk)->sk_prot->pre_connect)
 
 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
 	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
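
Note (not part of the patch): the reworked BPF_CGROUP_PRE_CONNECT_ENABLED() now reports true only when a connect-type program could actually run for either address family. A hedged sketch of a caller, modelled on the inet connect path; the function name is illustrative, not taken from this diff:

/* Illustrative caller (assumption): take the pre_connect hook only when a
 * cgroup connect program may run, otherwise fall through unchanged.
 */
#include <net/sock.h>
#include <linux/bpf-cgroup.h>

static int example_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			       int addr_len)
{
	int err = 0;

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk))
		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);

	return err;
}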
@@ -297,7 +299,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
 		__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
 							 sock_ops, \
 							 BPF_CGROUP_SOCK_OPS); \
@@ -307,7 +309,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+	if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
 		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
 		if (__sk && sk_fullsock(__sk)) \
 			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
@@ -320,7 +322,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
 		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
 							  access, \
 							  BPF_CGROUP_DEVICE); \
@@ -332,7 +334,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
 		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
 						       buf, count, pos, \
 						       BPF_CGROUP_SYSCTL); \
@@ -343,7 +345,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 				       kernel_optval) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
 		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
 							   optname, optval, \
 							   optlen, \
@@ -354,7 +356,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
 ({ \
 	int __ret = 0; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
 		get_user(__ret, optlen); \
 	__ret; \
 })
@@ -363,7 +365,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 				       max_optlen, retval) \
 ({ \
 	int __ret = retval; \
-	if (cgroup_bpf_enabled) \
+	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
 		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
 							   optname, optval, \
 							   optlen, max_optlen, \
@@ -427,7 +429,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 	return 0;
 }
 
-#define cgroup_bpf_enabled (0)
+#define cgroup_bpf_enabled(type) (0)
 #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
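
Note (not part of the patch): the !CONFIG_CGROUP_BPF stub also gains a type argument, so call sites can pass an attach type unconditionally and still build when cgroup-BPF is disabled, where the check folds to a constant 0. A small illustrative example; the function name is hypothetical:

/* Builds with and without CONFIG_CGROUP_BPF; in the disabled case the
 * compiler sees a constant 0 and drops the branch entirely.
 */
#include <linux/bpf-cgroup.h>

static inline bool example_sysctl_progs_enabled(void)
{
	return cgroup_bpf_enabled(BPF_CGROUP_SYSCTL);
}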