Skip to content

Commit 5c8a71d

Browse files
fomichev authored and kernel-patches-bot committed
bpf: split cgroup_bpf_enabled per attach type
When we attach any cgroup hook, the rest (even if unused/unattached) start
to contribute a small overhead. In particular, the one we want to avoid is
__cgroup_bpf_run_filter_skb, which does two redirections to get to the
cgroup and pushes/pulls the skb.

Let's split cgroup_bpf_enabled to be per attach type, to make sure only
the attach types actually in use trigger.

I've dropped some existing high-level cgroup_bpf_enabled checks in a few
places because the BPF_PROG_CGROUP_XXX_RUN macros usually have another
cgroup_bpf_enabled check. I also had to copy-paste
BPF_CGROUP_RUN_SA_PROG_LOCK for GETPEERNAME/GETSOCKNAME because the type
for cgroup_bpf_enabled[type] has to be constant and known at compile time.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Acked-by: Song Liu <songliubraving@fb.com>
1 parent 07546ba commit 5c8a71d

File tree

6 files changed

+41
-41
lines changed

6 files changed

+41
-41
lines changed

include/linux/bpf-cgroup.h

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ struct ctl_table_header;
2323

2424
#ifdef CONFIG_CGROUP_BPF
2525

26-
extern struct static_key_false cgroup_bpf_enabled_key;
27-
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
26+
extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
27+
#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
2828

2929
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
3030
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
@@ -185,7 +185,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
185185
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
186186
({ \
187187
int __ret = 0; \
188-
if (cgroup_bpf_enabled) \
188+
if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
189189
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
190190
BPF_CGROUP_INET_INGRESS); \
191191
\
@@ -195,7 +195,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
195195
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
196196
({ \
197197
int __ret = 0; \
198-
if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
198+
if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
199199
typeof(sk) __sk = sk_to_full_sk(sk); \
200200
if (sk_fullsock(__sk)) \
201201
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
@@ -207,7 +207,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
207207
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
208208
({ \
209209
int __ret = 0; \
210-
if (cgroup_bpf_enabled) { \
210+
if (cgroup_bpf_enabled(type)) { \
211211
__ret = __cgroup_bpf_run_filter_sk(sk, type); \
212212
} \
213213
__ret; \
@@ -228,7 +228,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
228228
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
229229
({ \
230230
int __ret = 0; \
231-
if (cgroup_bpf_enabled) \
231+
if (cgroup_bpf_enabled(type)) \
232232
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
233233
NULL); \
234234
__ret; \
@@ -237,7 +237,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
237237
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
238238
({ \
239239
int __ret = 0; \
240-
if (cgroup_bpf_enabled) { \
240+
if (cgroup_bpf_enabled(type)) { \
241241
lock_sock(sk); \
242242
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
243243
t_ctx); \
@@ -252,8 +252,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
252252
#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
253253
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
254254

255-
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
256-
sk->sk_prot->pre_connect)
255+
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
256+
((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
257+
cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
258+
(sk)->sk_prot->pre_connect)
257259

258260
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
259261
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
@@ -297,7 +299,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
297299
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
298300
({ \
299301
int __ret = 0; \
300-
if (cgroup_bpf_enabled) \
302+
if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
301303
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
302304
sock_ops, \
303305
BPF_CGROUP_SOCK_OPS); \
@@ -307,7 +309,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
307309
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
308310
({ \
309311
int __ret = 0; \
310-
if (cgroup_bpf_enabled && (sock_ops)->sk) { \
312+
if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
311313
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
312314
if (__sk && sk_fullsock(__sk)) \
313315
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
@@ -320,7 +322,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
320322
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
321323
({ \
322324
int __ret = 0; \
323-
if (cgroup_bpf_enabled) \
325+
if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
324326
__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
325327
access, \
326328
BPF_CGROUP_DEVICE); \
@@ -332,7 +334,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
332334
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
333335
({ \
334336
int __ret = 0; \
335-
if (cgroup_bpf_enabled) \
337+
if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
336338
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
337339
buf, count, pos, \
338340
BPF_CGROUP_SYSCTL); \
@@ -343,7 +345,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
343345
kernel_optval) \
344346
({ \
345347
int __ret = 0; \
346-
if (cgroup_bpf_enabled) \
348+
if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
347349
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
348350
optname, optval, \
349351
optlen, \
@@ -354,7 +356,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
354356
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
355357
({ \
356358
int __ret = 0; \
357-
if (cgroup_bpf_enabled) \
359+
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
358360
get_user(__ret, optlen); \
359361
__ret; \
360362
})
@@ -363,7 +365,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
363365
max_optlen, retval) \
364366
({ \
365367
int __ret = retval; \
366-
if (cgroup_bpf_enabled) \
368+
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
367369
__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
368370
optname, optval, \
369371
optlen, max_optlen, \
@@ -427,7 +429,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
427429
return 0;
428430
}
429431

430-
#define cgroup_bpf_enabled (0)
432+
#define cgroup_bpf_enabled(type) (0)
431433
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
432434
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
433435
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })

kernel/bpf/cgroup.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020

2121
#include "../cgroup/cgroup-internal.h"
2222

23-
DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
23+
DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
2424
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
2525

2626
void cgroup_bpf_offline(struct cgroup *cgrp)
@@ -129,7 +129,7 @@ static void cgroup_bpf_release(struct work_struct *work)
129129
if (pl->link)
130130
bpf_cgroup_link_auto_detach(pl->link);
131131
kfree(pl);
132-
static_branch_dec(&cgroup_bpf_enabled_key);
132+
static_branch_dec(&cgroup_bpf_enabled_key[type]);
133133
}
134134
old_array = rcu_dereference_protected(
135135
cgrp->bpf.effective[type],
@@ -500,7 +500,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
500500
if (old_prog)
501501
bpf_prog_put(old_prog);
502502
else
503-
static_branch_inc(&cgroup_bpf_enabled_key);
503+
static_branch_inc(&cgroup_bpf_enabled_key[type]);
504504
bpf_cgroup_storages_link(new_storage, cgrp, type);
505505
return 0;
506506

@@ -699,7 +699,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
699699
cgrp->bpf.flags[type] = 0;
700700
if (old_prog)
701701
bpf_prog_put(old_prog);
702-
static_branch_dec(&cgroup_bpf_enabled_key);
702+
static_branch_dec(&cgroup_bpf_enabled_key[type]);
703703
return 0;
704704

705705
cleanup:
@@ -1361,8 +1361,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
13611361
* attached to the hook so we don't waste time allocating
13621362
* memory and locking the socket.
13631363
*/
1364-
if (!cgroup_bpf_enabled ||
1365-
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
1364+
if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
13661365
return 0;
13671366

13681367
/* Allocate a bit more than the initial user buffer for
@@ -1456,8 +1455,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
14561455
* attached to the hook so we don't waste time allocating
14571456
* memory and locking the socket.
14581457
*/
1459-
if (!cgroup_bpf_enabled ||
1460-
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
1458+
if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
14611459
return retval;
14621460

14631461
ctx.optlen = max_optlen;

net/ipv4/af_inet.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -777,18 +777,19 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
777777
return -ENOTCONN;
778778
sin->sin_port = inet->inet_dport;
779779
sin->sin_addr.s_addr = inet->inet_daddr;
780+
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
781+
BPF_CGROUP_INET4_GETPEERNAME,
782+
NULL);
780783
} else {
781784
__be32 addr = inet->inet_rcv_saddr;
782785
if (!addr)
783786
addr = inet->inet_saddr;
784787
sin->sin_port = inet->inet_sport;
785788
sin->sin_addr.s_addr = addr;
786-
}
787-
if (cgroup_bpf_enabled)
788789
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
789-
peer ? BPF_CGROUP_INET4_GETPEERNAME :
790-
BPF_CGROUP_INET4_GETSOCKNAME,
790+
BPF_CGROUP_INET4_GETSOCKNAME,
791791
NULL);
792+
}
792793
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
793794
return sizeof(*sin);
794795
}

net/ipv4/udp.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1124,7 +1124,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11241124
rcu_read_unlock();
11251125
}
11261126

1127-
if (cgroup_bpf_enabled && !connected) {
1127+
if (cgroup_bpf_enabled(BPF_CGROUP_UDP4_SENDMSG) && !connected) {
11281128
err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
11291129
(struct sockaddr *)usin, &ipc.addr);
11301130
if (err)
@@ -1858,9 +1858,8 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
18581858
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
18591859
*addr_len = sizeof(*sin);
18601860

1861-
if (cgroup_bpf_enabled)
1862-
BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
1863-
(struct sockaddr *)sin);
1861+
BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
1862+
(struct sockaddr *)sin);
18641863
}
18651864

18661865
if (udp_sk(sk)->gro_enabled)

net/ipv6/af_inet6.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -527,18 +527,19 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
527527
sin->sin6_addr = sk->sk_v6_daddr;
528528
if (np->sndflow)
529529
sin->sin6_flowinfo = np->flow_label;
530+
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
531+
BPF_CGROUP_INET6_GETPEERNAME,
532+
NULL);
530533
} else {
531534
if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
532535
sin->sin6_addr = np->saddr;
533536
else
534537
sin->sin6_addr = sk->sk_v6_rcv_saddr;
535538
sin->sin6_port = inet->inet_sport;
536-
}
537-
if (cgroup_bpf_enabled)
538539
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
539-
peer ? BPF_CGROUP_INET6_GETPEERNAME :
540-
BPF_CGROUP_INET6_GETSOCKNAME,
540+
BPF_CGROUP_INET6_GETSOCKNAME,
541541
NULL);
542+
}
542543
sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,
543544
sk->sk_bound_dev_if);
544545
return sizeof(*sin);

net/ipv6/udp.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -409,9 +409,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
409409
}
410410
*addr_len = sizeof(*sin6);
411411

412-
if (cgroup_bpf_enabled)
413-
BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
414-
(struct sockaddr *)sin6);
412+
BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
413+
(struct sockaddr *)sin6);
415414
}
416415

417416
if (udp_sk(sk)->gro_enabled)
@@ -1462,7 +1461,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
14621461
fl6.saddr = np->saddr;
14631462
fl6.fl6_sport = inet->inet_sport;
14641463

1465-
if (cgroup_bpf_enabled && !connected) {
1464+
if (cgroup_bpf_enabled(BPF_CGROUP_UDP6_SENDMSG) && !connected) {
14661465
err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
14671466
(struct sockaddr *)sin6, &fl6.saddr);
14681467
if (err)

0 commit comments

Comments
 (0)