
Commit 4629106

bjackman authored and Alexei Starovoitov committed
bpf: Pull out a macro for interpreting atomic ALU operations
Since the atomic operations that are added in subsequent commits are all isomorphic with BPF_ADD, pull out a macro to avoid the interpreter becoming dominated by lines of atomic-related code.

Note that this sacrifices interpreter performance (combining STX_ATOMIC_W and STX_ATOMIC_DW into a single switch case means that we need an extra conditional branch to differentiate them) in favour of compact and (relatively!) simple C code.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-9-jackmanb@google.com
1 parent 5ffa255 commit 4629106
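As an aside for readers unfamiliar with the pattern, here is a minimal standalone sketch of the same technique: a macro that expands to a complete `case` label inside a switch, picking the 32- or 64-bit helper with a runtime conditional, so each new isomorphic operation costs one macro invocation instead of two hand-written cases. This is not kernel code; every name in it (OP_ADD, DEMO_ALU_OP, demo_op, add32, ...) is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical opcodes standing in for BPF_ADD and friends. */
    enum { OP_ADD = 1, OP_AND = 2 };

    static void add32(uint32_t *p, uint32_t v) { *p += v; }
    static void add64(uint64_t *p, uint64_t v) { *p += v; }
    static void and32(uint32_t *p, uint32_t v) { *p &= v; }
    static void and64(uint64_t *p, uint64_t v) { *p &= v; }

    /*
     * Expands to one complete `case` label. KOP##32 / KOP##64 paste the
     * helper name, mirroring atomic_##KOP / atomic64_##KOP in the commit.
     * Casting the same address to uint32_t * mirrors the kernel's
     * (atomic_t *) cast of DST + insn->off.
     */
    #define DEMO_ALU_OP(BOP, KOP)                                    \
            case BOP:                                                \
                    if (is_32bit)                                    \
                            KOP##32((uint32_t *)dst, (uint32_t)src); \
                    else                                             \
                            KOP##64(dst, src);                       \
                    break;

    static void demo_op(int op, int is_32bit, uint64_t *dst, uint64_t src)
    {
            switch (op) {
            DEMO_ALU_OP(OP_ADD, add)
            DEMO_ALU_OP(OP_AND, and)
            default:
                    fprintf(stderr, "unknown op %d\n", op);
            }
    }
    #undef DEMO_ALU_OP

    int main(void)
    {
            uint64_t x = 5;

            demo_op(OP_ADD, /* is_32bit= */ 0, &x, 7);
            printf("%llu\n", (unsigned long long)x); /* prints 12 */
            return 0;
    }

The runtime `if (is_32bit)` is the analogue of the extra conditional branch the commit message mentions: the pre-patch interpreter resolved the operand width at dispatch time through separate STX_ATOMIC_W and STX_ATOMIC_DW labels instead.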

File tree

1 file changed: +39 −41 lines

kernel/bpf/core.c

Lines changed: 39 additions & 41 deletions
@@ -1618,55 +1618,53 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
         LDX_PROBE(DW, 8)
 #undef LDX_PROBE
 
-        STX_ATOMIC_W:
-                switch (IMM) {
-                case BPF_ADD:
-                        /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-                        atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-                                   (DST + insn->off));
-                        break;
-                case BPF_ADD | BPF_FETCH:
-                        SRC = (u32) atomic_fetch_add(
-                                (u32) SRC,
-                                (atomic_t *)(unsigned long) (DST + insn->off));
-                        break;
-                case BPF_XCHG:
-                        SRC = (u32) atomic_xchg(
-                                (atomic_t *)(unsigned long) (DST + insn->off),
-                                (u32) SRC);
-                        break;
-                case BPF_CMPXCHG:
-                        BPF_R0 = (u32) atomic_cmpxchg(
-                                (atomic_t *)(unsigned long) (DST + insn->off),
-                                (u32) BPF_R0, (u32) SRC);
+#define ATOMIC_ALU_OP(BOP, KOP)                                          \
+                case BOP:                                                \
+                        if (BPF_SIZE(insn->code) == BPF_W)               \
+                                atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
+                                             (DST + insn->off));         \
+                        else                                             \
+                                atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
+                                               (DST + insn->off));       \
+                        break;                                           \
+                case BOP | BPF_FETCH:                                    \
+                        if (BPF_SIZE(insn->code) == BPF_W)               \
+                                SRC = (u32) atomic_fetch_##KOP(          \
+                                        (u32) SRC,                       \
+                                        (atomic_t *)(unsigned long) (DST + insn->off)); \
+                        else                                             \
+                                SRC = (u64) atomic64_fetch_##KOP(        \
+                                        (u64) SRC,                       \
+                                        (atomic64_t *)(unsigned long) (DST + insn->off)); \
                         break;
-                default:
-                        goto default_label;
-                }
-                CONT;
 
         STX_ATOMIC_DW:
+        STX_ATOMIC_W:
                 switch (IMM) {
-                case BPF_ADD:
-                        /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-                        atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-                                     (DST + insn->off));
-                        break;
-                case BPF_ADD | BPF_FETCH:
-                        SRC = (u64) atomic64_fetch_add(
-                                (u64) SRC,
-                                (atomic64_t *)(unsigned long) (DST + insn->off));
-                        break;
+                ATOMIC_ALU_OP(BPF_ADD, add)
+#undef ATOMIC_ALU_OP
+
                 case BPF_XCHG:
-                        SRC = (u64) atomic64_xchg(
-                                (atomic64_t *)(unsigned long) (DST + insn->off),
-                                (u64) SRC);
+                        if (BPF_SIZE(insn->code) == BPF_W)
+                                SRC = (u32) atomic_xchg(
+                                        (atomic_t *)(unsigned long) (DST + insn->off),
+                                        (u32) SRC);
+                        else
+                                SRC = (u64) atomic64_xchg(
+                                        (atomic64_t *)(unsigned long) (DST + insn->off),
+                                        (u64) SRC);
                         break;
                 case BPF_CMPXCHG:
-                        BPF_R0 = (u64) atomic64_cmpxchg(
-                                (atomic64_t *)(unsigned long) (DST + insn->off),
-                                (u64) BPF_R0, (u64) SRC);
+                        if (BPF_SIZE(insn->code) == BPF_W)
+                                BPF_R0 = (u32) atomic_cmpxchg(
+                                        (atomic_t *)(unsigned long) (DST + insn->off),
+                                        (u32) BPF_R0, (u32) SRC);
+                        else
+                                BPF_R0 = (u64) atomic64_cmpxchg(
+                                        (atomic64_t *)(unsigned long) (DST + insn->off),
+                                        (u64) BPF_R0, (u64) SRC);
                         break;
+
                 default:
                         goto default_label;
                 }
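For reference, the single ATOMIC_ALU_OP(BPF_ADD, add) invocation above expands, modulo whitespace, to the two cases below. BPF_SIZE(insn->code) == BPF_W is the runtime check that now distinguishes the W and DW forms, since both labels fall into the same switch:

    case BPF_ADD:
            if (BPF_SIZE(insn->code) == BPF_W)
                    atomic_add((u32) SRC, (atomic_t *)(unsigned long)
                               (DST + insn->off));
            else
                    atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
                                 (DST + insn->off));
            break;
    case BPF_ADD | BPF_FETCH:
            if (BPF_SIZE(insn->code) == BPF_W)
                    SRC = (u32) atomic_fetch_add(
                            (u32) SRC,
                            (atomic_t *)(unsigned long) (DST + insn->off));
            else
                    SRC = (u64) atomic64_fetch_add(
                            (u64) SRC,
                            (atomic64_t *)(unsigned long) (DST + insn->off));
            break;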
