@@ -30,6 +30,7 @@
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define PRIVATE_SP (MAX_BPF_JIT_REG + 4)
 #define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
 
 #define check_imm(bits, imm) do { \
@@ -68,6 +69,8 @@ static const int bpf2a64[] = {
 	[TCCNT_PTR] = A64_R(26),
 	/* temporary register for blinding constants */
 	[BPF_REG_AX] = A64_R(9),
+	/* callee saved register for private stack pointer */
+	[PRIVATE_SP] = A64_R(27),
 	/* callee saved register for kern_vm_start address */
 	[ARENA_VM_START] = A64_R(28),
 };
@@ -86,6 +89,7 @@ struct jit_ctx {
 	u64 user_vm_start;
 	u64 arena_vm_start;
 	bool fp_used;
+	bool priv_sp_used;
 	bool write;
 };
 
@@ -98,6 +102,10 @@ struct bpf_plt {
 #define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target)
 #define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target)
 
+/* Memory size/value to protect private stack overflow/underflow */
+#define PRIV_STACK_GUARD_SZ 16
+#define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
+
 static inline void emit(const u32 insn, struct jit_ctx *ctx)
 {
 	if (ctx->image != NULL && ctx->write)
@@ -387,8 +395,11 @@ static void find_used_callee_regs(struct jit_ctx *ctx)
 	if (reg_used & 8)
 		ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9];
 
-	if (reg_used & 16)
+	if (reg_used & 16) {
 		ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP];
+		if (ctx->priv_sp_used)
+			ctx->used_callee_reg[i++] = bpf2a64[PRIVATE_SP];
+	}
 
 	if (ctx->arena_vm_start)
 		ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START];
@@ -461,6 +472,19 @@ static void pop_callee_regs(struct jit_ctx *ctx)
 	}
 }
 
+static void emit_percpu_ptr(const u8 dst_reg, void __percpu *ptr,
+			    struct jit_ctx *ctx)
+{
+	const u8 tmp = bpf2a64[TMP_REG_1];
+
+	emit_a64_mov_i64(dst_reg, (__force const u64)ptr, ctx);
+	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+		emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+	else
+		emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+	emit(A64_ADD(1, dst_reg, dst_reg, tmp), ctx);
+}
+
 #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
 #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
 
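
For reference, emit_percpu_ptr() materializes a per-CPU address at runtime: the emitted code loads the percpu base address of the variable into dst_reg, reads this CPU's percpu offset from TPIDR_EL2 (when the kernel runs at EL2, i.e. with VHE) or TPIDR_EL1 otherwise, and adds the two. A minimal userspace model of that arithmetic, with the system-register read replaced by a hypothetical cpu_offsets table:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the per-CPU offsets the kernel keeps in TPIDR_EL1/EL2. */
    static const uint64_t cpu_offsets[] = { 0x0, 0x20000, 0x40000 };

    /* Models the emitted sequence: dst = percpu_base + this_cpu_offset */
    static uint64_t percpu_ptr(uint64_t percpu_base, int cpu)
    {
            return percpu_base + cpu_offsets[cpu];
    }

    int main(void)
    {
            const uint64_t base = 0x1000; /* hypothetical percpu base address */
            int cpu;

            for (cpu = 0; cpu < 3; cpu++)
                    printf("cpu%d -> %#llx\n", cpu,
                           (unsigned long long)percpu_ptr(base, cpu));
            return 0;
    }
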
@@ -476,6 +500,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	const bool is_main_prog = !bpf_is_subprog(prog);
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 priv_sp = bpf2a64[PRIVATE_SP];
+	void __percpu *priv_stack_ptr;
+	void __percpu *priv_frame_ptr;
 	const int idx0 = ctx->idx;
 	int cur_offset;
 
@@ -551,15 +578,24 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 		emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx);
 	}
 
-	if (ctx->fp_used)
-		/* Set up BPF prog stack base register */
-		emit(A64_MOV(1, fp, A64_SP), ctx);
-
 	/* Stack must be multiples of 16B */
 	ctx->stack_size = round_up(prog->aux->stack_depth, 16);
 
+	if (ctx->fp_used) {
+		if (ctx->priv_sp_used) {
+			/* Set up private stack pointer */
+			priv_stack_ptr = prog->aux->priv_stack_ptr + PRIV_STACK_GUARD_SZ;
+			emit_percpu_ptr(priv_sp, priv_stack_ptr, ctx);
+			priv_frame_ptr = priv_stack_ptr + ctx->stack_size;
+			emit_percpu_ptr(fp, priv_frame_ptr, ctx);
+		} else {
+			/* Set up BPF prog stack base register */
+			emit(A64_MOV(1, fp, A64_SP), ctx);
+		}
+	}
+
 	/* Set up function call stack */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	if (ctx->arena_vm_start)
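
With the private stack in use, the prologue no longer carves the BPF stack out of the kernel stack (hence the !ctx->priv_sp_used test before the A64_SP adjustment). Each CPU instead gets its own guard/stack/guard allocation: priv_sp is set just past the low guard and FP stack_size bytes above it, so the usual negative offsets from FP land inside the usable region:

    prog->aux->priv_stack_ptr (per CPU)
    |
    v
    +-----------+----------------------+-----------+
    | 16B guard |   stack_size bytes   | 16B guard |
    +-----------+----------------------+-----------+
                ^                      ^
                priv_sp                fp
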
@@ -623,7 +659,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	emit(A64_STR64I(tcc, ptr, 0), ctx);
 
 	/* restore SP */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	pop_callee_regs(ctx);
@@ -991,7 +1027,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 	const u8 ptr = bpf2a64[TCCNT_PTR];
 
 	/* We're done with BPF stack */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	pop_callee_regs(ctx);
@@ -1120,6 +1156,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 priv_sp = bpf2a64[PRIVATE_SP];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -1564,7 +1601,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			src = tmp2;
 		}
 		if (src == fp) {
-			src_adj = A64_SP;
+			src_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
 		} else {
 			src_adj = src;
@@ -1654,7 +1691,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			dst = tmp2;
 		}
 		if (dst == fp) {
-			dst_adj = A64_SP;
+			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
 		} else {
 			dst_adj = dst;
@@ -1716,7 +1753,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			dst = tmp2;
 		}
 		if (dst == fp) {
-			dst_adj = A64_SP;
+			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 			off_adj = off + ctx->stack_size;
 		} else {
 			dst_adj = dst;
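
These last three hunks apply the same rewrite: memory accesses that use BPF_REG_FP as their base are rebased onto the real stack pointer, which becomes priv_sp instead of A64_SP when the private stack is active. Worked with hypothetical numbers: an access at FP-8 with a 512-byte stack gives off_adj = -8 + 512 = 504, i.e. [A64_SP + 504] on the shared stack or [priv_sp + 504] on the private one; both name the same byte because FP sits stack_size bytes above the respective base.
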
@@ -1859,6 +1896,39 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
+static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
+{
+	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+	u64 *stack_ptr;
+
+	for_each_possible_cpu(cpu) {
+		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+		stack_ptr[0] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[1] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[underflow_idx + 1] = PRIV_STACK_GUARD_VAL;
+	}
+}
+
+static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
+				   struct bpf_prog *prog)
+{
+	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+	u64 *stack_ptr;
+
+	for_each_possible_cpu(cpu) {
+		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+		if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[1] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[underflow_idx + 1] != PRIV_STACK_GUARD_VAL) {
+			pr_err("BPF private stack overflow/underflow detected for prog %s\n",
+			       bpf_jit_get_prog_name(prog));
+			break;
+		}
+	}
+}
+
 struct arm64_jit_data {
 	struct bpf_binary_header *header;
 	u8 *ro_image;
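
The guard bookkeeping is plain index arithmetic on u64 words: two guard words sit at the very bottom of the allocation and two at the very top, and underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3 converts the byte offset of the top guard into a u64 index. A self-contained userspace sketch of the same init/check logic, assuming a hypothetical 512-byte stack:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PRIV_STACK_GUARD_SZ  16
    #define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL

    int main(void)
    {
            int stack_size = 512; /* hypothetical verifier stack size */
            int alloc_size = stack_size + 2 * PRIV_STACK_GUARD_SZ;
            int underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
            uint64_t *p = calloc(alloc_size / 8, sizeof(uint64_t));
            int ok;

            if (!p)
                    return 1;
            /* init: two guard words at each end, as priv_stack_init_guard() does */
            p[0] = p[1] = PRIV_STACK_GUARD_VAL;
            p[underflow_idx] = p[underflow_idx + 1] = PRIV_STACK_GUARD_VAL;

            /* check: any clobbered guard word means an over-/underflow */
            ok = p[0] == PRIV_STACK_GUARD_VAL && p[1] == PRIV_STACK_GUARD_VAL &&
                 p[underflow_idx] == PRIV_STACK_GUARD_VAL &&
                 p[underflow_idx + 1] == PRIV_STACK_GUARD_VAL;
            printf("underflow_idx=%d guards %s\n", underflow_idx,
                   ok ? "intact" : "hit");
            free(p);
            return 0;
    }
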
@@ -1873,7 +1943,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	struct bpf_binary_header *header;
 	struct bpf_binary_header *ro_header;
 	struct arm64_jit_data *jit_data;
+	void __percpu *priv_stack_ptr = NULL;
 	bool was_classic = bpf_prog_was_classic(prog);
+	int priv_stack_alloc_sz;
 	bool tmp_blinded = false;
 	bool extra_pass = false;
 	struct jit_ctx ctx;
@@ -1905,6 +1977,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		}
 		prog->aux->jit_data = jit_data;
 	}
+	priv_stack_ptr = prog->aux->priv_stack_ptr;
+	if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
+		/* Allocate actual private stack size with verifier-calculated
+		 * stack size plus two memory guards to protect overflow and
+		 * underflow.
+		 */
+		priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+				      2 * PRIV_STACK_GUARD_SZ;
+		priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 16, GFP_KERNEL);
+		if (!priv_stack_ptr) {
+			prog = orig_prog;
+			goto out_priv_stack;
+		}
+
+		priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
+		prog->aux->priv_stack_ptr = priv_stack_ptr;
+	}
 	if (jit_data->ctx.offset) {
 		ctx = jit_data->ctx;
 		ro_image_ptr = jit_data->ro_image;
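
Worked example of the sizing: a verifier stack_depth of 100 bytes rounds up to 112, so priv_stack_alloc_sz = 112 + 2 * 16 = 144 bytes per CPU; the 16-byte alignment passed to __alloc_percpu_gfp() matches the 16-byte rounding the prologue applies to ctx->stack_size.
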
@@ -1928,6 +2017,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
 	ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
 
+	if (priv_stack_ptr)
+		ctx.priv_sp_used = true;
+
 	/* Pass 1: Estimate the maximum image size.
 	 *
 	 * BPF line info needs ctx->offset[i] to be the offset of
@@ -2067,7 +2159,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		ctx.offset[i] *= AARCH64_INSN_SIZE;
 	bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
+	if (!ro_header && priv_stack_ptr) {
+		free_percpu(priv_stack_ptr);
+		prog->aux->priv_stack_ptr = NULL;
+	}
 	kvfree(ctx.offset);
+out_priv_stack:
 	kfree(jit_data);
 	prog->aux->jit_data = NULL;
 }
@@ -2086,6 +2183,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out_off;
 	}
 
+bool bpf_jit_supports_private_stack(void)
+{
+	return true;
+}
+
 bool bpf_jit_supports_kfunc_call(void)
 {
 	return true;
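
Returning true here is what lets the core BPF code consider the private stack on arm64 at all: as far as I can tell, jits_use_priv_stack, tested in the allocation hunk above, is only set for eligible programs when bpf_jit_supports_private_stack() reports support (the common-code default returns false).
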
@@ -2931,6 +3033,8 @@ void bpf_jit_free(struct bpf_prog *prog)
 	if (prog->jited) {
 		struct arm64_jit_data *jit_data = prog->aux->jit_data;
 		struct bpf_binary_header *hdr;
+		void __percpu *priv_stack_ptr;
+		int priv_stack_alloc_sz;
 
 		/*
 		 * If we fail the final pass of JIT (from jit_subprogs),
@@ -2944,6 +3048,13 @@ void bpf_jit_free(struct bpf_prog *prog)
 		}
 		hdr = bpf_jit_binary_pack_hdr(prog);
 		bpf_jit_binary_pack_free(hdr, NULL);
+		priv_stack_ptr = prog->aux->priv_stack_ptr;
+		if (priv_stack_ptr) {
+			priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+					      2 * PRIV_STACK_GUARD_SZ;
+			priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
+			free_percpu(prog->aux->priv_stack_ptr);
+		}
 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
 	}
 