@@ -5040,16 +5040,19 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
 static int bpf_core_calc_field_relo(const struct bpf_program *prog,
 				    const struct bpf_core_relo *relo,
 				    const struct bpf_core_spec *spec,
-				    __u32 *val, bool *validate)
+				    __u32 *val, __u32 *field_sz, __u32 *type_id,
+				    bool *validate)
 {
 	const struct bpf_core_accessor *acc;
 	const struct btf_type *t;
-	__u32 byte_off, byte_sz, bit_off, bit_sz;
+	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
 	const struct btf_member *m;
 	const struct btf_type *mt;
 	bool bitfield;
 	__s64 sz;
 
+	*field_sz = 0;
+
 	if (relo->kind == BPF_FIELD_EXISTS) {
 		*val = spec ? 1 : 0;
 		return 0;
@@ -5065,6 +5068,12 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
 	if (!acc->name) {
 		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
 			*val = spec->bit_offset / 8;
+			/* remember field size for load/store mem size */
+			sz = btf__resolve_size(spec->btf, acc->type_id);
+			if (sz < 0)
+				return -EINVAL;
+			*field_sz = sz;
+			*type_id = acc->type_id;
 		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
 			sz = btf__resolve_size(spec->btf, acc->type_id);
 			if (sz < 0)
@@ -5081,7 +5090,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
 	}
 
 	m = btf_members(t) + acc->idx;
-	mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
+	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
 	bit_off = spec->bit_offset;
 	bit_sz = btf_member_bitfield_size(t, acc->idx);
 
@@ -5101,7 +5110,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
 			byte_off = bit_off / 8 / byte_sz * byte_sz;
 		}
 	} else {
-		sz = btf__resolve_size(spec->btf, m->type);
+		sz = btf__resolve_size(spec->btf, field_type_id);
 		if (sz < 0)
 			return -EINVAL;
 		byte_sz = sz;
@@ -5119,6 +5128,10 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
 	switch (relo->kind) {
 	case BPF_FIELD_BYTE_OFFSET:
 		*val = byte_off;
+		if (!bitfield) {
+			*field_sz = byte_sz;
+			*type_id = field_type_id;
+		}
 		break;
 	case BPF_FIELD_BYTE_SIZE:
 		*val = byte_sz;
@@ -5219,6 +5232,19 @@ struct bpf_core_relo_res
 	bool poison;
 	/* some relocations can't be validated against orig_val */
 	bool validate;
+	/* for field byte offset relocations or the forms:
+	 *     *(T *)(rX + <off>) = rY
+	 *     rX = *(T *)(rY + <off>),
+	 * we remember original and resolved field size to adjust direct
+	 * memory loads of pointers and integers; this is necessary for 32-bit
+	 * host kernel architectures, but also allows to automatically
+	 * relocate fields that were resized from, e.g., u32 to u64, etc.
+	 */
+	bool fail_memsz_adjust;
+	__u32 orig_sz;
+	__u32 orig_type_id;
+	__u32 new_sz;
+	__u32 new_type_id;
 };
 
 /* Calculate original and target relocation values, given local and target
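
To make the new bookkeeping concrete: for a field byte-offset relocation against a hypothetical field that grew from 4 to 8 bytes, the result would be filled in roughly as follows. This is an illustrative sketch only; the struct is internal to libbpf.c and the offsets, sizes, and type IDs below are invented.

	struct bpf_core_relo_res res = {
		.poison    = false,
		.validate  = true,	/* non-bitfield byte offsets are still validated */
		.orig_val  = 16,	/* field byte offset per local (compile-time) BTF */
		.new_val   = 24,	/* field byte offset per target (kernel) BTF */
		.orig_sz   = 4,		/* local field is, say, a __u32 */
		.new_sz    = 8,		/* kernel field became, say, a __u64 */
		.fail_memsz_adjust = false,	/* both unsigned, so the load can be widened */
		/* orig_type_id/new_type_id reference the respective BTF integer types */
	};
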
@@ -5240,10 +5266,56 @@ static int bpf_core_calc_relo(const struct bpf_program *prog,
 	res->new_val = 0;
 	res->poison = false;
 	res->validate = true;
+	res->fail_memsz_adjust = false;
+	res->orig_sz = res->new_sz = 0;
+	res->orig_type_id = res->new_type_id = 0;
 
 	if (core_relo_is_field_based(relo->kind)) {
-		err = bpf_core_calc_field_relo(prog, relo, local_spec, &res->orig_val, &res->validate);
-		err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, &res->new_val, NULL);
+		err = bpf_core_calc_field_relo(prog, relo, local_spec,
+					       &res->orig_val, &res->orig_sz,
+					       &res->orig_type_id, &res->validate);
+		err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
+						      &res->new_val, &res->new_sz,
+						      &res->new_type_id, NULL);
+		if (err)
+			goto done;
+		/* Validate if it's safe to adjust load/store memory size.
+		 * Adjustments are performed only if original and new memory
+		 * sizes differ.
+		 */
+		res->fail_memsz_adjust = false;
+		if (res->orig_sz != res->new_sz) {
+			const struct btf_type *orig_t, *new_t;
+
+			orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
+			new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
+
+			/* There are two use cases in which it's safe to
+			 * adjust load/store's mem size:
+			 *   - reading a 32-bit kernel pointer, while on the BPF
+			 *   side pointers are always 64-bit; in this case
+			 *   it's safe to "downsize" instruction size due to
+			 *   pointer being treated as unsigned integer with
+			 *   zero-extended upper 32-bits;
+			 *   - reading unsigned integers, again because
+			 *   zero-extension preserves the value correctly.
+			 *
+			 * In all other cases it's incorrect to attempt to
+			 * load/store the field because the read value will be
+			 * incorrect, so we poison the relocated instruction.
+			 */
+			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
+				goto done;
+			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
+			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
+			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
+				goto done;
+
+			/* mark as invalid mem size adjustment, but this will
+			 * only be checked for LDX/STX/ST insns
+			 */
+			res->fail_memsz_adjust = true;
+		}
 	} else if (core_relo_is_type_based(relo->kind)) {
 		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
 		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
@@ -5252,6 +5324,7 @@ static int bpf_core_calc_relo(const struct bpf_program *prog,
 		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
 	}
 
+done:
 	if (err == -EUCLEAN) {
 		/* EUCLEAN is used to signal instruction poisoning request */
 		res->poison = true;
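
As a rough BPF C illustration of the two safe cases and the poisoned case described above (the struct, field names, and scenario are invented; this assumes a BTF-aware program type where direct field dereferences are allowed):

	/* As described by the BPF object's own BTF: */
	struct kdata___local {
		void *ptr;		/* 64-bit on the BPF side, possibly 32-bit in the kernel */
		unsigned int ucnt;	/* kernel may have grown this to a 64-bit unsigned type */
		int scnt;		/* signed: growing it in the kernel cannot be fixed up */
	} __attribute__((preserve_access_index));

	/* Direct field accesses compile to LDX instructions whose memory size
	 * libbpf may now rewrite at load time:
	 *
	 *   v = d->ptr;   // 8-byte load, downsized to 4 bytes on a 32-bit kernel
	 *                 // (safe: pointers are treated as zero-extended unsigned values)
	 *   v = d->ucnt;  // 4-byte load, widened to 8 bytes if the kernel field is u64
	 *                 // (safe: zero-extension preserves unsigned values)
	 *   v = d->scnt;  // would need sign extension if widened, so the insn is poisoned
	 */
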
@@ -5291,6 +5364,28 @@ static bool is_ldimm64(struct bpf_insn *insn)
 	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
 }
 
+static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
+{
+	switch (BPF_SIZE(insn->code)) {
+	case BPF_DW: return 8;
+	case BPF_W: return 4;
+	case BPF_H: return 2;
+	case BPF_B: return 1;
+	default: return -1;
+	}
+}
+
+static int insn_bytes_to_bpf_size(__u32 sz)
+{
+	switch (sz) {
+	case 8: return BPF_DW;
+	case 4: return BPF_W;
+	case 2: return BPF_H;
+	case 1: return BPF_B;
+	default: return -1;
+	}
+}
+
 /*
  * Patch relocatable BPF instruction.
  *
@@ -5300,10 +5395,13 @@ static bool is_ldimm64(struct bpf_insn *insn)
  * spec, and is checked before patching instruction. If actual insn->imm value
  * is wrong, bail out with error.
  *
- * Currently three kinds of BPF instructions are supported:
+ * Currently supported classes of BPF instruction are:
  * 1. rX = <imm> (assignment with immediate operand);
  * 2. rX += <imm> (arithmetic operations with immediate operand);
- * 3. rX = <imm64> (load with 64-bit immediate value).
+ * 3. rX = <imm64> (load with 64-bit immediate value);
+ * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
+ * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
+ * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
+ */
 static int bpf_core_patch_insn(struct bpf_program *prog,
 			       const struct bpf_core_relo *relo,
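
For reference, forms 4-6 above correspond to the BPF_LDX/BPF_STX/BPF_ST instruction classes, with T carried in the BPF_SIZE() bits of insn->code, which is exactly what the new helpers translate. A minimal standalone sketch, assuming only the UAPI macros from <linux/bpf.h>:

	#include <linux/bpf.h>

	int example_codes(void)
	{
		__u8 ldx_w  = BPF_LDX | BPF_MEM | BPF_W;	/* form 4: rX = *(u32 *)(rY + off) */
		__u8 stx_dw = BPF_STX | BPF_MEM | BPF_DW;	/* form 5: *(u64 *)(rX + off) = rY */
		__u8 st_b   = BPF_ST  | BPF_MEM | BPF_B;	/* form 6: *(u8 *)(rX + off) = imm */

		/* BPF_SIZE() extracts the T part that the mem-size adjustment rewrites */
		return BPF_SIZE(ldx_w) == BPF_W &&
		       BPF_SIZE(stx_dw) == BPF_DW &&
		       BPF_SIZE(st_b) == BPF_B;
	}
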
@@ -5327,6 +5425,7 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
 	class = BPF_CLASS(insn->code);
 
 	if (res->poison) {
+poison:
 		/* poison second part of ldimm64 to avoid confusing error from
 		 * verifier about "unknown opcode 00"
 		 */
@@ -5369,10 +5468,39 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
 				prog->name, relo_idx, insn_idx, new_val);
 			return -ERANGE;
 		}
+		if (res->fail_memsz_adjust) {
+			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
+				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
+				prog->name, relo_idx, insn_idx);
+			goto poison;
+		}
+
 		orig_val = insn->off;
 		insn->off = new_val;
 		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
 			 prog->name, relo_idx, insn_idx, orig_val, new_val);
+
+		if (res->new_sz != res->orig_sz) {
+			int insn_bytes_sz, insn_bpf_sz;
+
+			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
+			if (insn_bytes_sz != res->orig_sz) {
+				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
+					prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
+				return -EINVAL;
+			}
+
+			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
+			if (insn_bpf_sz < 0) {
+				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
+					prog->name, relo_idx, insn_idx, res->new_sz);
+				return -EINVAL;
+			}
+
+			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
+			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
+				 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
+		}
 		break;
 	case BPF_LD: {
 		__u64 imm;
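
The rewrite above only swaps the BPF_SIZE() bits and leaves mode and class intact. A small userspace sketch of the same transformation on a hand-built instruction (uses only the UAPI definitions; the registers and offset are arbitrary example values):

	#include <stdio.h>
	#include <linux/bpf.h>

	int main(void)
	{
		/* rX = *(u32 *)(rY + 16), as clang would emit for a u32 field */
		struct bpf_insn insn = {
			.code    = BPF_LDX | BPF_MEM | BPF_W,
			.dst_reg = BPF_REG_0,
			.src_reg = BPF_REG_1,
			.off     = 16,
		};

		/* widen the load to 8 bytes, keeping mode (BPF_MEM) and class (BPF_LDX) */
		insn.code = BPF_MODE(insn.code) | BPF_DW | BPF_CLASS(insn.code);

		printf("new code: 0x%02x (expected 0x%02x)\n",
		       insn.code, BPF_LDX | BPF_MEM | BPF_DW);
		return 0;
	}
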
@@ -5714,7 +5842,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
 		return 0;
 
 	if (targ_btf_path)
-		targ_btf = btf__parse_elf(targ_btf_path, NULL);
+		targ_btf = btf__parse(targ_btf_path, NULL);
 	else
 		targ_btf = obj->btf_vmlinux;
 	if (IS_ERR_OR_NULL(targ_btf)) {
@@ -5765,6 +5893,11 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
 				err = -EINVAL;
 				goto out;
 			}
+			/* no need to apply CO-RE relocation if the program is
+			 * not going to be loaded
+			 */
+			if (!prog->load)
+				continue;
 
 			err = bpf_core_apply_relo(prog, rec, i, obj->btf,
 						  targ_btf, cand_cache);
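
The prog->load flag consulted above is the same one exposed through the public API, so CO-RE relocation is now skipped for programs that user code disabled before loading. A short userspace sketch (the program name is invented for illustration):

	#include <bpf/libbpf.h>

	int load_without_optional_prog(struct bpf_object *obj)
	{
		struct bpf_program *prog;

		/* programs with autoload disabled are neither relocated nor loaded */
		prog = bpf_object__find_program_by_name(obj, "optional_probe");
		if (prog)
			bpf_program__set_autoload(prog, false);

		return bpf_object__load(obj);
	}
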