@@ -39,6 +39,7 @@ struct bpf_local_storage;
 struct bpf_local_storage_map;
 struct kobject;
 struct mem_cgroup;
+struct bpf_func_state;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -117,6 +118,9 @@ struct bpf_map_ops {
 					   void *owner, u32 size);
 	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
 
+	/* Misc helpers.*/
+	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+
 	/* map_meta_equal must be implemented for maps that can be
 	 * used as an inner map.  It is a runtime check to ensure
 	 * an inner map can be inserted to an outer map.
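A minimal usage sketch (not part of this change): the new map_redirect op is the per-map-type backend for the existing bpf_redirect_map() helper, so an XDP program keeps calling the helper as before. The map name and index below are hypothetical, and the sketch assumes libbpf's bpf_helpers.h:

	SEC("xdp")
	int xdp_redirect_prog(struct xdp_md *ctx)
	{
		__u32 index = 0;	/* hypothetical devmap key */

		/* The lower bits of the flags argument are returned if the
		 * lookup fails; XDP_PASS falls back to the network stack.
		 */
		return bpf_redirect_map(&redirect_map, index, XDP_PASS);
	}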
@@ -129,6 +133,13 @@ struct bpf_map_ops {
 	bool (*map_meta_equal)(const struct bpf_map *meta0,
 			       const struct bpf_map *meta1);
 
+
+	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+					      struct bpf_func_state *caller,
+					      struct bpf_func_state *callee);
+	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+				     void *callback_ctx, u64 flags);
+
 	/* BTF name and id of struct allocated by map_alloc */
 	const char * const map_btf_name;
 	int *map_btf_id;
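A minimal usage sketch (not part of this change): these two ops let the verifier type-check and run the callback behind the new bpf_for_each_map_elem() helper. The map, callback, and program names below are hypothetical, and the sketch assumes libbpf's bpf_helpers.h:

	/* Called once per element; return 0 to continue, 1 to stop early. */
	static long count_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
	{
		(*(long *)ctx)++;
		return 0;
	}

	SEC("tc")
	int count_map_elems(struct __sk_buff *skb)
	{
		long n = 0;

		bpf_for_each_map_elem(&my_hash_map, count_elem, &n, 0);
		return 0;
	}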
@@ -295,6 +306,8 @@ enum bpf_arg_type {
 	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
 	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
 	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
+	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
+	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
 	__BPF_ARG_TYPE_MAX,
 };
 
@@ -411,6 +424,8 @@ enum bpf_reg_type {
 	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
 	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
 	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
+	PTR_TO_FUNC,		 /* reg points to a bpf program function */
+	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
 };
 
 /* The information passed from prog-specific *_is_valid_access
@@ -506,6 +521,11 @@ enum bpf_cgroup_storage_type {
  */
 #define MAX_BPF_FUNC_ARGS 12
 
+/* The maximum number of arguments passed through registers
+ * a single function may have.
+ */
+#define MAX_BPF_FUNC_REG_ARGS 5
+
 struct btf_func_model {
 	u8 ret_size;
 	u8 nr_args;
@@ -1380,6 +1400,10 @@ void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
 				struct bpf_link_info *info);
 
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+				   struct bpf_func_state *caller,
+				   struct bpf_func_state *callee);
+
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
@@ -1429,9 +1453,9 @@ struct btf *bpf_get_btf_vmlinux(void);
 /* Map specifics */
 struct xdp_buff;
 struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
 
-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_flush(void);
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
@@ -1441,7 +1465,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 			     struct bpf_prog *xdp_prog);
 bool dev_map_can_have_prog(struct bpf_map *map);
 
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
@@ -1470,6 +1493,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
 			     const union bpf_attr *kattr,
 			     union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+				const union bpf_attr *kattr,
+				union bpf_attr __user *uattr);
 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 		    const struct bpf_prog *prog,
 		    struct bpf_insn_access_aux *info);
@@ -1499,6 +1525,7 @@ struct bpf_prog *bpf_prog_by_id(u32 id);
 struct bpf_link *bpf_link_by_id(u32 id);
 
 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+void bpf_task_storage_free(struct task_struct *task);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -1568,17 +1595,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
 	return -EOPNOTSUPP;
 }
 
-static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
-						       u32 key)
-{
-	return NULL;
-}
-
-static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
-							     u32 key)
-{
-	return NULL;
-}
 static inline bool dev_map_can_have_prog(struct bpf_map *map)
 {
 	return false;
@@ -1590,6 +1606,7 @@ static inline void __dev_flush(void)
 
 struct xdp_buff;
 struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
 
 static inline
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -1614,12 +1631,6 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
 	return 0;
 }
 
-static inline
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-	return NULL;
-}
-
 static inline void __cpu_map_flush(void)
 {
 }
@@ -1670,6 +1681,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	return -ENOTSUPP;
 }
 
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+					      const union bpf_attr *kattr,
+					      union bpf_attr __user *uattr)
+{
+	return -ENOTSUPP;
+}
+
 static inline void bpf_map_put(struct bpf_map *map)
 {
 }
@@ -1684,6 +1702,10 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 {
 	return NULL;
 }
+
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
@@ -1768,22 +1790,24 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 }
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
-#if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
-			 struct bpf_prog *old, u32 which);
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
 void sock_map_unhash(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
+
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+				       void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+				       void *value, u64 map_flags);
 #else
-static inline int sock_map_prog_update(struct bpf_map *map,
-				       struct bpf_prog *prog,
-				       struct bpf_prog *old, u32 which)
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
 {
-	return -EOPNOTSUPP;
 }
 
+#ifdef CONFIG_BPF_SYSCALL
 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
 				       struct bpf_prog *prog)
 {
@@ -1801,20 +1825,7 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
 {
 	return -EOPNOTSUPP;
 }
-#endif /* CONFIG_BPF_STREAM_PARSER */
 
-#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
-void bpf_sk_reuseport_detach(struct sock *sk);
-int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
-				       void *value);
-int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
-				       void *value, u64 map_flags);
-#else
-static inline void bpf_sk_reuseport_detach(struct sock *sk)
-{
-}
-
-#ifdef CONFIG_BPF_SYSCALL
 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
 						     void *key, void *value)
 {
@@ -1886,6 +1897,9 @@ extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
 extern const struct bpf_func_proto bpf_sock_from_file_proto;
 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
 
 const struct bpf_func_proto *bpf_tracing_func_proto(
 	enum bpf_func_id func_id, const struct bpf_prog *prog);
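A minimal usage sketch (not part of this change): the task storage protos expose the bpf_task_storage_get()/bpf_task_storage_delete() helpers, whose per-task memory is reclaimed via the bpf_task_storage_free() hook declared above. The map and hook names below are hypothetical, and the sketch assumes libbpf's bpf_helpers.h and bpf_tracing.h:

	struct {
		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, __u64);
	} task_store SEC(".maps");

	SEC("lsm/task_alloc")
	int BPF_PROG(count_task_alloc, struct task_struct *task,
		     unsigned long clone_flags)
	{
		__u64 *val;

		/* Create the per-task slot on first access. */
		val = bpf_task_storage_get(&task_store, task, 0,
					   BPF_LOCAL_STORAGE_GET_F_CREATE);
		if (val)
			*val += 1;
		return 0;
	}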