@@ -80,9 +80,6 @@ struct bucket {
 	raw_spinlock_t raw_lock;
 };
 
-#define HASHTAB_MAP_LOCK_COUNT 8
-#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
-
 struct bpf_htab {
 	struct bpf_map map;
 	struct bpf_mem_alloc ma;
@@ -104,7 +101,6 @@ struct bpf_htab {
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
 	struct lock_class_key lockdep_key;
-	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -146,35 +142,26 @@ static void htab_init_buckets(struct bpf_htab *htab)
 	}
 }
 
-static inline int htab_lock_bucket(const struct bpf_htab *htab,
-				   struct bucket *b, u32 hash,
+static inline int htab_lock_bucket(struct bucket *b,
 				   unsigned long *pflags)
 {
 	unsigned long flags;
 
-	hash = hash & HASHTAB_MAP_LOCK_MASK;
-
-	preempt_disable();
-	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
-		__this_cpu_dec(*(htab->map_locked[hash]));
-		preempt_enable();
-		return -EBUSY;
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&b->raw_lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&b->raw_lock, flags);
 	}
 
-	raw_spin_lock_irqsave(&b->raw_lock, flags);
 	*pflags = flags;
-
 	return 0;
 }
 
-static inline void htab_unlock_bucket(const struct bpf_htab *htab,
-				      struct bucket *b, u32 hash,
+static inline void htab_unlock_bucket(struct bucket *b,
 				      unsigned long flags)
 {
-	hash = hash & HASHTAB_MAP_LOCK_MASK;
 	raw_spin_unlock_irqrestore(&b->raw_lock, flags);
-	__this_cpu_dec(*(htab->map_locked[hash]));
-	preempt_enable();
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
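
The rewritten helpers above drop the per-CPU map_locked nesting counters entirely: a caller running in NMI context only try-acquires the per-bucket raw spinlock and backs off with -EBUSY, while every other caller takes the lock unconditionally with IRQs disabled. Below is a minimal, stand-alone user-space sketch of that locking policy; the pthread mutex and the cannot_block flag are stand-ins (assumptions) for the kernel's raw_spin_lock_irqsave()/raw_spin_trylock_irqsave() pair and in_nmi(), not the actual kernel API.

/* User-space analogue of the bucket-locking policy in the patch above:
 * contexts that must never block only try-acquire the lock and report
 * -EBUSY on contention; normal contexts block until the lock is free.
 * Hypothetical names, illustration only.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bucket {
	pthread_mutex_t lock;
};

static int bucket_lock(struct bucket *b, bool cannot_block)
{
	if (cannot_block) {
		/* non-blocking path: fail instead of spinning/sleeping */
		if (pthread_mutex_trylock(&b->lock) != 0)
			return -EBUSY;
	} else {
		pthread_mutex_lock(&b->lock);
	}
	return 0;
}

static void bucket_unlock(struct bucket *b)
{
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bucket b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	if (!bucket_lock(&b, false)) {
		/* ... update the bucket's hash chain here ... */
		bucket_unlock(&b);
	}

	/* A non-blocking attempt on an uncontended lock succeeds (prints 0). */
	printf("trylock: %d\n", bucket_lock(&b, true));
	bucket_unlock(&b);
	return 0;
}

The callers changed throughout the rest of the patch follow the same pattern: take the bucket lock, walk or modify the bucket's hash chain, release it, and bail out early when htab_lock_bucket() returns -EBUSY.
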
@@ -467,7 +454,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
+	int err;
 
 	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 	if (!htab)
@@ -512,15 +499,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_htab;
 
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
-		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
-							   sizeof(int),
-							   sizeof(int),
-							   GFP_USER);
-		if (!htab->map_locked[i])
-			goto free_map_locked;
-	}
-
 	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 		htab->hashrnd = 0;
 	else
@@ -548,13 +526,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (htab->use_percpu_counter) {
 		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
 		if (err)
-			goto free_map_locked;
+			goto free_buckets;
 	}
 
 	if (prealloc) {
 		err = prealloc_init(htab);
 		if (err)
-			goto free_map_locked;
+			goto free_buckets;
 
 		if (!percpu && !lru) {
 			/* lru itself can remove the least used element, so
@@ -567,24 +545,23 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	} else {
 		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
 		if (err)
-			goto free_map_locked;
+			goto free_buckets;
 		if (percpu) {
 			err = bpf_mem_alloc_init(&htab->pcpu_ma,
 						 round_up(htab->map.value_size, 8), true);
 			if (err)
-				goto free_map_locked;
+				goto free_buckets;
 		}
 	}
 
 	return &htab->map;
 
 free_prealloc:
 	prealloc_destroy(htab);
-free_map_locked:
+free_buckets:
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
+
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
@@ -781,7 +758,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return false;
 
@@ -792,7 +769,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 		break;
 	}
 
-	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	return l == tgt_l;
 }
@@ -1106,7 +1083,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1151,7 +1128,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1197,7 +1174,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	copy_map_value(&htab->map,
 		       l_new->key + round_up(map->key_size, 8), value);
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1218,7 +1195,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	ret = 0;
 
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (ret)
 		htab_lru_push_free(htab, l_new);
@@ -1254,7 +1231,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1279,7 +1256,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1320,7 +1297,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1344,7 +1321,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;
@@ -1383,7 +1360,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1396,7 +1373,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 		ret = -ENOENT;
 	}
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1419,7 +1396,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1430,7 +1407,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l)
 		htab_lru_push_free(htab, l);
 	return ret;
@@ -1493,7 +1470,6 @@ static void htab_map_free_timers(struct bpf_map *map)
 static void htab_map_free(struct bpf_map *map)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	int i;
 
 	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
 	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1515,10 +1491,10 @@ static void htab_map_free(struct bpf_map *map)
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
+
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
+
 	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 }
@@ -1562,7 +1538,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &bflags);
+	ret = htab_lock_bucket(b, &bflags);
 	if (ret)
 		return ret;
 
@@ -1600,7 +1576,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 		free_htab_elem(htab, l);
 	}
 
-	htab_unlock_bucket(htab, b, hash, bflags);
+	htab_unlock_bucket(b, bflags);
 
 	if (is_lru_map && l)
 		htab_lru_push_free(htab, l);
@@ -1718,7 +1694,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	head = &b->head;
 	/* do not grab the lock unless need it (bucket_cnt > 0). */
 	if (locked) {
-		ret = htab_lock_bucket(htab, b, batch, &flags);
+		ret = htab_lock_bucket(b, &flags);
 		if (ret) {
 			rcu_read_unlock();
 			bpf_enable_instrumentation();
@@ -1741,7 +1717,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		goto after_loop;
@@ -1752,7 +1728,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		kvfree(keys);
@@ -1813,7 +1789,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		dst_val += value_size;
 	}
 
-	htab_unlock_bucket(htab, b, batch, flags);
+	htab_unlock_bucket(b, flags);
 	locked = false;
 
 	while (node_to_free) {