@@ -9,13 +9,14 @@
 #include <linux/slab.h>
 #include <linux/btf_ids.h>
 #include "percpu_freelist.h"
+#include <asm/rqspinlock.h>
 
 #define QUEUE_STACK_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
 
 struct bpf_queue_stack {
 	struct bpf_map map;
-	raw_spinlock_t lock;
+	rqspinlock_t lock;
 	u32 head, tail;
 	u32 size; /* max_entries + 1 */
 
@@ -78,7 +79,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	qs->size = size;
 
-	raw_spin_lock_init(&qs->lock);
+	raw_res_spin_lock_init(&qs->lock);
 
 	return &qs->map;
 }
@@ -98,12 +99,8 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 
-	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
-			return -EBUSY;
-	} else {
-		raw_spin_lock_irqsave(&qs->lock, flags);
-	}
+	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+		return -EBUSY;
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -120,7 +117,7 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	}
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
 	return err;
 }
 
@@ -133,12 +130,8 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	void *ptr;
 	u32 index;
 
-	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
-			return -EBUSY;
-	} else {
-		raw_spin_lock_irqsave(&qs->lock, flags);
-	}
+	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
+		return -EBUSY;
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -157,7 +150,7 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	qs->head = index;
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, flags);
+	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
 	return err;
 }
 
@@ -203,12 +196,8 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
-	if (in_nmi()) {
-		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
-			return -EBUSY;
-	} else {
-		raw_spin_lock_irqsave(&qs->lock, irq_flags);
-	}
+	if (raw_res_spin_lock_irqsave(&qs->lock, irq_flags))
+		return -EBUSY;
 
 	if (queue_stack_map_is_full(qs)) {
 		if (!replace) {
@@ -227,7 +216,7 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	qs->head = 0;
 
 out:
-	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+	raw_res_spin_unlock_irqrestore(&qs->lock, irq_flags);
 	return err;
 }
 
0 commit comments