@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i;
 
-	synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
-	for (i = 1; i < vp_dev->msix_vectors; i++)
+	if (vp_dev->intx_enabled)
+		synchronize_irq(vp_dev->pci_dev->irq);
+
+	for (i = 0; i < vp_dev->msix_vectors; ++i)
 		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -97,10 +99,79 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+		bool per_vq_vectors, struct irq_affinity *desc)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned i, v;
+	int err = -ENOMEM;
+
+	vp_dev->msix_vectors = nvectors;
+
+	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+				     GFP_KERNEL);
+	if (!vp_dev->msix_names)
+		goto error;
+	vp_dev->msix_affinity_masks
+		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+			  GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto error;
+	for (i = 0; i < nvectors; ++i)
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+				       GFP_KERNEL))
+			goto error;
+
+	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+					     nvectors, PCI_IRQ_MSIX |
+					     (desc ? PCI_IRQ_AFFINITY : 0),
+					     desc);
+	if (err < 0)
+		goto error;
+	vp_dev->msix_enabled = 1;
+
+	/* Set the vector used for configuration */
+	v = vp_dev->msix_used_vectors;
+	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+		 "%s-config", name);
+	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+			  vp_config_changed, 0, vp_dev->msix_names[v],
+			  vp_dev);
+	if (err)
+		goto error;
+	++vp_dev->msix_used_vectors;
+
+	v = vp_dev->config_vector(vp_dev, v);
+	/* Verify we had enough resources to assign the vector */
+	if (v == VIRTIO_MSI_NO_VECTOR) {
+		err = -EBUSY;
+		goto error;
+	}
+
+	if (!per_vq_vectors) {
+		/* Shared vector for all VQs */
+		v = vp_dev->msix_used_vectors;
+		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+			 "%s-virtqueues", name);
+		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
+				  vp_dev);
+		if (err)
+			goto error;
+		++vp_dev->msix_used_vectors;
+	}
+	return 0;
+error:
+	return err;
+}
+
+/* the config->del_vqs() implementation */
+void vp_del_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtqueue *vq, *n;
+	int i;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
 		if (vp_dev->msix_vector_map) {
@@ -112,33 +183,35 @@ static void vp_remove_vqs(struct virtio_device *vdev)
 		}
 		vp_dev->del_vq(vq);
 	}
-}
 
-/* the config->del_vqs() implementation */
-void vp_del_vqs(struct virtio_device *vdev)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	int i;
-
-	if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-		return;
+	if (vp_dev->intx_enabled) {
+		free_irq(vp_dev->pci_dev->irq, vp_dev);
+		vp_dev->intx_enabled = 0;
+	}
 
-	vp_remove_vqs(vdev);
+	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
 
-	if (vp_dev->msix_enabled) {
-		for (i = 0; i < vp_dev->msix_vectors; i++)
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
 			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
 		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-		kfree(vp_dev->msix_affinity_masks);
-		kfree(vp_dev->msix_names);
-		kfree(vp_dev->msix_vector_map);
+		pci_free_irq_vectors(vp_dev->pci_dev);
+		vp_dev->msix_enabled = 0;
 	}
 
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-	pci_free_irq_vectors(vp_dev->pci_dev);
+	vp_dev->msix_vectors = 0;
+	vp_dev->msix_used_vectors = 0;
+	kfree(vp_dev->msix_names);
+	vp_dev->msix_names = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
+	kfree(vp_dev->msix_vector_map);
+	vp_dev->msix_vector_map = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
@@ -147,128 +220,80 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 		struct irq_affinity *desc)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	const char *name = dev_name(&vp_dev->vdev.dev);
-	int i, err = -ENOMEM, allocated_vectors, nvectors;
-	unsigned flags = PCI_IRQ_MSIX;
 	u16 msix_vec;
-
-	if (desc) {
-		flags |= PCI_IRQ_AFFINITY;
-		desc->pre_vectors++; /* virtio config vector */
-	}
-
-	nvectors = 1;
-	for (i = 0; i < nvqs; i++)
-		if (callbacks[i])
-			nvectors++;
+	int i, err, nvectors, allocated_vectors;
 
 	if (per_vq_vectors) {
-		err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-				nvectors, flags, desc);
+		/* Best option: one for change interrupt, one per vq. */
+		nvectors = 1;
+		for (i = 0; i < nvqs; ++i)
+			if (callbacks[i])
+				++nvectors;
 	} else {
-		err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-				PCI_IRQ_MSIX);
-	}
-	if (err < 0)
-		return err;
-
-	vp_dev->msix_vectors = nvectors;
-	vp_dev->msix_names = kmalloc_array(nvectors,
-			sizeof(*vp_dev->msix_names), GFP_KERNEL);
-	if (!vp_dev->msix_names)
-		goto out_free_irq_vectors;
-
-	vp_dev->msix_affinity_masks = kcalloc(nvectors,
-			sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-	if (!vp_dev->msix_affinity_masks)
-		goto out_free_msix_names;
-
-	for (i = 0; i < nvectors; ++i) {
-		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-				GFP_KERNEL))
-			goto out_free_msix_affinity_masks;
+		/* Second best: one for change, shared for all vqs. */
+		nvectors = 2;
 	}
 
-	/* Set the vector used for configuration */
-	snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-			"%s-config", name);
-	err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-			0, vp_dev->msix_names[0], vp_dev);
+	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+				      per_vq_vectors ? desc : NULL);
 	if (err)
-		goto out_free_irq_vectors;
+		goto error_find;
 
-	/* Verify we had enough resources to assign the vector */
-	if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-		err = -EBUSY;
-		goto out_free_config_irq;
+	if (per_vq_vectors) {
+		vp_dev->msix_vector_map = kmalloc_array(nvqs,
+				sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
+		if (!vp_dev->msix_vector_map)
+			goto error_find;
 	}
 
-	vp_dev->msix_vector_map = kmalloc_array(nvqs,
-			sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-	if (!vp_dev->msix_vector_map)
-		goto out_disable_config_irq;
-
-	allocated_vectors = 1; /* vector 0 is the config interrupt */
+	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
 
-		if (callbacks[i])
-			msix_vec = allocated_vectors;
-		else
+		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
-
+		else if (per_vq_vectors)
+			msix_vec = allocated_vectors++;
+		else
+			msix_vec = VP_MSIX_VQ_VECTOR;
 		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
 				msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto error_find;
 		}
 
+		if (!per_vq_vectors)
+			continue;
+
 		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
 			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
 			continue;
 		}
 
-		snprintf(vp_dev->msix_names[i + 1],
-			 sizeof(*vp_dev->msix_names), "%s-%s",
+		/* allocate per-vq irq if available and necessary */
+		snprintf(vp_dev->msix_names[msix_vec],
+			 sizeof *vp_dev->msix_names,
+			 "%s-%s",
 			 dev_name(&vp_dev->vdev.dev), names[i]);
 		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-				  vring_interrupt, IRQF_SHARED,
-				  vp_dev->msix_names[i + 1], vqs[i]);
+				  vring_interrupt, 0,
+				  vp_dev->msix_names[msix_vec],
+				  vqs[i]);
 		if (err) {
 			/* don't free this irq on error */
 			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-			goto out_remove_vqs;
+			goto error_find;
 		}
 		vp_dev->msix_vector_map[i] = msix_vec;
-
-		if (per_vq_vectors)
-			allocated_vectors++;
 	}
-
-	vp_dev->msix_enabled = 1;
 	return 0;
 
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-	vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-	for (i = 0; i < nvectors; i++) {
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-	}
-	kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-	kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-	pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+	vp_del_vqs(vdev);
 	return err;
 }
 
@@ -282,8 +307,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vp_dev);
 	if (err)
-		return err;
+		goto out_del_vqs;
 
+	vp_dev->intx_enabled = 1;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
@@ -293,15 +319,13 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 				VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
-			goto out_remove_vqs;
+			goto out_del_vqs;
 		}
 	}
 
 	return 0;
-
-out_remove_vqs:
-	vp_remove_vqs(vdev);
-	free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+	vp_del_vqs(vdev);
 	return err;
 }
 