@@ -2358,8 +2358,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 	 */
 	qp = vsi->base_queue;
 	vector = vsi->base_vector;
-	q_vector = vsi->q_vectors;
-	for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		q_vector = vsi->q_vectors[i];
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2439,7 +2439,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
  **/
 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 {
-	struct i40e_q_vector *q_vector = vsi->q_vectors;
+	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
@@ -2558,7 +2558,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	int vector, err;
 
 	for (vector = 0; vector < q_vectors; vector++) {
-		struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
 		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -2709,7 +2709,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		i40e_flush(hw);
 
 		if (!test_bit(__I40E_DOWN, &pf->state))
-			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
 	}
 
 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2785,7 +2785,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
  **/
 static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
 {
-	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
 	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
 
 	rx_ring->q_vector = q_vector;
@@ -2803,7 +2803,7 @@ static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
  **/
 static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
 {
-	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
 	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
 
 	tx_ring->q_vector = q_vector;
@@ -2891,7 +2891,7 @@ static void i40e_netpoll(struct net_device *netdev)
 	pf->flags |= I40E_FLAG_IN_NETPOLL;
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		for (i = 0; i < vsi->num_q_vectors; i++)
-			i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
 	} else {
 		i40e_intr(pf->pdev->irq, netdev);
 	}
@@ -3077,14 +3077,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 			u16 vector = i + base;
 
 			/* free only the irqs that were actually requested */
-			if (vsi->q_vectors[i].num_ringpairs == 0)
+			if (vsi->q_vectors[i]->num_ringpairs == 0)
 				continue;
 
 			/* clear the affinity_mask in the IRQ descriptor */
 			irq_set_affinity_hint(pf->msix_entries[vector].vector,
 					      NULL);
 			free_irq(pf->msix_entries[vector].vector,
-				 &vsi->q_vectors[i]);
+				 vsi->q_vectors[i]);
 
 			/* Tear down the interrupt queue link list
 			 *
@@ -3167,6 +3167,38 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 	}
 }
 
+/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+	int r_idx;
+
+	if (!q_vector)
+		return;
+
+	/* disassociate q_vector from rings */
+	for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
+		q_vector->tx.ring[r_idx]->q_vector = NULL;
+	for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
+		q_vector->rx.ring[r_idx]->q_vector = NULL;
+
+	/* only VSI w/ an associated netdev is set up w/ NAPI */
+	if (vsi->netdev)
+		netif_napi_del(&q_vector->napi);
+
+	vsi->q_vectors[v_idx] = NULL;
+
+	kfree_rcu(q_vector, rcu);
+}
+
 /**
  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  * @vsi: the VSI being un-configured
@@ -3178,24 +3210,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
 {
 	int v_idx;
 
-	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
-		struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
-		int r_idx;
-
-		if (!q_vector)
-			continue;
-
-		/* disassociate q_vector from rings */
-		for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-			q_vector->tx.ring[r_idx]->q_vector = NULL;
-		for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-			q_vector->rx.ring[r_idx]->q_vector = NULL;
-
-		/* only VSI w/ an associated netdev is set up w/ NAPI */
-		if (vsi->netdev)
-			netif_napi_del(&q_vector->napi);
-	}
-	kfree(vsi->q_vectors);
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+		i40e_free_q_vector(vsi, v_idx);
 }
 
 /**
@@ -3245,7 +3261,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
 		return;
 
 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx].napi);
+		napi_enable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3260,7 +3276,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 		return;
 
 	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx].napi);
+		napi_disable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -4945,6 +4961,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 {
 	int ret = -ENODEV;
 	struct i40e_vsi *vsi;
+	int sz_vectors;
 	int vsi_idx;
 	int i;
 
@@ -4970,14 +4987,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 		vsi_idx = i;             /* Found one! */
 	} else {
 		ret = -ENODEV;
-		goto err_alloc_vsi;  /* out of VSI slots! */
+		goto unlock_pf;  /* out of VSI slots! */
 	}
 	pf->next_vsi = ++i;
 
 	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
 	if (!vsi) {
 		ret = -ENOMEM;
-		goto err_alloc_vsi;
+		goto unlock_pf;
 	}
 	vsi->type = type;
 	vsi->back = pf;
@@ -4992,12 +5009,25 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 
 	i40e_set_num_rings_in_vsi(vsi);
 
+	/* allocate memory for q_vector pointers */
+	sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+	vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+	if (!vsi->q_vectors) {
+		ret = -ENOMEM;
+		goto err_vectors;
+	}
+
 	/* Setup default MSIX irq handler for VSI */
 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
 	pf->vsi[vsi_idx] = vsi;
 	ret = vsi_idx;
-err_alloc_vsi:
+	goto unlock_pf;
+
+err_vectors:
+	pf->next_vsi = i - 1;
+	kfree(vsi);
+unlock_pf:
 	mutex_unlock(&pf->switch_mutex);
 	return ret;
 }
@@ -5038,6 +5068,9 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+	/* free the ring and vector containers */
+	kfree(vsi->q_vectors);
+
 	pf->vsi[vsi->idx] = NULL;
 	if (vsi->idx < pf->next_vsi)
 		pf->next_vsi = vsi->idx;
@@ -5256,6 +5289,35 @@ static int i40e_init_msix(struct i40e_pf *pf)
 	return err;
 }
 
+/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+	struct i40e_q_vector *q_vector;
+
+	/* allocate q_vector */
+	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	q_vector->vsi = vsi;
+	q_vector->v_idx = v_idx;
+	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+	if (vsi->netdev)
+		netif_napi_add(vsi->netdev, &q_vector->napi,
+			       i40e_napi_poll, vsi->work_limit);
+
+	/* tie q_vector and vsi together */
+	vsi->q_vectors[v_idx] = q_vector;
+
+	return 0;
+}
+
 /**
  * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
@@ -5267,6 +5329,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
 	int v_idx, num_q_vectors;
+	int err;
 
 	/* if not MSIX, give the one vector only to the LAN VSI */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5276,22 +5339,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
 	else
 		return -EINVAL;
 
-	vsi->q_vectors = kcalloc(num_q_vectors,
-				 sizeof(struct i40e_q_vector),
-				 GFP_KERNEL);
-	if (!vsi->q_vectors)
-		return -ENOMEM;
-
 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		vsi->q_vectors[v_idx].vsi = vsi;
-		vsi->q_vectors[v_idx].v_idx = v_idx;
-		cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
-		if (vsi->netdev)
-			netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
-				       i40e_napi_poll, vsi->work_limit);
+		err = i40e_alloc_q_vector(vsi, v_idx);
+		if (err)
+			goto err_out;
 	}
 
 	return 0;
+
+err_out:
+	while (v_idx--)
+		i40e_free_q_vector(vsi, v_idx);
+
+	return err;
 }
 
 /**
@@ -5958,7 +6018,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
 	int ret = -ENOENT;
 	struct i40e_pf *pf = vsi->back;
 
-	if (vsi->q_vectors) {
+	if (vsi->q_vectors[0]) {
 		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
 			 vsi->seid);
 		return -EEXIST;
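Every hunk above falls out of one data-structure change: vsi->q_vectors stops being a single contiguous array of struct i40e_q_vector and becomes an array of pointers, with each q_vector allocated on its own (i40e_alloc_q_vector) and released through kfree_rcu() once it has been unhooked from its rings (i40e_free_q_vector, which relies on the q_vector's rcu member for the deferred free). That is why every access site switches from &vsi->q_vectors[i] and q_vectors[i].napi to vsi->q_vectors[i] and q_vectors[i]->napi, and why i40e_vsi_setup_vectors now tests q_vectors[0] rather than the pointer array itself, which is allocated in i40e_vsi_mem_alloc and only freed in i40e_vsi_clear. The sketch below is a minimal userspace illustration of that before/after allocation pattern, not driver code: struct q_vector, NUM_VECTORS, and the function names are simplified placeholders.

#include <stdio.h>
#include <stdlib.h>

/* Simplified placeholders -- not the driver's real definitions. */
struct q_vector { int v_idx; };

#define NUM_VECTORS 4                   /* stands in for vsi->num_q_vectors */

/* Old layout: one contiguous block, vectors addressed as &q_vectors[i]. */
static struct q_vector *alloc_old_layout(void)
{
	return calloc(NUM_VECTORS, sizeof(struct q_vector));
}

/* New layout: a block of pointers, each vector allocated individually so a
 * single vector can later be torn down (and, in the driver, RCU-freed) on
 * its own.  Mirrors the i40e_alloc_q_vectors()/i40e_alloc_q_vector() split
 * above, including the err_out unwind.
 */
static struct q_vector **alloc_new_layout(void)
{
	struct q_vector **vectors;
	int i;

	vectors = calloc(NUM_VECTORS, sizeof(struct q_vector *));
	if (!vectors)
		return NULL;

	for (i = 0; i < NUM_VECTORS; i++) {
		vectors[i] = calloc(1, sizeof(struct q_vector));
		if (!vectors[i])
			goto err_out;
		vectors[i]->v_idx = i;          /* note '->', no longer '.' */
	}
	return vectors;

err_out:
	while (i--) {
		free(vectors[i]);
		vectors[i] = NULL;
	}
	free(vectors);
	return NULL;
}

int main(void)
{
	struct q_vector *old = alloc_old_layout();
	struct q_vector **vectors = alloc_new_layout();
	int i;

	if (!old || !vectors)
		return 1;

	/* old layout reaches members via '.', new layout via '->' */
	old[0].v_idx = 0;
	for (i = 0; i < NUM_VECTORS; i++)
		printf("q_vector %d at %p\n", vectors[i]->v_idx, (void *)vectors[i]);

	for (i = 0; i < NUM_VECTORS; i++)
		free(vectors[i]);
	free(vectors);
	free(old);
	return 0;
}

Using kfree_rcu() instead of kfree() for the per-vector free presumably lets lock-free readers such as the interrupt and netpoll paths finish with an old q_vector before its memory disappears; the userspace sketch cannot show that part and simply frees eagerly.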