@@ -1227,48 +1227,34 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_acl_tcam_ventry *ventry;
 	int err;
-	int err2;
 
-	err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
-						     region, ctx);
-	if (err)
-		return err;
+	if (vchunk->chunk->region != region) {
+		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
+							     region, ctx);
+		if (err)
+			return err;
+	} else if (!vchunk->chunk2) {
+		/* The chunk is already as it should be, nothing to do. */
+		return 0;
+	}
 
 	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
 		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
 						       vchunk->chunk);
 		if (err) {
-			if (ctx->this_is_rollback) {
-				vchunk->vregion->failed_rollback = true;
+			if (ctx->this_is_rollback)
 				return err;
-			}
-			goto rollback;
+			/* Swap the chunk and chunk2 pointers so the follow-up
+			 * rollback call will see the original chunk pointer
+			 * in vchunk->chunk.
+			 */
+			swap(vchunk->chunk, vchunk->chunk2);
+			return err;
 		}
 	}
 
 	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
 	return 0;
-
-rollback:
-	/* Migrate the entries back to the original chunk. If some entry
-	 * migration fails, there's no good way how to proceed. Set the
-	 * vregion with "failed_rollback" flag.
-	 */
-	swap(vchunk->chunk, vchunk->chunk2);
-	list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list,
-					     list) {
-		err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
-							vchunk->chunk);
-		if (err2) {
-			vchunk->vregion->failed_rollback = true;
-			goto err_rollback;
-		}
-	}
-
-	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
-
-err_rollback:
-	return err;
 }
 
 static int
@@ -1284,35 +1270,32 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 						     vregion->region,
 						     ctx);
 		if (err)
-			goto rollback;
+			return err;
 	}
 	return 0;
-
-rollback:
-	/* In case migration was not successful, we need to swap
-	 * so the original region pointer is assigned again to vregion->region.
-	 */
-	swap(vregion->region, vregion->region2);
-	ctx->this_is_rollback = true;
-	list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list,
-					     list) {
-		mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
-						     vregion->region,
-						     ctx);
-	}
-	return err;
 }
 
 static int
 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 				  struct mlxsw_sp_acl_tcam_vregion *vregion,
 				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
 {
-	int err;
+	int err, err2;
 
 	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
 	mutex_lock(&vregion->lock);
 	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+	if (err) {
+		/* In case migration was not successful, we need to swap
+		 * so the original region pointer is assigned again
+		 * to vregion->region.
+		 */
+		swap(vregion->region, vregion->region2);
+		ctx->this_is_rollback = true;
+		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+		if (err2)
+			vregion->failed_rollback = true;
+	}
 	mutex_unlock(&vregion->lock);
 	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
 	return err;