@@ -1227,48 +1227,34 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_acl_tcam_ventry *ventry;
 	int err;
-	int err2;
 
-	err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
-						     region, ctx);
-	if (err)
-		return err;
+	if (vchunk->chunk->region != region) {
+		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
+							     region, ctx);
+		if (err)
+			return err;
+	} else if (!vchunk->chunk2) {
+		/* The chunk is already as it should be, nothing to do. */
+		return 0;
+	}
 
 	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
 		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
 						       vchunk->chunk);
 		if (err) {
-			if (ctx->this_is_rollback) {
-				vchunk->vregion->failed_rollback = true;
+			if (ctx->this_is_rollback)
 				return err;
-			}
-			goto rollback;
+			/* Swap the chunk and chunk2 pointers so the follow-up
+			 * rollback call will see the original chunk pointer
+			 * in vchunk->chunk.
+			 */
+			swap(vchunk->chunk, vchunk->chunk2);
+			return err;
 		}
 	}
 
 	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
 	return 0;
-
-rollback:
-	/* Migrate the entries back to the original chunk. If some entry
-	 * migration fails, there's no good way how to proceed. Set the
-	 * vregion with "failed_rollback" flag.
-	 */
-	swap(vchunk->chunk, vchunk->chunk2);
-	list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list,
-					     list) {
-		err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
-							vchunk->chunk);
-		if (err2) {
-			vchunk->vregion->failed_rollback = true;
-			goto err_rollback;
-		}
-	}
-
-	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
-
-err_rollback:
-	return err;
 }
 
 static int
@@ -1284,35 +1270,32 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 						    vregion->region,
 						    ctx);
 		if (err)
-			goto rollback;
+			return err;
 	}
 	return 0;
-
-rollback:
-	/* In case migration was not successful, we need to swap
-	 * so the original region pointer is assigned again to vregion->region.
-	 */
-	swap(vregion->region, vregion->region2);
-	ctx->this_is_rollback = true;
-	list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list,
-					     list) {
-		mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
-						     vregion->region,
-						     ctx);
-	}
-	return err;
 }
 
 static int
 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 				  struct mlxsw_sp_acl_tcam_vregion *vregion,
 				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
 {
-	int err;
+	int err, err2;
 
 	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
 	mutex_lock(&vregion->lock);
 	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+	if (err) {
+		/* In case migration was not successful, we need to swap
+		 * so the original region pointer is assigned again
+		 * to vregion->region.
+		 */
+		swap(vregion->region, vregion->region2);
+		ctx->this_is_rollback = true;
+		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+		if (err2)
+			vregion->failed_rollback = true;
+	}
 	mutex_unlock(&vregion->lock);
 	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
 	return err;
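
Note on the pattern above: instead of a dedicated reverse-walk rollback path, the patch undoes a failed migration by swapping the old/new pointers and re-running the very same migration routine with ctx->this_is_rollback set, so the forward path and the rollback path can never drift apart. Below is a minimal, self-contained C sketch of that control flow. All names in it (struct mig_state, migrate_all(), migrate_entry(), and so on) are hypothetical stand-ins invented for illustration; they are not the mlxsw API, and the sketch mirrors only the error handling, not the actual TCAM entry movement.

#include <stdbool.h>
#include <stdio.h>

struct mig_ctx {
	bool this_is_rollback;	/* the rollback pass must not roll back again */
};

struct mig_state {
	int *src;		/* storage entries are migrated from */
	int *dst;		/* storage entries are migrated to */
	bool failed_rollback;	/* set when even the rollback pass fails */
};

/* Hypothetical per-entry move; fails once to exercise the rollback. */
static int migrate_entry(int val, struct mig_ctx *ctx)
{
	return (val == 3 && !ctx->this_is_rollback) ? -1 : 0;
}

/* A single routine serves both the forward pass and the rollback pass,
 * like mlxsw_sp_acl_tcam_vchunk_migrate_all() in the patch.
 */
static int migrate_all(struct mig_state *st, int n, struct mig_ctx *ctx)
{
	for (int i = 0; i < n; i++) {
		int err = migrate_entry(st->src[i], ctx);
		if (err)
			return err;
		st->dst[i] = st->src[i];
	}
	return 0;
}

int main(void)
{
	int a[3] = { 1, 2, 3 }, b[3] = { 0 };
	struct mig_state st = { .src = a, .dst = b };
	struct mig_ctx ctx = { .this_is_rollback = false };
	int err, err2, *tmp;

	err = migrate_all(&st, 3, &ctx);
	if (err) {
		/* Swap so the same routine now copies back toward the
		 * original storage, mirroring swap(vregion->region,
		 * vregion->region2) followed by the second
		 * mlxsw_sp_acl_tcam_vchunk_migrate_all() call.
		 */
		tmp = st.src;
		st.src = st.dst;
		st.dst = tmp;
		ctx.this_is_rollback = true;
		err2 = migrate_all(&st, 3, &ctx);
		if (err2)
			st.failed_rollback = true;
	}
	printf("err=%d failed_rollback=%d\n", err, st.failed_rollback);
	return 0;
}

The flag matters for termination: with this_is_rollback set, a failure inside the rollback pass is only recorded (failed_rollback) rather than triggering yet another rollback, which is exactly what the early "if (ctx->this_is_rollback) return err;" branch in mlxsw_sp_acl_tcam_vchunk_migrate_one() does.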