 #include "lib/clock.h"
 #include "diag/fw_tracer.h"
 #include "mlx5_irq.h"
+#include "pci_irq.h"
 #include "devlink.h"
 #include "en_accel/ipsec.h"
 
@@ -61,9 +62,7 @@ struct mlx5_eq_table {
 	struct mlx5_irq_table *irq_table;
 	struct mlx5_irq **comp_irqs;
 	struct mlx5_irq *ctrl_irq;
-#ifdef CONFIG_RFS_ACCEL
 	struct cpu_rmap *rmap;
-#endif
 };
 
 #define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -839,7 +838,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
 	}
 spread_done:
 	rcu_read_unlock();
-	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
+	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
 	kfree(cpus);
 	return ret;
 }
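
The interface change in this hunk is the new fifth argument: mlx5_irqs_request_vectors() now takes a struct cpu_rmap **, so the IRQ layer can populate the reverse map while it hands out vectors, instead of eq.c back-filling it afterwards with pci_irq_vector() (as the deleted set_rmap() below used to do). A minimal sketch of what the callee side might do per requested vector; demo_add_vector_to_rmap() is a hypothetical name, and the real logic lives in pci_irq.c, which this diff does not show:

#include <linux/cpu_rmap.h>
#include <linux/pci.h>

/* Hypothetical helper: map one freshly requested vector into the caller's
 * rmap, if the caller supplied one. A NULL rmap (SFs, or kernels without
 * CONFIG_RFS_ACCEL) skips the mapping entirely.
 */
static int demo_add_vector_to_rmap(struct pci_dev *pdev, unsigned int vecidx,
				   struct cpu_rmap **rmap)
{
	if (!rmap || !*rmap)
		return 0;
	return irq_cpu_rmap_add(*rmap, pci_irq_vector(pdev, vecidx));
}
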
@@ -888,6 +887,40 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
 	return ret;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+static int alloc_rmap(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
+
+	/* rmap is a mapping between irq number and queue number.
+	 * Each irq can be assigned only to a single rmap.
+	 * Since SFs share IRQs, rmap mapping cannot function correctly
+	 * for irqs that are shared between different core/netdev RX rings.
+	 * Hence we don't allow netdev rmap for SFs.
+	 */
+	if (mlx5_core_is_sf(mdev))
+		return 0;
+
+	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+	if (!eq_table->rmap)
+		return -ENOMEM;
+	return 0;
+}
+
+static void free_rmap(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
+
+	if (eq_table->rmap) {
+		free_irq_cpu_rmap(eq_table->rmap);
+		eq_table->rmap = NULL;
+	}
+}
+#else
+static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
+static void free_rmap(struct mlx5_core_dev *mdev) {}
+#endif
+
 static void destroy_comp_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
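
The alloc_rmap()/free_rmap() pair added above is the standard cpu_rmap lifecycle, with the SF early-return explained by the comment: an IRQ may belong to at most one rmap, and SFs share IRQs across functions. For reference, a self-contained sketch of the generic pattern these helpers follow; demo_rmap_setup() and its arguments are illustrative, not mlx5 API:

#include <linux/cpu_rmap.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static int demo_rmap_setup(struct net_device *ndev, struct pci_dev *pdev,
			   unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;
	int err;

	rmap = alloc_irq_cpu_rmap(nvec);	/* one slot per completion IRQ */
	if (!rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		err = irq_cpu_rmap_add(rmap, pci_irq_vector(pdev, i));
		if (err) {
			/* free_irq_cpu_rmap() also unregisters the affinity
			 * notifiers installed by irq_cpu_rmap_add().
			 */
			free_irq_cpu_rmap(rmap);
			return err;
		}
	}
#ifdef CONFIG_RFS_ACCEL
	ndev->rx_cpu_rmap = rmap;	/* consumed by accelerated RFS */
#endif
	return 0;
}
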
@@ -903,6 +936,7 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
 		kfree(eq);
 	}
 	comp_irqs_release(dev);
+	free_rmap(dev);
 }
 
 static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
@@ -929,9 +963,16 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	int err;
 	int i;
 
+	err = alloc_rmap(dev);
+	if (err)
+		return err;
+
 	ncomp_eqs = comp_irqs_request(dev);
-	if (ncomp_eqs < 0)
-		return ncomp_eqs;
+	if (ncomp_eqs < 0) {
+		err = ncomp_eqs;
+		goto err_irqs_req;
+	}
+
 	INIT_LIST_HEAD(&table->comp_eqs_list);
 	nent = comp_eq_depth_devlink_param_get(dev);
 
@@ -976,6 +1017,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	kfree(eq);
 clean:
 	destroy_comp_eqs(dev);
+err_irqs_req:
+	free_rmap(dev);
 	return err;
 }
 
@@ -1054,55 +1097,12 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
 	return ERR_PTR(-ENOENT);
 }
 
-static void clear_rmap(struct mlx5_core_dev *dev)
-{
-#ifdef CONFIG_RFS_ACCEL
-	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
-
-	free_irq_cpu_rmap(eq_table->rmap);
-#endif
-}
-
-static int set_rmap(struct mlx5_core_dev *mdev)
-{
-	int err = 0;
-#ifdef CONFIG_RFS_ACCEL
-	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
-	int vecidx;
-
-	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
-	if (!eq_table->rmap) {
-		err = -ENOMEM;
-		mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
-		goto err_out;
-	}
-
-	for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
-		err = irq_cpu_rmap_add(eq_table->rmap,
-				       pci_irq_vector(mdev->pdev, vecidx));
-		if (err) {
-			mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
-				      err);
-			goto err_irq_cpu_rmap_add;
-		}
-	}
-	return 0;
-
-err_irq_cpu_rmap_add:
-	clear_rmap(mdev);
-err_out:
-#endif
-	return err;
-}
-
 /* This function should only be called after mlx5_cmd_force_teardown_hca */
 void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 
 	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
-	if (!mlx5_core_is_sf(dev))
-		clear_rmap(dev);
 	mlx5_irq_table_destroy(dev);
 	mutex_unlock(&table->lock);
 }
@@ -1139,38 +1139,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 		goto err_async_eqs;
 	}
 
-	if (!mlx5_core_is_sf(dev)) {
-		/* rmap is a mapping between irq number and queue number.
-		 * each irq can be assign only to a single rmap.
-		 * since SFs share IRQs, rmap mapping cannot function correctly
-		 * for irqs that are shared for different core/netdev RX rings.
-		 * Hence we don't allow netdev rmap for SFs
-		 */
-		err = set_rmap(dev);
-		if (err)
-			goto err_rmap;
-	}
-
 	err = create_comp_eqs(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed to create completion EQs\n");
 		goto err_comp_eqs;
 	}
 
 	return 0;
+
 err_comp_eqs:
-	if (!mlx5_core_is_sf(dev))
-		clear_rmap(dev);
-err_rmap:
 	destroy_async_eqs(dev);
 err_async_eqs:
 	return err;
 }
 
 void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
 {
-	if (!mlx5_core_is_sf(dev))
-		clear_rmap(dev);
 	destroy_comp_eqs(dev);
 	destroy_async_eqs(dev);
 }
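
With set_rmap()/clear_rmap() gone, the rmap's lifetime is now tied to the completion EQs: alloc_rmap() runs at the top of create_comp_eqs(), free_rmap() in destroy_comp_eqs() and on the error paths, and the IRQ layer fills in entries through the new mlx5_irqs_request_vectors() parameter. The consumer side is unchanged: under CONFIG_RFS_ACCEL the Ethernet driver publishes the table's rmap to the stack. A hedged sketch of that consumer, assuming the existing mlx5_eq_table_get_rmap() accessor (which returns table->rmap) and its declaration in the driver's internal headers:

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include "lib/eq.h"	/* assumed location of mlx5_eq_table_get_rmap() */

/* Illustrative consumer: hand the EQ table's rmap to the netdev so the
 * RFS core can steer flows toward the CPU that services each IRQ.
 * For SFs, alloc_rmap() returned early, so this is simply NULL.
 */
static void demo_publish_rx_cpu_rmap(struct net_device *netdev,
				     struct mlx5_core_dev *mdev)
{
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
}
#endif
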