net/mlx5e: Simulate missing IPsec TX limits hardware functionality
ConnectX-7 devices don't have the ability to send TX hard/soft limits
events. As a possible workaround, let's rely on the existing infrastructure
and use a periodic check of the cached flow counter. In these periodic
checks, we call xfrm_state_check_expire() to check and mark the state
accordingly.

Once the state is marked as XFRM_STATE_EXPIRED, the SA flow rule is
changed to drop all traffic.
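
For readers skimming only this message, the sketch below is a condensed,
commented restatement of the delayed-work handler this patch adds to
ipsec.c (the full version appears in the diff); all names and the
one-second MLX5_IPSEC_RESCHED interval come from the patch itself, while
the inline comments are editorial summaries, not part of the patch.

/* Condensed from the handler added below: once a second, ask the xfrm
 * core to compare the SA's cached counters against its soft/hard packet
 * limits; once the state expires, flip the SA to drop and stop
 * rescheduling the work.
 */
static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
{
        struct mlx5e_ipsec_dwork *dwork =
                container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
        struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
        struct xfrm_state *x = sa_entry->x;

        spin_lock(&x->lock);
        xfrm_state_check_expire(x);     /* marks XFRM_STATE_EXPIRED on hard limit */
        if (x->km.state == XFRM_STATE_EXPIRED) {
                sa_entry->attrs.drop = true;
                /* re-create the SA's steering rule so it drops instead of forwards */
                mlx5e_accel_ipsec_fs_modify(sa_entry);
        }
        spin_unlock(&x->lock);

        if (!sa_entry->attrs.drop)
                queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
                                   MLX5_IPSEC_RESCHED);
}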

Link: https://lore.kernel.org/r/94a5d82c0c399747117d8a558f9beebfbcf26154.1680162300.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
rleon committed Apr 6, 2023
1 parent 4562116 commit b2f7b01
Showing 3 changed files with 99 additions and 5 deletions.
65 changes: 64 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -40,6 +40,8 @@
#include "ipsec.h"
#include "ipsec_rxtx.h"

#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)

static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
        return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
@@ -50,6 +52,28 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
        return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
{
        struct mlx5e_ipsec_dwork *dwork =
                container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
        struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
        struct xfrm_state *x = sa_entry->x;

        spin_lock(&x->lock);
        xfrm_state_check_expire(x);
        if (x->km.state == XFRM_STATE_EXPIRED) {
                sa_entry->attrs.drop = true;
                mlx5e_accel_ipsec_fs_modify(sa_entry);
        }
        spin_unlock(&x->lock);

        if (sa_entry->attrs.drop)
                return;

        queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
                           MLX5_IPSEC_RESCHED);
}

static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct xfrm_state *x = sa_entry->x;
@@ -464,6 +488,31 @@ static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
        return 0;
}

static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct xfrm_state *x = sa_entry->x;
        struct mlx5e_ipsec_dwork *dwork;

        if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
                return 0;

        if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
                return 0;

        if (x->lft.soft_packet_limit == XFRM_INF &&
            x->lft.hard_packet_limit == XFRM_INF)
                return 0;

        dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
        if (!dwork)
                return -ENOMEM;

        dwork->sa_entry = sa_entry;
        INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
        sa_entry->dwork = dwork;
        return 0;
}

static int mlx5e_xfrm_add_state(struct xfrm_state *x,
                                struct netlink_ext_ack *extack)
{
@@ -504,10 +553,14 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
        if (err)
                goto err_xfrm;

        err = mlx5e_ipsec_create_dwork(sa_entry);
        if (err)
                goto release_work;

        /* create hw context */
        err = mlx5_ipsec_create_sa_ctx(sa_entry);
        if (err)
                goto release_work;
                goto release_dwork;

        err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
        if (err)
@@ -523,6 +576,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
                goto err_add_rule;

        mlx5e_ipsec_set_esn_ops(sa_entry);

        if (sa_entry->dwork)
                queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
                                   MLX5_IPSEC_RESCHED);
out:
        x->xso.offload_handle = (unsigned long)sa_entry;
        return 0;
@@ -531,6 +588,8 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
        mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
        mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
        kfree(sa_entry->dwork);
release_work:
        kfree(sa_entry->work->data);
        kfree(sa_entry->work);
@@ -563,8 +622,12 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
        if (sa_entry->work)
                cancel_work_sync(&sa_entry->work->work);

        if (sa_entry->dwork)
                cancel_delayed_work_sync(&sa_entry->dwork->dwork);

        mlx5e_accel_ipsec_fs_del_rule(sa_entry);
        mlx5_ipsec_free_sa_ctx(sa_entry);
        kfree(sa_entry->dwork);
        kfree(sa_entry->work->data);
        kfree(sa_entry->work);
sa_entry_free:
8 changes: 8 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -93,6 +93,7 @@ struct mlx5_accel_esp_xfrm_attrs {
        struct upspec upspec;
        u8 dir : 2;
        u8 type : 2;
        u8 drop : 1;
        u8 family;
        struct mlx5_replay_esn replay_esn;
        u32 authsize;
@@ -140,6 +141,11 @@ struct mlx5e_ipsec_work {
        void *data;
};

struct mlx5e_ipsec_dwork {
        struct delayed_work dwork;
        struct mlx5e_ipsec_sa_entry *sa_entry;
};

struct mlx5e_ipsec_aso {
        u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
        dma_addr_t dma_addr;
@@ -193,6 +199,7 @@ struct mlx5e_ipsec_sa_entry {
        u32 enc_key_id;
        struct mlx5e_ipsec_rule ipsec_rule;
        struct mlx5e_ipsec_work *work;
        struct mlx5e_ipsec_dwork *dwork;
        struct mlx5e_ipsec_limits limits;
};

@@ -235,6 +242,7 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);

int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
31 changes: 27 additions & 4 deletions drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -926,9 +926,12 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                           MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
        if (attrs->drop)
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
        else
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[0].ft = rx->ft.status;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1018,9 +1021,13 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
        flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
        flow_act.flags |= FLOW_ACT_NO_APPEND;
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                           MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
        if (attrs->drop)
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
        else
                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        dest[0].ft = tx->ft.status;
        dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1430,3 +1437,19 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
        kfree(ipsec->tx);
        return err;
}

void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
        int err;

        memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
        memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

        err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
        if (err)
                return;

        mlx5e_accel_ipsec_fs_del_rule(sa_entry);
        memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}
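
A brief note on the design of mlx5e_accel_ipsec_fs_modify(): rather than introducing a dedicated rule-modify primitive, it builds a shadow copy of the SA entry with a cleared ipsec_rule, installs a fresh rule from that shadow (which already carries the updated attributes such as attrs.drop), and only then deletes the old rule and copies the shadow back over the original entry. The intent appears to be make-before-break: if installing the replacement rule fails, the existing rule is left untouched.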
