vdpa/mlx5: Extract mr members in own resource struct
Group all mapping-related resources into their own structure.

Upcoming patches will add more members to this new structure.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20240830105838.2666587-6-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
dtatulea authored and mstsirkin committed Sep 25, 2024
1 parent 0b916a9 commit 5fc8567
Showing 4 changed files with 44 additions and 41 deletions.
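
Before the per-file diffs, a minimal standalone C sketch of the extraction pattern this commit applies. The stub types, names, and array sizes below are illustrative stand-ins, not the driver's real definitions:

/* Sketch only: move loose per-device mapping fields into a dedicated
 * structure, as the diff below does for struct mlx5_vdpa_dev. */
#include <stdio.h>

#define NUM_AS		2	/* stand-in for MLX5_VDPA_NUM_AS */
#define NUMVQ_GROUPS	3	/* stand-in for MLX5_VDPA_NUMVQ_GROUPS */

struct mr { unsigned int mkey; };	/* stub for struct mlx5_vdpa_mr */

/* After the refactor: all mapping state lives in one structure, so
 * upcoming patches can grow it without touching the device struct. */
struct mr_resources {
	struct mr *mr[NUM_AS];
	unsigned int group2asid[NUMVQ_GROUPS];
};

struct vdpa_dev {
	struct mr_resources mres;	/* was: loose mr/group2asid/... fields */
};

int main(void)
{
	struct vdpa_dev mvdev = {0};
	struct mr dma_mr = { .mkey = 0x42 };

	/* Call sites change from mvdev->mr[asid] to mvdev->mres.mr[asid]. */
	mvdev.mres.group2asid[0] = 0;
	mvdev.mres.mr[0] = &dma_mr;
	printf("asid 0 mkey: 0x%x\n", mvdev.mres.mr[0]->mkey);
	return 0;
}

The diffstat reflects this mechanical shape: every call site gains an mres. prefix, and future mapping members land in one place.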
drivers/vdpa/mlx5/core/mlx5_vdpa.h (8 additions, 5 deletions)

@@ -83,10 +83,18 @@ enum {
 	MLX5_VDPA_NUM_AS = 2
 };
 
+struct mlx5_vdpa_mr_resources {
+	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+	struct list_head mr_list_head;
+	struct mutex mr_mtx;
+};
+
 struct mlx5_vdpa_dev {
 	struct vdpa_device vdev;
 	struct mlx5_core_dev *mdev;
 	struct mlx5_vdpa_resources res;
+	struct mlx5_vdpa_mr_resources mres;
 
 	u64 mlx_features;
 	u64 actual_features;
@@ -95,13 +103,8 @@ struct mlx5_vdpa_dev {
 	u16 max_idx;
 	u32 generation;
 
-	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
-	struct list_head mr_list_head;
-	/* serialize mr access */
-	struct mutex mr_mtx;
 	struct mlx5_control_vq cvq;
 	struct workqueue_struct *wq;
-	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
 	bool suspended;
 
 	struct mlx5_async_ctx async_ctx;

drivers/vdpa/mlx5/core/mr.c (15 additions, 15 deletions)

@@ -666,9 +666,9 @@ static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 		      struct mlx5_vdpa_mr *mr)
 {
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 	_mlx5_vdpa_put_mr(mvdev, mr);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 }
 
 static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
@@ -683,39 +683,39 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
 		      struct mlx5_vdpa_mr *mr)
 {
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 	_mlx5_vdpa_get_mr(mvdev, mr);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 }
 
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 			 struct mlx5_vdpa_mr *new_mr,
 			 unsigned int asid)
 {
-	struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
+	struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
 
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 
 	_mlx5_vdpa_put_mr(mvdev, old_mr);
-	mvdev->mr[asid] = new_mr;
+	mvdev->mres.mr[asid] = new_mr;
 
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 }
 
 static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
 {
 	struct mlx5_vdpa_mr *mr;
 
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 
-	list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+	list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
 
 		mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
 				      "mr: %p, mkey: 0x%x, refcount: %u\n",
 				      mr, mr->mkey, refcount_read(&mr->refcount));
 	}
 
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 }
 
@@ -756,7 +756,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (err)
 		goto err_iotlb;
 
-	list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+	list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);
 
 	return 0;
 
@@ -782,9 +782,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
 	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 	if (err)
 		goto out_err;
@@ -804,7 +804,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
 {
 	int err;
 
-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+	if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
 		return 0;
 
 	spin_lock(&mvdev->cvq.iommu_lock);

drivers/vdpa/mlx5/core/resources.c (3 additions, 3 deletions)

@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
 		return -EINVAL;
 	}
-	mutex_init(&mvdev->mr_mtx);
+	mutex_init(&mvdev->mres.mr_mtx);
 	res->uar = mlx5_get_uars_page(mdev);
 	if (IS_ERR(res->uar)) {
 		err = PTR_ERR(res->uar);
@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 err_uctx:
 	mlx5_put_uars_page(mdev, res->uar);
 err_uars:
-	mutex_destroy(&mvdev->mr_mtx);
+	mutex_destroy(&mvdev->mres.mr_mtx);
 	return err;
 }
 
@@ -318,7 +318,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
 	dealloc_pd(mvdev, res->pdn, res->uid);
 	destroy_uctx(mvdev, res->uid);
 	mlx5_put_uars_page(mvdev->mdev, res->uar);
-	mutex_destroy(&mvdev->mr_mtx);
+	mutex_destroy(&mvdev->mres.mr_mtx);
 	res->valid = false;
 }
 

drivers/vdpa/mlx5/net/mlx5_vnet.c (18 additions, 18 deletions)

@@ -941,23 +941,23 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
 		MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
 		MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
 
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 		if (vq_mr)
 			MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
 
-		vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 		if (vq_desc_mr &&
 		    MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
 			MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
 	} else {
 		/* If there is no mr update, make sure that the existing ones are set
 		 * modify to ready.
 		 */
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 		if (vq_mr)
 			mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
 
-		vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 		if (vq_desc_mr)
 			mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
 	}
@@ -1354,7 +1354,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
-		vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+		vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
 
 		if (vq_mr)
 			MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
@@ -1363,7 +1363,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
-		desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+		desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
 
 		if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
 			MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
@@ -1381,17 +1381,17 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
 	struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
-		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP];
-		struct mlx5_vdpa_mr *vq_mr = mvdev->mr[asid];
+		unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
+		struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
 
 		mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
 		mlx5_vdpa_get_mr(mvdev, vq_mr);
 		mvq->vq_mr = vq_mr;
 	}
 
 	if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
-		unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
-		struct mlx5_vdpa_mr *desc_mr = mvdev->mr[asid];
+		unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
+		struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
 
 		mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
 		mlx5_vdpa_get_mr(mvdev, desc_mr);
@@ -3235,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
 
 	/* default mapping all groups are mapped to asid 0 */
 	for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
-		mvdev->group2asid[i] = 0;
+		mvdev->mres.group2asid[i] = 0;
 }
 
 static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
@@ -3353,7 +3353,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
 		new_mr = NULL;
 	}
 
-	if (!mvdev->mr[asid]) {
+	if (!mvdev->mres.mr[asid]) {
 		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 	} else {
 		err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
@@ -3637,12 +3637,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
 	if (group >= MLX5_VDPA_NUMVQ_GROUPS)
 		return -EINVAL;
 
-	mvdev->group2asid[group] = asid;
+	mvdev->mres.group2asid[group] = asid;
 
-	mutex_lock(&mvdev->mr_mtx);
-	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
-		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
-	mutex_unlock(&mvdev->mr_mtx);
+	mutex_lock(&mvdev->mres.mr_mtx);
+	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
+		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
+	mutex_unlock(&mvdev->mres.mr_mtx);
 
 	return err;
 }
@@ -3962,7 +3962,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	if (err)
 		goto err_mpfs;
 
-	INIT_LIST_HEAD(&mvdev->mr_list_head);
+	INIT_LIST_HEAD(&mvdev->mres.mr_list_head);
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 		err = mlx5_vdpa_create_dma_mr(mvdev);
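
As a closing note on the locking above, here is a userspace sketch of the swap-under-lock shape that mlx5_vdpa_update_mr() keeps after the rename to mres.mr_mtx. Pthreads stands in for the kernel mutex, and all names are illustrative, not the driver's:

#include <pthread.h>

struct mr { unsigned int mkey; };	/* stub for struct mlx5_vdpa_mr */

struct mr_resources {
	struct mr *mr[2];		/* one slot per address space */
	pthread_mutex_t mr_mtx;		/* stands in for struct mutex mr_mtx */
};

/* Mirrors the shape of mlx5_vdpa_update_mr(): install the new mapping
 * for an address space while holding mr_mtx, so all accesses to the
 * mr[] table stay serialized. (The real code also drops a reference
 * on the old mr under the same lock.) */
static void update_mr(struct mr_resources *mres, struct mr *new_mr,
		      unsigned int asid)
{
	pthread_mutex_lock(&mres->mr_mtx);
	mres->mr[asid] = new_mr;
	pthread_mutex_unlock(&mres->mr_mtx);
}

int main(void)
{
	struct mr_resources mres = { .mr_mtx = PTHREAD_MUTEX_INITIALIZER };
	struct mr new_mr = { .mkey = 0x1 };

	update_mr(&mres, &new_mr, 0);	/* asid 0 now maps via new_mr */
	return 0;
}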
