Skip to content

Commit

Permalink
mlx5: Move pci device handling from mlx5_ib to mlx5_core
Browse files Browse the repository at this point in the history
In preparation for a new mlx5 device which is VPI (i.e., ports can be
either IB or ETH), move the pci device functionality from mlx5_ib
to mlx5_core.

This involves the following changes:
1. Move mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev
   is now an independent structure maintained by mlx5_core.
   mlx5_ib_dev now has a pointer to that struct.
   This requires changing a lot of places where the core_dev
   struct was accessed via mlx5_ib_dev (now, this needs to
   be a pointer dereference).
2. All PCI initializations are now done in mlx5_core. Thus,
   it is now mlx5_core which does pci_register_device (and not
   mlx5_ib, as was previously).
3. mlx5_ib now registers itself with mlx5_core as an "interface"
   driver. This is very similar to the mechanism employed for
   the mlx4 (ConnectX) driver. Once the HCA is initialized
   (by mlx5_core), it invokes the interface drivers to do
   their initializations.
4. There is a new event handler which the core registers:
   mlx5_core_event(). This event handler invokes the
   event handlers registered by the interfaces.

Based on a patch by Eli Cohen <eli@mellanox.com>

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Jack Morgenstein authored and davem330 committed Jul 30, 2014
1 parent 4ada97a commit 9603b61
Show file tree
Hide file tree
Showing 9 changed files with 498 additions and 310 deletions.
46 changes: 23 additions & 23 deletions drivers/infiniband/hw/mlx5/cq.c
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_core_srq *msrq = NULL;

if (qp->ibqp.xrcd) {
msrq = mlx5_core_get_srq(&dev->mdev,
msrq = mlx5_core_get_srq(dev->mdev,
be32_to_cpu(cqe->srqn));
srq = to_mibsrq(msrq);
} else {
Expand Down Expand Up @@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,

/*
 * free_cq_buf() - release the kernel-space CQ buffer.
 * @dev: IB device; its mdev member is a pointer to the mlx5_core device
 *       (after the mlx5_ib -> mlx5_core split, mdev is no longer embedded).
 * @buf: CQ buffer wrapper whose underlying mlx5 buffer is freed.
 *
 * Note: the stray duplicate call with &dev->mdev was stripped-diff residue
 * from the commit that turned mdev into a pointer; only the pointer form
 * is correct, and calling free twice would be a double free.
 */
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
Expand Down Expand Up @@ -450,7 +450,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
* because CQs will be locked while QPs are removed
* from the table.
*/
mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
mqp = __mlx5_qp_lookup(dev->mdev, qpn);
if (unlikely(!mqp)) {
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
cq->mcq.cqn, qpn);
Expand Down Expand Up @@ -514,11 +514,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

read_lock(&dev->mdev.priv.mr_table.lock);
mmr = __mlx5_mr_lookup(&dev->mdev,
read_lock(&dev->mdev->priv.mr_table.lock);
mmr = __mlx5_mr_lookup(dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
if (unlikely(!mmr)) {
read_unlock(&dev->mdev.priv.mr_table.lock);
read_unlock(&dev->mdev->priv.mr_table.lock);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
Expand All @@ -536,7 +536,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
mr->sig->err_item.expected,
mr->sig->err_item.actual);

read_unlock(&dev->mdev.priv.mr_table.lock);
read_unlock(&dev->mdev->priv.mr_table.lock);
goto repoll;
}

Expand Down Expand Up @@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
mlx5_cq_arm(&to_mcq(ibcq)->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));

return 0;
}
Expand All @@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
{
int err;

err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err)
return err;
Expand Down Expand Up @@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
{
int err;

err = mlx5_db_alloc(&dev->mdev, &cq->db);
err = mlx5_db_alloc(dev->mdev, &cq->db);
if (err)
return err;

Expand All @@ -716,22 +716,22 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
*index = dev->mdev.priv.uuari.uars[0].index;
*index = dev->mdev->priv.uuari.uars[0].index;

return 0;

err_buf:
free_cq_buf(dev, &cq->buf);

err_db:
mlx5_db_free(&dev->mdev, &cq->db);
mlx5_db_free(dev->mdev, &cq->db);
return err;
}

/*
 * destroy_cq_kernel() - tear down a kernel-created CQ.
 * @dev: IB device; dev->mdev is a pointer to the mlx5_core device.
 * @cq:  CQ whose buffer and doorbell record are released.
 *
 * Mirrors the failure path of create_cq_kernel(): free the CQ buffer first,
 * then the doorbell record. The duplicate &dev->mdev call was stripped-diff
 * residue (pre-pointer form) and has been removed — freeing the doorbell
 * twice would corrupt the db allocator.
 */
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
Expand All @@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-EINVAL);

entries = roundup_pow_of_two(entries + 1);
if (entries > dev->mdev.caps.max_cqes)
if (entries > dev->mdev->caps.max_cqes)
return ERR_PTR(-EINVAL);

cq = kzalloc(sizeof(*cq), GFP_KERNEL);
Expand Down Expand Up @@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
cqb->ctx.c_eqn = cpu_to_be16(eqn);
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err)
goto err_cqb;

Expand All @@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
return &cq->ibcq;

err_cmd:
mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
mlx5_vfree(cqb);
Expand All @@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
if (cq->uobject)
context = cq->uobject->context;

mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
if (context)
destroy_cq_user(mcq, context);
else
Expand Down Expand Up @@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
u32 fsel;

if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
return -ENOSYS;

in = kzalloc(sizeof(*in), GFP_KERNEL);
Expand All @@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
in->ctx.cq_period = cpu_to_be16(cq_period);
in->ctx.cq_max_count = cpu_to_be16(cq_count);
in->field_select = cpu_to_be32(fsel);
err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
kfree(in);

if (err)
Expand Down Expand Up @@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int uninitialized_var(cqe_size);
unsigned long flags;

if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
pr_info("Firmware does not support resize CQ\n");
return -ENOSYS;
}
Expand All @@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -EINVAL;

entries = roundup_pow_of_two(entries + 1);
if (entries > dev->mdev.caps.max_cqes + 1)
if (entries > dev->mdev->caps.max_cqes + 1)
return -EINVAL;

if (entries == ibcq->cqe + 1)
Expand Down Expand Up @@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
in->cqn = cpu_to_be32(cq->mcq.cqn);

err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
goto ex_alloc;

Expand Down
4 changes: 2 additions & 2 deletions drivers/infiniband/hw/mlx5/mad.c
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;

return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
Expand Down Expand Up @@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)

packet_error = be16_to_cpu(out_mad->status);

dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
Expand Down
Loading

0 comments on commit 9603b61

Please sign in to comment.