Skip to content

Commit

Permalink
RDMA/irdma: Split mr alloc and free into new functions
Browse files Browse the repository at this point in the history
In the function irdma_reg_user_mr, the MR allocation and free logic
will be reused by other functions. As such, the source code related
to MR allocation and free is split out into new helper functions.

Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://lore.kernel.org/r/20230116193502.66540-3-yanjun.zhu@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
  • Loading branch information
zhuyj authored and rleon committed Jan 26, 2023
1 parent 01798df commit 693a538
Showing 1 changed file with 46 additions and 28 deletions.
74 changes: 46 additions & 28 deletions drivers/infiniband/hw/irdma/verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -2793,6 +2793,48 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
return err;
}

/*
 * irdma_alloc_iwmr - allocate and initialize an irdma memory region object
 * @region: pinned user memory backing the MR
 * @pd: protection domain the MR belongs to
 * @virt: user virtual address (iova) of the region
 * @reg_type: kind of registration (MEM, QP or CQ)
 *
 * Returns the new iwmr on success, or an ERR_PTR on failure
 * (-ENOMEM if allocation fails, -EOPNOTSUPP if no supported page
 * size fits the region). Caller owns the returned object and must
 * release it with irdma_free_iwmr().
 */
static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
					 struct ib_pd *pd, u64 virt,
					 enum irdma_memreg_type reg_type)
{
	struct irdma_device *iwdev = to_iwdev(pd->device);
	unsigned long pgsz_bitmap;
	struct irdma_pbl *iwpbl;
	struct irdma_mr *iwmr;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwmr->ibmr.iova = virt;
	iwmr->type = reg_type;

	/*
	 * Only plain MEM registrations may use the device's full set of
	 * supported page sizes; QP/CQ mappings are restricted to PAGE_SIZE.
	 */
	if (reg_type == IRDMA_MEMREG_TYPE_MEM)
		pgsz_bitmap = iwdev->rf->sc_dev.hw_attrs.page_size_cap;
	else
		pgsz_bitmap = PAGE_SIZE;

	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
	if (unlikely(!iwmr->page_size)) {
		kfree(iwmr);
		return ERR_PTR(-EOPNOTSUPP);
	}

	iwmr->len = region->length;
	iwpbl->user_base = virt;
	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);

	return iwmr;
}

/*
 * irdma_free_iwmr - release an irdma memory region object
 * @iwmr: MR object obtained from irdma_alloc_iwmr()
 *
 * Frees only the iwmr itself; the caller remains responsible for
 * releasing the associated ib_umem (kfree(NULL) is a safe no-op).
 */
static void irdma_free_iwmr(struct irdma_mr *iwmr)
{
kfree(iwmr);
}

/**
* irdma_reg_user_mr - Register a user memory region
* @pd: ptr of pd
Expand Down Expand Up @@ -2838,34 +2880,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
return ERR_PTR(-EFAULT);
}

iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
if (!iwmr) {
iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
if (IS_ERR(iwmr)) {
ib_umem_release(region);
return ERR_PTR(-ENOMEM);
return (struct ib_mr *)iwmr;
}

iwpbl = &iwmr->iwpbl;
iwpbl->iwmr = iwmr;
iwmr->region = region;
iwmr->ibmr.pd = pd;
iwmr->ibmr.device = pd->device;
iwmr->ibmr.iova = virt;
iwmr->page_size = PAGE_SIZE;

if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
iwmr->page_size = ib_umem_find_best_pgsz(region,
iwdev->rf->sc_dev.hw_attrs.page_size_cap,
virt);
if (unlikely(!iwmr->page_size)) {
kfree(iwmr);
ib_umem_release(region);
return ERR_PTR(-EOPNOTSUPP);
}
}
iwmr->len = region->length;
iwpbl->user_base = virt;
iwmr->type = req.reg_type;
iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);

switch (req.reg_type) {
case IRDMA_MEMREG_TYPE_QP:
Expand Down Expand Up @@ -2918,13 +2939,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
goto error;
}

iwmr->type = req.reg_type;

return &iwmr->ibmr;

error:
ib_umem_release(region);
kfree(iwmr);
irdma_free_iwmr(iwmr);

return ERR_PTR(err);
}
Expand Down

0 comments on commit 693a538

Please sign in to comment.