net: core: page_pool: add user refcnt and reintroduce page_pool_destroy
Jesper recently removed page_pool_destroy() (from driver invocation)
and moved shutdown and free of the page_pool into xdp_rxq_info_unreg(),
in order to handle in-flight packets/pages. This created an asymmetry
in the drivers' create/destroy pairs.

This patch reintroduces page_pool_destroy() and adds a page_pool user
refcnt. This simplifies drivers' error handling, as drivers now always
call page_pool_destroy() and don't need to track whether
xdp_rxq_info_reg_mem_model() succeeded.
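
As an illustration, a minimal sketch of the intended call pattern
(hypothetical driver code, not part of this patch; the drv_rxq struct,
function names and pool parameters are made up):

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/numa.h>
#include <net/page_pool.h>
#include <net/xdp.h>

struct drv_rxq {			/* hypothetical per-queue state */
	struct xdp_rxq_info xdp_rxq;
	struct page_pool *page_pool;
};

/* page_pool_create() is always matched by page_pool_destroy(),
 * whether or not the mem model registration succeeded.
 */
static int drv_open_rxq(struct drv_rxq *rq, struct net_device *ndev)
{
	struct page_pool_params pp_params = {
		.order     = 0,
		.pool_size = 1024,
		.nid       = NUMA_NO_NODE,
		.dev       = ndev->dev.parent,
	};
	int err;

	rq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rq->page_pool))
		return PTR_ERR(rq->page_pool);

	err = xdp_rxq_info_reg(&rq->xdp_rxq, ndev, 0);
	if (err)
		goto err_destroy;

	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rq->page_pool);
	if (err)
		goto err_unreg;

	return 0;

err_unreg:
	xdp_rxq_info_unreg(&rq->xdp_rxq);
err_destroy:
	page_pool_destroy(rq->page_pool);	/* safe on every error path */
	return err;
}

static void drv_close_rxq(struct drv_rxq *rq)
{
	xdp_rxq_info_unreg(&rq->xdp_rxq);	/* releases the mem-model ref */
	page_pool_destroy(rq->page_pool);	/* drops the creator's ref */
}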

The refcnt could also be used for the special case where a single
RX-queue (with a single page_pool) provides packets for two
net_device instances, and thus needs to register the same page_pool
twice, with two xdp_rxq_info structures.
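
A sketch of that dual-registration case (again hypothetical; error
handling omitted, pp_params as in the sketch above, rxq_a/rxq_b already
registered with xdp_rxq_info_reg()), tracking user_cnt at each step:

pool = page_pool_create(&pp_params);			      /* user_cnt = 1 */
xdp_rxq_info_reg_mem_model(&rxq_a, MEM_TYPE_PAGE_POOL, pool); /* -> 2 */
xdp_rxq_info_reg_mem_model(&rxq_b, MEM_TYPE_PAGE_POOL, pool); /* -> 3 */

/* Teardown: each unreg eventually drops one user ref (the actual free
 * may be deferred until in-flight pages are returned); only the last
 * put, here from page_pool_destroy(), releases the pool's resources.
 */
xdp_rxq_info_unreg(&rxq_a);				      /* -> 2 */
xdp_rxq_info_unreg(&rxq_b);				      /* -> 1 */
page_pool_destroy(pool);				      /* -> 0, freed */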

This patch is primarily meant to ease API usage for drivers. The
recently merged netsec driver actually has a bug in this area, which
this API change solves.

This patch is a modified version of Ivan Khoronzhuk's original patch.

Link: https://lore.kernel.org/netdev/20190625175948.24771-2-ivan.khoronzhuk@linaro.org/
Fixes: 5c67bf0 ("net: netsec: Use page_pool API")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
ikhorn authored and davem330 committed Jul 8, 2019
1 parent 49db922 commit 1da4bbe
Showing 5 changed files with 40 additions and 8 deletions.
4 changes: 2 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -577,8 +577,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		}
 		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
 						 MEM_TYPE_PAGE_POOL, rq->page_pool);
-		if (err)
-			page_pool_free(rq->page_pool);
 	}
 	if (err)
 		goto err_free;
@@ -646,6 +644,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
+	page_pool_destroy(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);

 	return err;
@@ -680,6 +679,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	}

 	xdp_rxq_info_unreg(&rq->xdp_rxq);
+	page_pool_destroy(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
 }

8 changes: 2 additions & 6 deletions drivers/net/ethernet/socionext/netsec.c
@@ -1212,15 +1212,11 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
 		}
 	}

-	/* Rx is currently using page_pool
-	 * since the pool is created during netsec_setup_rx_dring(), we need to
-	 * free the pool manually if the registration failed
-	 */
+	/* Rx is currently using page_pool */
 	if (id == NETSEC_RING_RX) {
 		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
 			xdp_rxq_info_unreg(&dring->xdp_rxq);
-		else
-			page_pool_free(dring->page_pool);
+		page_pool_destroy(dring->page_pool);
 	}

 	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
25 changes: 25 additions & 0 deletions include/net/page_pool.h
@@ -101,6 +101,12 @@ struct page_pool {
 	struct ptr_ring ring;

 	atomic_t pages_state_release_cnt;
+
+	/* A page_pool is strictly tied to a single RX-queue, being
+	 * protected by NAPI, due to the pp_alloc_cache above. This
+	 * refcnt serves to simplify the drivers' error handling.
+	 */
+	refcount_t user_cnt;
 };

 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -134,6 +140,15 @@ static inline void page_pool_free(struct page_pool *pool)
 #endif
 }

+/* Drivers use this instead of page_pool_free */
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+	if (!pool)
+		return;
+
+	page_pool_free(pool);
+}
+
 /* Never call this directly, use helpers below */
 void __page_pool_put_page(struct page_pool *pool,
 			  struct page *page, bool allow_direct);
@@ -201,4 +216,14 @@ static inline bool is_page_pool_compiled_in(void)
 #endif
 }

+static inline void page_pool_get(struct page_pool *pool)
+{
+	refcount_inc(&pool->user_cnt);
+}
+
+static inline bool page_pool_put(struct page_pool *pool)
+{
+	return refcount_dec_and_test(&pool->user_cnt);
+}
+
 #endif /* _NET_PAGE_POOL_H */
8 changes: 8 additions & 0 deletions net/core/page_pool.c
@@ -49,6 +49,9 @@ static int page_pool_init(struct page_pool *pool,

 	atomic_set(&pool->pages_state_release_cnt, 0);

+	/* The driver calling page_pool_create() must also call page_pool_destroy() */
+	refcount_set(&pool->user_cnt, 1);
+
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		get_device(pool->p.dev);

@@ -70,6 +73,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
 		kfree(pool);
 		return ERR_PTR(err);
 	}
+
 	return pool;
 }
 EXPORT_SYMBOL(page_pool_create);
@@ -356,6 +360,10 @@ static void __warn_in_flight(struct page_pool *pool)

 void __page_pool_free(struct page_pool *pool)
 {
+	/* Only the last user actually frees/releases resources */
+	if (!page_pool_put(pool))
+		return;
+
 	WARN(pool->alloc.count, "API usage violation");
 	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");

3 changes: 3 additions & 0 deletions net/core/xdp.c
@@ -370,6 +370,9 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
 		goto err;
 	}

+	if (type == MEM_TYPE_PAGE_POOL)
+		page_pool_get(xdp_alloc->page_pool);
+
 	mutex_unlock(&mem_id_lock);

 	trace_mem_connect(xdp_alloc, xdp_rxq);
