Skip to content

Commit 504fd6a

Browse files
ShayAgros authored and davem330 committed
net: ena: fix DMA mapping function issues in XDP

This patch fixes several bugs found when (DMA/LLQ) mapping a packet for
transmission. The mapping procedure makes the transmitted packet accessible
by the device. When using LLQ, this requires copying the packet's header to
the push header (which would be passed to LLQ) and creating a DMA mapping
for the payload (if the packet doesn't fit the maximum push length). When
not using LLQ, we map the whole packet with DMA.

The following bugs are fixed in the code:

1. Add support for non-LLQ machines: The ena_xdp_tx_map_frame() function
   assumed that LLQ is supported, and never mapped the whole packet using
   DMA. On some instances, which don't support LLQ, this causes loss of
   traffic.

2. Wrong DMA buffer length passed to device: When using LLQ, the first
   'tx_max_header_size' bytes of the packet would be copied to the push
   header. The rest of the packet would be copied to a DMA'd buffer.

3. Freeing the XDP buffer twice in case of a mapping error: In case a
   buffer DMA mapping fails, the function uses xdp_return_frame_rx_napi()
   to free the RX buffer and returns from the function with an error. XDP
   frames that fail to xmit get freed by the kernel, so there is no need
   for this call.

Fixes: 548c494 ("net: ena: Implement XDP_TX action")
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 1650bdb commit 504fd6a

File tree

1 file changed

+28
-26
lines changed

1 file changed

+28
-26
lines changed

drivers/net/ethernet/amazon/ena/ena_netdev.c

Lines changed: 28 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
236236
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
237237
struct ena_tx_buffer *tx_info,
238238
struct xdp_frame *xdpf,
239-
void **push_hdr,
240-
u32 *push_len)
239+
struct ena_com_tx_ctx *ena_tx_ctx)
241240
{
242241
struct ena_adapter *adapter = xdp_ring->adapter;
243242
struct ena_com_buf *ena_buf;
244-
dma_addr_t dma = 0;
243+
int push_len = 0;
244+
dma_addr_t dma;
245+
void *data;
245246
u32 size;
246247

247248
tx_info->xdpf = xdpf;
249+
data = tx_info->xdpf->data;
248250
size = tx_info->xdpf->len;
249-
ena_buf = tx_info->bufs;
250251

251-
/* llq push buffer */
252-
*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
253-
*push_hdr = tx_info->xdpf->data;
252+
if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
253+
/* Designate part of the packet for LLQ */
254+
push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
255+
256+
ena_tx_ctx->push_header = data;
257+
258+
size -= push_len;
259+
data += push_len;
260+
}
261+
262+
ena_tx_ctx->header_len = push_len;
254263

255-
if (size - *push_len > 0) {
264+
if (size > 0) {
256265
dma = dma_map_single(xdp_ring->dev,
257-
*push_hdr + *push_len,
258-
size - *push_len,
266+
data,
267+
size,
259268
DMA_TO_DEVICE);
260269
if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
261270
goto error_report_dma_error;
262271

263-
tx_info->map_linear_data = 1;
264-
tx_info->num_of_bufs = 1;
265-
}
272+
tx_info->map_linear_data = 0;
266273

267-
ena_buf->paddr = dma;
268-
ena_buf->len = size;
274+
ena_buf = tx_info->bufs;
275+
ena_buf->paddr = dma;
276+
ena_buf->len = size;
277+
278+
ena_tx_ctx->ena_bufs = ena_buf;
279+
ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
280+
}
269281

270282
return 0;
271283

@@ -274,10 +286,6 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
274286
&xdp_ring->syncp);
275287
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
276288

277-
xdp_return_frame_rx_napi(tx_info->xdpf);
278-
tx_info->xdpf = NULL;
279-
tx_info->num_of_bufs = 0;
280-
281289
return -EINVAL;
282290
}
283291

@@ -289,24 +297,18 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
289297
struct ena_com_tx_ctx ena_tx_ctx = {};
290298
struct ena_tx_buffer *tx_info;
291299
u16 next_to_use, req_id;
292-
void *push_hdr;
293-
u32 push_len;
294300
int rc;
295301

296302
next_to_use = xdp_ring->next_to_use;
297303
req_id = xdp_ring->free_ids[next_to_use];
298304
tx_info = &xdp_ring->tx_buffer_info[req_id];
299305
tx_info->num_of_bufs = 0;
300306

301-
rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
307+
rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
302308
if (unlikely(rc))
303309
return rc;
304310

305-
ena_tx_ctx.ena_bufs = tx_info->bufs;
306-
ena_tx_ctx.push_header = push_hdr;
307-
ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
308311
ena_tx_ctx.req_id = req_id;
309-
ena_tx_ctx.header_len = push_len;
310312

311313
rc = ena_xmit_common(dev,
312314
xdp_ring,

0 commit comments

Comments
 (0)