
Commit 4de0211

Authored by jahay1, committed by gregkh
idpf: simplify and fix splitq Tx packet rollback error path
[ Upstream commit b61dfa9 ]

Move (and rename) the existing rollback logic to singleq.c since that will be
the only consumer. Create a simplified splitq specific rollback function to
loop through and unmap tx_bufs based on the completion tag. This is critical
before replacing the Tx buffer ring with the buffer pool since the previous
rollback indexing will not work to unmap the chained buffers from the pool.

Cache the next_to_use index before any portion of the packet is put on the
descriptor ring. In case of an error, the rollback will bump tail to the
correct next_to_use value. Because the splitq path now supports different
types of context descriptors (and potentially multiple in the future), this
will take care of rolling back any and all context descriptors encoded on the
ring for the erroneous packet. The previous rollback logic was broken for PTP
packets since it would not account for the PTP context descriptor.

Fixes: 1a49cf8 ("idpf: add Tx timestamp flows")
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Tested-by: Samuel Salin <Samuel.salin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 9e20a0a commit 4de0211
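Before the file-by-file diff, here is a minimal, self-contained user-space sketch of the rollback scheme the commit message describes: cache next_to_use before anything is written to the ring, unwind buffers by completion tag when a DMA mapping fails, and bump tail back to the cached value. Every demo_* name below is a hypothetical stand-in invented for illustration, not an idpf structure or function; the actual driver changes are in the hunks that follow.

/*
 * Sketch of completion-tag based Tx rollback. All demo_* names are
 * illustrative stand-ins, not idpf driver code.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_RING_SIZE 8
#define DEMO_EMPTY_TAG 0xFFFFu

struct demo_tx_buf {
        unsigned short compl_tag;  /* tag shared by all buffers of one packet */
        bool mapped;               /* stand-in for a DMA mapping */
};

struct demo_txq {
        struct demo_tx_buf bufs[DEMO_RING_SIZE];
        unsigned int next_to_use;
        unsigned int tail;
};

/*
 * Unwind a packet whose mapping failed: release every buffer stamped with the
 * packet's completion tag, then point next_to_use/tail back at the value
 * cached before the packet touched the ring (the patch stores this as
 * params->prev_ntu), which also discards any context descriptors.
 */
static void demo_pkt_err_unmap(struct demo_txq *txq, unsigned int first_idx,
                               unsigned short tag, unsigned int prev_ntu)
{
        unsigned int idx = first_idx;

        while (txq->bufs[idx].compl_tag == tag) {
                txq->bufs[idx].mapped = false;
                txq->bufs[idx].compl_tag = DEMO_EMPTY_TAG;
                idx = (idx + 1) % DEMO_RING_SIZE;
        }

        txq->next_to_use = prev_ntu;
        txq->tail = prev_ntu;
}

int main(void)
{
        struct demo_txq txq = { .next_to_use = 2, .tail = 2 };
        unsigned int prev_ntu = txq.next_to_use;  /* cached up front */
        unsigned short tag = 7;
        unsigned int i;

        for (i = 0; i < DEMO_RING_SIZE; i++)
                txq.bufs[i].compl_tag = DEMO_EMPTY_TAG;

        /* "Map" three fragments of one packet, then pretend the next mapping fails. */
        for (i = 0; i < 3; i++) {
                txq.bufs[txq.next_to_use].compl_tag = tag;
                txq.bufs[txq.next_to_use].mapped = true;
                txq.next_to_use = (txq.next_to_use + 1) % DEMO_RING_SIZE;
        }

        demo_pkt_err_unmap(&txq, prev_ntu, tag, prev_ntu);
        printf("rolled back, tail/next_to_use restored to %u\n", txq.tail);

        return 0;
}

In the real patch, the same cached value (tx_params.prev_ntu) lets the splitq path roll back any context descriptors written for the packet, including the PTP context descriptor that the old index-based unwinding missed.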

File tree

3 files changed: +95, -58 lines


drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 55 additions & 2 deletions
@@ -179,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
 	return 1;
 }
 
+/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+					  struct sk_buff *skb,
+					  struct idpf_tx_buf *first, u16 idx)
+{
+	struct libeth_sq_napi_stats ss = { };
+	struct libeth_cq_pp cp = {
+		.dev = txq->dev,
+		.ss = &ss,
+	};
+
+	u64_stats_update_begin(&txq->stats_sync);
+	u64_stats_inc(&txq->q_stats.dma_map_errs);
+	u64_stats_update_end(&txq->stats_sync);
+
+	/* clear dma mappings for failed tx_buf map */
+	for (;;) {
+		struct idpf_tx_buf *tx_buf;
+
+		tx_buf = &txq->tx_buf[idx];
+		libeth_tx_complete(tx_buf, &cp);
+		if (tx_buf == first)
+			break;
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	if (skb_is_gso(skb)) {
+		union idpf_tx_flex_desc *tx_desc;
+
+		/* If we failed a DMA mapping for a TSO packet, we will have
+		 * used one additional descriptor for a context
+		 * descriptor. Reset that here.
+		 */
+		tx_desc = &txq->flex_tx[idx];
+		memset(tx_desc, 0, sizeof(*tx_desc));
+		if (idx == 0)
+			idx = txq->desc_count;
+		idx--;
+	}
+
+	/* Update tail in case netdev_xmit_more was previously true */
+	idpf_tx_buf_hw_update(txq, idx, false);
+}
+
 /**
  * idpf_tx_singleq_map - Build the Tx base descriptor
  * @tx_q: queue to send buffer on
@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-		if (dma_mapping_error(tx_q->dev, dma))
-			return idpf_tx_dma_map_error(tx_q, skb, first, i);
+		if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+			return idpf_tx_singleq_dma_map_error(tx_q, skb,
+							     first, i);
 
 		/* record length, and DMA address */
 		dma_unmap_len_set(tx_buf, len, size);

drivers/net/ethernet/intel/idpf/idpf_txrx.c

Lines changed: 37 additions & 54 deletions
@@ -2337,57 +2337,6 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 	return count;
 }
 
-/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
- */
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-			   struct idpf_tx_buf *first, u16 idx)
-{
-	struct libeth_sq_napi_stats ss = { };
-	struct libeth_cq_pp cp = {
-		.dev = txq->dev,
-		.ss = &ss,
-	};
-
-	u64_stats_update_begin(&txq->stats_sync);
-	u64_stats_inc(&txq->q_stats.dma_map_errs);
-	u64_stats_update_end(&txq->stats_sync);
-
-	/* clear dma mappings for failed tx_buf map */
-	for (;;) {
-		struct idpf_tx_buf *tx_buf;
-
-		tx_buf = &txq->tx_buf[idx];
-		libeth_tx_complete(tx_buf, &cp);
-		if (tx_buf == first)
-			break;
-		if (idx == 0)
-			idx = txq->desc_count;
-		idx--;
-	}
-
-	if (skb_is_gso(skb)) {
-		union idpf_tx_flex_desc *tx_desc;
-
-		/* If we failed a DMA mapping for a TSO packet, we will have
-		 * used one additional descriptor for a context
-		 * descriptor. Reset that here.
-		 */
-		tx_desc = &txq->flex_tx[idx];
-		memset(tx_desc, 0, sizeof(*tx_desc));
-		if (idx == 0)
-			idx = txq->desc_count;
-		idx--;
-	}
-
-	/* Update tail in case netdev_xmit_more was previously true */
-	idpf_tx_buf_hw_update(txq, idx, false);
-}
-
 /**
  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
  * @txq: the tx ring to wrap
@@ -2436,6 +2385,37 @@ static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
 	return true;
 }
 
+/**
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
+ */
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+					 struct idpf_tx_splitq_params *params,
+					 struct idpf_tx_buf *first)
+{
+	struct libeth_sq_napi_stats ss = { };
+	struct idpf_tx_buf *tx_buf = first;
+	struct libeth_cq_pp cp = {
+		.dev = txq->dev,
+		.ss = &ss,
+	};
+	u32 idx = 0;
+
+	u64_stats_update_begin(&txq->stats_sync);
+	u64_stats_inc(&txq->q_stats.dma_map_errs);
+	u64_stats_update_end(&txq->stats_sync);
+
+	do {
+		libeth_tx_complete(tx_buf, &cp);
+		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+	} while (idpf_tx_buf_compl_tag(tx_buf) == params->compl_tag);
+
+	/* Update tail in case netdev_xmit_more was previously true. */
+	idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+}
+
 /**
  * idpf_tx_splitq_map - Build the Tx flex descriptor
  * @tx_q: queue to send buffer on
@@ -2480,8 +2460,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
 
-		if (dma_mapping_error(tx_q->dev, dma))
-			return idpf_tx_dma_map_error(tx_q, skb, first, i);
+		if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+			return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+							    first);
 
 		first->nr_frags++;
 		idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
@@ -2922,7 +2903,9 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
 					struct idpf_tx_queue *tx_q)
 {
-	struct idpf_tx_splitq_params tx_params = { };
+	struct idpf_tx_splitq_params tx_params = {
+		.prev_ntu = tx_q->next_to_use,
+	};
 	union idpf_flex_tx_ctx_desc *ctx_desc;
 	struct idpf_tx_buf *first;
 	unsigned int count;

drivers/net/ethernet/intel/idpf/idpf_txrx.h

Lines changed: 3 additions & 2 deletions
@@ -195,6 +195,7 @@ struct idpf_tx_offload_params {
  * @compl_tag: Associated tag for completion
  * @td_tag: Descriptor tunneling tag
  * @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
  */
 struct idpf_tx_splitq_params {
 	enum idpf_tx_desc_dtype_value dtype;
@@ -205,6 +206,8 @@ struct idpf_tx_splitq_params {
 	};
 
 	struct idpf_tx_offload_params offload;
+
+	u16 prev_ntu;
 };
 
 enum idpf_tx_ctx_desc_eipt_offload {
@@ -1039,8 +1042,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 			   bool xmit_more);
 unsigned int idpf_size_to_txd_count(unsigned int size);
 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
-			   struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 					 struct sk_buff *skb);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
