Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 57 additions & 4 deletions drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
return 1;
}

/**
 * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
 * @txq: queue to send buffer on
 * @skb: send buffer
 * @first: original first buffer info buffer for packet
 * @idx: starting point on ring to unwind
 *
 * Error path for idpf_tx_singleq_map(): invoked when a DMA mapping for one
 * of the packet's fragments fails mid-way through descriptor writing.
 * Records the failure in the queue stats, releases every buffer mapped so
 * far by walking the ring backward from @idx until @first is reached,
 * clears the extra context descriptor consumed by GSO packets, and bumps
 * the hardware tail so any descriptors deferred by a previous
 * netdev_xmit_more() are not left stranded.
 */
static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
					  struct sk_buff *skb,
					  struct idpf_tx_buf *first, u16 idx)
{
	/* Throwaway stats sink: libeth_tx_complete() is used here only for
	 * its unmap/free side effects; the accumulated stats are discarded.
	 */
	struct libeth_sq_napi_stats ss = { };
	struct libeth_cq_pp cp = {
		.dev = txq->dev,
		.ss = &ss,
	};

	u64_stats_update_begin(&txq->stats_sync);
	u64_stats_inc(&txq->q_stats.dma_map_errs);
	u64_stats_update_end(&txq->stats_sync);

	/* clear dma mappings for failed tx_buf map */
	for (;;) {
		struct idpf_tx_buf *tx_buf;

		tx_buf = &txq->tx_buf[idx];
		libeth_tx_complete(tx_buf, &cp);
		if (tx_buf == first)
			break;
		/* step backward through the ring, wrapping below index 0 */
		if (idx == 0)
			idx = txq->desc_count;
		idx--;
	}

	if (skb_is_gso(skb)) {
		union idpf_tx_flex_desc *tx_desc;

		/* If we failed a DMA mapping for a TSO packet, we will have
		 * used one additional descriptor for a context
		 * descriptor. Reset that here.
		 */
		tx_desc = &txq->flex_tx[idx];
		memset(tx_desc, 0, sizeof(*tx_desc));
		/* back up once more past the context descriptor slot */
		if (idx == 0)
			idx = txq->desc_count;
		idx--;
	}

	/* Update tail in case netdev_xmit_more was previously true */
	idpf_tx_buf_hw_update(txq, idx, false);
}

/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
Expand Down Expand Up @@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
if (unlikely(dma_mapping_error(tx_q->dev, dma)))
return idpf_tx_singleq_dma_map_error(tx_q, skb,
first, i);

/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
Expand Down Expand Up @@ -362,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
u32 count, buf_count = 1;
int csum, tso, needed;
unsigned int count;
__be16 protocol;

count = idpf_tx_desc_count_required(tx_q, skb);
count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);

Expand Down
Loading