Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
162 changes: 99 additions & 63 deletions drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include <net/libeth/tx.h>

#include "idpf.h"

/**
Expand Down Expand Up @@ -176,6 +178,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
return 1;
}

/**
 * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
 * @txq: queue to send buffer on
 * @skb: send buffer
 * @first: original first buffer info buffer for packet
 * @idx: starting point on ring to unwind
 *
 * Called when a DMA mapping fails partway through mapping @skb onto the
 * ring.  Walks the ring backwards from @idx down to (and including)
 * @first, handing each buffer to libeth_tx_complete() to release it, then
 * rewinds the queue position/tail so the abandoned slots are reused by
 * the next transmit attempt.
 */
static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
					  struct sk_buff *skb,
					  struct idpf_tx_buf *first, u16 idx)
{
	/* Local completion stats sink; dropped on return — a failed map is
	 * accounted only through the dma_map_errs counter below.
	 */
	struct libeth_sq_napi_stats ss = { };
	struct libeth_cq_pp cp = {
		.dev = txq->dev,
		.ss = &ss,
	};

	u64_stats_update_begin(&txq->stats_sync);
	u64_stats_inc(&txq->q_stats.dma_map_errs);
	u64_stats_update_end(&txq->stats_sync);

	/* clear dma mappings for failed tx_buf map */
	for (;;) {
		struct idpf_tx_buf *tx_buf;

		/* @first is completed too: the break is checked only after
		 * libeth_tx_complete() has run for the current slot.
		 */
		tx_buf = &txq->tx_buf[idx];
		libeth_tx_complete(tx_buf, &cp);
		if (tx_buf == first)
			break;
		/* Step backwards with ring wrap-around */
		if (idx == 0)
			idx = txq->desc_count;
		idx--;
	}

	if (skb_is_gso(skb)) {
		union idpf_tx_flex_desc *tx_desc;

		/* If we failed a DMA mapping for a TSO packet, we will have
		 * used one additional descriptor for a context
		 * descriptor. Reset that here.
		 */
		tx_desc = &txq->flex_tx[idx];
		memset(tx_desc, 0, sizeof(*tx_desc));
		/* Step back one more slot so the tail update below also
		 * reclaims the context descriptor's ring entry.
		 */
		if (idx == 0)
			idx = txq->desc_count;
		idx--;
	}

	/* Update tail in case netdev_xmit_more was previously true */
	idpf_tx_buf_hw_update(txq, idx, false);
}

/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
Expand Down Expand Up @@ -216,12 +270,14 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
if (unlikely(dma_mapping_error(tx_q->dev, dma)))
return idpf_tx_singleq_dma_map_error(tx_q, skb,
first, i);

/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
tx_buf->type = LIBETH_SQE_FRAG;

/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
Expand All @@ -235,14 +291,17 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
offsets,
max_data,
td_tag);
tx_desc++;
i++;

if (i == tx_q->desc_count) {
if (unlikely(++i == tx_q->desc_count)) {
tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
} else {
tx_buf++;
tx_desc++;
}

tx_buf->type = LIBETH_SQE_EMPTY;

dma += max_data;
size -= max_data;

Expand All @@ -255,21 +314,21 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,

tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
tx_desc++;
i++;

if (i == tx_q->desc_count) {
if (unlikely(++i == tx_q->desc_count)) {
tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
} else {
tx_buf++;
tx_desc++;
}

size = skb_frag_size(frag);
data_len -= size;

dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);

tx_buf = &tx_q->tx_buf[i];
}

skb_tx_timestamp(first->skb);
Expand All @@ -280,13 +339,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);

IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
first->type = LIBETH_SQE_SKB;
first->rs_idx = i;

/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);

nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
netdev_tx_sent_queue(nq, first->bytes);

idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
Expand All @@ -304,8 +363,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;

memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[ntu].ctx_entry = true;
txq->tx_buf[ntu].type = LIBETH_SQE_CTX;

ctx_desc = &txq->base_ctx[ntu];

Expand Down Expand Up @@ -356,11 +414,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
u32 count, buf_count = 1;
int csum, tso, needed;
unsigned int count;
__be16 protocol;

count = idpf_tx_desc_count_required(tx_q, skb);
count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);

Expand Down Expand Up @@ -399,11 +457,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;

if (tso) {
first->gso_segs = offload.tso_segs;
first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
first->packets = offload.tso_segs;
first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);

Expand All @@ -423,10 +481,15 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
unsigned int total_bytes = 0, total_pkts = 0;
struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
struct libeth_cq_pp cp = {
.dev = tx_q->dev,
.ss = &ss,
.napi = napi_budget,
};
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
Expand All @@ -444,47 +507,26 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
if (tx_buf->ctx_entry) {
/* Clear this flag here to avoid stale flag values when
* this buffer is used for actual data in the future.
* There are cases where the tx_buf struct / the flags
* field will not be cleared before being reused.
*/
tx_buf->ctx_entry = false;
if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}

/* if next_to_watch is not set then no work pending */
eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
if (!eop_desc)
if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
break;

/* prevent any other reads prior to eop_desc */
/* prevent any other reads prior to type */
smp_rmb();

eop_desc = &tx_q->base_tx[tx_buf->rs_idx];

/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;

/* clear next_to_watch to prevent false hangs */
tx_buf->next_to_watch = NULL;

/* update the statistics for this packet */
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;

napi_consume_skb(tx_buf->skb, napi_budget);

/* unmap skb header data */
dma_unmap_single(tx_q->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);

/* clear tx_buf data */
tx_buf->skb = NULL;
dma_unmap_len_set(tx_buf, len, 0);
libeth_tx_complete(tx_buf, &cp);

/* unmap remaining buffers */
while (tx_desc != eop_desc) {
Expand All @@ -498,13 +540,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
}

/* unmap any remaining paged data */
if (dma_unmap_len(tx_buf, len)) {
dma_unmap_page(tx_q->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buf, len, 0);
}
libeth_tx_complete(tx_buf, &cp);
}

/* update budget only if we did something */
Expand All @@ -524,19 +560,19 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;

*cleaned += total_pkts;
*cleaned += ss.packets;

u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_add(&tx_q->q_stats.packets, total_pkts);
u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
u64_stats_add(&tx_q->q_stats.packets, ss.packets);
u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);

np = netdev_priv(tx_q->netdev);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);

dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, total_pkts, total_bytes,
__netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);

Expand Down
Loading