From b0a43db9087a21d96e1a0b716b8d9963064b2d58 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Sat, 19 Oct 2019 10:13:27 +0200
Subject: [PATCH 7/7] net: mvneta: add XDP_TX support

Implement the XDP_TX verdict and the ndo_xdp_xmit net_device_ops
function pointer.

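As a hypothetical illustration (not part of this patch; the file and
function names are placeholders), a minimal XDP program that exercises
the new verdict by reflecting every received frame back out of the
port it arrived on:

    /* xdp_tx_all.c -- build with:
     *   clang -O2 -target bpf -c xdp_tx_all.c -o xdp_tx_all.o
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_tx_all(struct xdp_md *ctx)
    {
            /* Bounce every frame back out of the ingress port. */
            return XDP_TX;
    }

    char _license[] SEC("license") = "GPL";

With such a program attached, every frame received on the mvneta port
takes the new mvneta_xdp_xmit_back() path below.
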
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/marvell/mvneta.c | 128 ++++++++++++++++++++++++--
 1 file changed, 121 insertions(+), 7 deletions(-)

--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1813,16 +1813,19 @@ static void mvneta_txq_bufs_free(struct
 
         mvneta_txq_inc_get(txq);
 
-        if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+        if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+            buf->type != MVNETA_TYPE_XDP_TX)
             dma_unmap_single(pp->dev->dev.parent,
                              tx_desc->buf_phys_addr,
                              tx_desc->data_size, DMA_TO_DEVICE);
-        if (!buf->skb)
-            continue;
-
-        bytes_compl += buf->skb->len;
-        pkts_compl++;
-        dev_kfree_skb_any(buf->skb);
+        if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+            bytes_compl += buf->skb->len;
+            pkts_compl++;
+            dev_kfree_skb_any(buf->skb);
+        } else if (buf->type == MVNETA_TYPE_XDP_TX ||
+                   buf->type == MVNETA_TYPE_XDP_NDO) {
+            xdp_return_frame(buf->xdpf);
+        }
     }
 
     netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1987,6 +1990,111 @@ int mvneta_rx_refill_queue(struct mvneta
 }
 
 static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+                        struct xdp_frame *xdpf, bool dma_map)
+{
+    struct mvneta_tx_desc *tx_desc;
+    struct mvneta_tx_buf *buf;
+    dma_addr_t dma_addr;
+
+    if (txq->count >= txq->tx_stop_threshold)
+        return MVNETA_XDP_DROPPED;
+
+    tx_desc = mvneta_txq_next_desc_get(txq);
+
+    buf = &txq->buf[txq->txq_put_index];
+    if (dma_map) {
+        /* ndo_xdp_xmit */
+        dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+                                  xdpf->len, DMA_TO_DEVICE);
+        if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+            mvneta_txq_desc_put(txq);
+            return MVNETA_XDP_DROPPED;
+        }
+        buf->type = MVNETA_TYPE_XDP_NDO;
+    } else {
+        struct page *page = virt_to_page(xdpf->data);
+
+        dma_addr = page_pool_get_dma_addr(page) +
+                   sizeof(*xdpf) + xdpf->headroom;
+        dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+                                   xdpf->len, DMA_BIDIRECTIONAL);
+        buf->type = MVNETA_TYPE_XDP_TX;
+    }
+    buf->xdpf = xdpf;
+
+    tx_desc->command = MVNETA_TXD_FLZ_DESC;
+    tx_desc->buf_phys_addr = dma_addr;
+    tx_desc->data_size = xdpf->len;
+
+    mvneta_update_stats(pp, 1, xdpf->len, true);
+    mvneta_txq_inc_put(txq);
+    txq->pending++;
+    txq->count++;
+
+    return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+    struct mvneta_tx_queue *txq;
+    struct netdev_queue *nq;
+    struct xdp_frame *xdpf;
+    int cpu;
+    u32 ret;
+
+    xdpf = convert_to_xdp_frame(xdp);
+    if (unlikely(!xdpf))
+        return MVNETA_XDP_DROPPED;
+
+    cpu = smp_processor_id();
+    txq = &pp->txqs[cpu % txq_number];
+    nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+    __netif_tx_lock(nq, cpu);
+    ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+    if (ret == MVNETA_XDP_TX)
+        mvneta_txq_pend_desc_add(pp, txq, 0);
+    __netif_tx_unlock(nq);
+
+    return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+                struct xdp_frame **frames, u32 flags)
+{
+    struct mvneta_port *pp = netdev_priv(dev);
+    int cpu = smp_processor_id();
+    struct mvneta_tx_queue *txq;
+    struct netdev_queue *nq;
+    int i, drops = 0;
+    u32 ret;
+
+    if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+        return -EINVAL;
+
+    txq = &pp->txqs[cpu % txq_number];
+    nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+    __netif_tx_lock(nq, cpu);
+    for (i = 0; i < num_frame; i++) {
+        ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+        if (ret != MVNETA_XDP_TX) {
+            xdp_return_frame_rx_napi(frames[i]);
+            drops++;
+        }
+    }
+
+    if (unlikely(flags & XDP_XMIT_FLUSH))
+        mvneta_txq_pend_desc_add(pp, txq, 0);
+    __netif_tx_unlock(nq);
+
+    return num_frame - drops;
+}
+
+static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                struct bpf_prog *prog, struct xdp_buff *xdp)
 {
@@ -2008,6 +2116,11 @@ mvneta_run_xdp(struct mvneta_port *pp, s
         }
         break;
     }
+    case XDP_TX:
+        ret = mvneta_xdp_xmit_back(pp, xdp);
+        if (ret != MVNETA_XDP_TX)
+            xdp_return_buff(xdp);
+        break;
     default:
         bpf_warn_invalid_xdp_action(act);
         /* fall through */
@@ -4581,6 +4694,7 @@ static const struct net_device_ops mvnet
     .ndo_get_stats64 = mvneta_get_stats64,
     .ndo_do_ioctl    = mvneta_ioctl,
     .ndo_bpf         = mvneta_xdp,
+    .ndo_xdp_xmit    = mvneta_xdp_xmit,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
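Note that the XDP_TX verdict covers frames received on the mvneta port
itself, while the new ndo_xdp_xmit callback is invoked by the core when
frames are redirected to this port from another device (e.g. via the
XDP_REDIRECT verdict and a devmap). A hypothetical loader sketch (not
part of this patch; the interface name "eth0" and the object file name
are placeholders) using the libbpf API of this era to attach the
program above in native/driver mode:

    /* xdp_attach.c -- load xdp_tx_all.o and attach it to eth0. */
    #include <bpf/libbpf.h>
    #include <net/if.h>
    #include <linux/if_link.h>

    int main(void)
    {
            struct bpf_program *prog;
            struct bpf_object *obj;
            int ifindex;

            obj = bpf_object__open("xdp_tx_all.o");
            if (libbpf_get_error(obj) || bpf_object__load(obj))
                    return 1;

            /* "xdp" matches the SEC() name in xdp_tx_all.c */
            prog = bpf_object__find_program_by_title(obj, "xdp");
            ifindex = if_nametoindex("eth0");
            if (!prog || !ifindex)
                    return 1;

            return bpf_set_link_xdp_fd(ifindex, bpf_program__fd(prog),
                                       XDP_FLAGS_DRV_MODE);
    }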