From 73cc32aace5fe123182337c3abd769a1d6edd9fe Mon Sep 17 00:00:00 2001
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Date: Wed, 4 Apr 2018 15:12:28 +0300
Subject: [PATCH] dpaa_eth: fix iova handling for sg frames

The driver relies on the no longer valid assumption that dma addresses
(iovas) are identical to physical addresses and uses phys_to_virt() to
make iova -> vaddr conversions. Fix this also for scatter-gather frames,
using the iova -> phys conversion function added in the previous patch.
While at it, clean up a redundant dpaa_bpid2pool() call and pass the bp
as a parameter.

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Acked-by: Madalin Bucur <madalin.bucur@nxp.com>
[rebase]
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 41 +++++++++++++++-----------
 1 file changed, 24 insertions(+), 17 deletions(-)

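Note: dpaa_iova_to_phys() is the iova -> phys helper added by the
previous patch in this series and is not part of this diff. As a rough
sketch of the shape such a helper takes (assumed here, not copied from
that patch): it resolves the iova through the device's IOMMU domain via
the kernel's iommu_get_domain_for_dev()/iommu_iova_to_phys() API, and
falls back to treating the address as physical when no IOMMU is present.

#include <linux/iommu.h>

/* Assumed sketch only. Behind an IOMMU (e.g. an SMMU), the address
 * handed to the hardware is an iova, so it must be translated back to
 * a physical address before phys_to_virt() may be applied to it.
 * Without an IOMMU, dma addresses and physical addresses coincide.
 */
static inline phys_addr_t dpaa_iova_to_phys(struct device *dev,
					    dma_addr_t addr)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	return domain ? iommu_iova_to_phys(domain, addr) : addr;
}

One consequence worth noting: iommu_iova_to_phys() can only resolve an
iova while it is still mapped in the domain, which is why the first
hunk below translates the sgt address before calling
dma_unmap_single() rather than after.
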
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1656,14 +1656,17 @@ static struct sk_buff *dpaa_cleanup_tx_f
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr,
-				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-				 dma_dir);
 
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),
 		 * it's from lowmem.
 		 */
-		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+		sgt = phys_to_virt(dpaa_iova_to_phys(dev,
+						     addr +
+						     qm_fd_get_offset(fd)));
+
+		dma_unmap_single(dev, addr,
+				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+				 dma_dir);
 
 		/* sgt[0] is from lowmem, was dma_map_single()-ed */
 		dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
@@ -1702,7 +1705,7 @@ static struct sk_buff *dpaa_cleanup_tx_f
 	else
 #endif
 		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(phys_to_virt(addr));
+		skb_free_frag(phys_to_virt(skbh));
 	}
 
 	return skb;
@@ -1760,14 +1763,14 @@ static struct sk_buff *contig_fd_to_skb(
  * The page fragment holding the S/G Table is recycled here.
  */
 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
-				    const struct qm_fd *fd)
+				    const struct qm_fd *fd,
+				    struct dpaa_bp *dpaa_bp,
+				    void *vaddr)
 {
 	ssize_t fd_off = qm_fd_get_offset(fd);
-	dma_addr_t addr = qm_fd_addr(fd);
 	const struct qm_sg_entry *sgt;
 	struct page *page, *head_page;
-	struct dpaa_bp *dpaa_bp;
-	void *vaddr, *sg_vaddr;
+	void *sg_vaddr;
 	int frag_off, frag_len;
 	struct sk_buff *skb;
 	dma_addr_t sg_addr;
@@ -1776,7 +1779,6 @@ static struct sk_buff *sg_fd_to_skb(cons
 	int *count_ptr;
 	int i;
 
-	vaddr = phys_to_virt(addr);
 	WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
 
 	/* Iterate through the SGT entries and add data buffers to the skb */
@@ -1787,14 +1789,18 @@ static struct sk_buff *sg_fd_to_skb(cons
 		WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
 		sg_addr = qm_sg_addr(&sgt[i]);
-		sg_vaddr = phys_to_virt(sg_addr);
-		WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-				    SMP_CACHE_BYTES));
 
 		/* We may use multiple Rx pools */
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
-		if (!dpaa_bp)
+		if (!dpaa_bp) {
+			pr_info("%s: fail to get dpaa_bp for sg bpid %d\n",
+				__func__, sgt[i].bpid);
 			goto free_buffers;
+		}
+		sg_vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev,
+							  sg_addr));
+		WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+				    SMP_CACHE_BYTES));
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 		dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
@@ -1866,10 +1872,11 @@ free_buffers:
 	/* free all the SG entries */
 	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
 		sg_addr = qm_sg_addr(&sgt[i]);
-		sg_vaddr = phys_to_virt(sg_addr);
-		skb_free_frag(sg_vaddr);
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (dpaa_bp) {
+			sg_addr = dpaa_iova_to_phys(dpaa_bp->dev, sg_addr);
+			sg_vaddr = phys_to_virt(sg_addr);
+			skb_free_frag(sg_vaddr);
 			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
 			(*count_ptr)--;
 		}
@@ -2498,7 +2505,7 @@ static enum qman_cb_dqrr_result rx_defau
 	if (likely(fd_format == qm_fd_contig))
 		skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
 	else
-		skb = sg_fd_to_skb(priv, fd);
+		skb = sg_fd_to_skb(priv, fd, dpaa_bp, vaddr);
 	if (!skb)
 		return qman_cb_dqrr_consume;
 