From 26d3cc476c26832e1e05db182ac27906f6c81f2d Mon Sep 17 00:00:00 2001
From: Camelia Groza <camelia.groza@nxp.com>
Date: Tue, 29 Oct 2019 16:12:18 +0200
Subject: [PATCH] sdk_dpaa: ls1043a errata: memory related fixes

Avoid a crash by verifying the allocation return status.
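
(Illustrative sketch, not part of the patch: the crash comes from passing
an unchecked alloc_page() result straight to page_address(); checking the
return value first lets the caller report -ENOMEM instead of dereferencing
a NULL page. The helper name is hypothetical and assumes the existing
includes of dpaa_eth_sg.c.)

  /* Sketch only: allocate one page for a buffer and fail cleanly instead
   * of crashing on a NULL page, mirroring the skb_to_sg_fd() hunk below.
   */
  static void *a010022_alloc_sgt_page(void)
  {
  	struct page *page = alloc_page(GFP_ATOMIC);

  	if (unlikely(!page))
  		return NULL;	/* caller turns this into -ENOMEM */

  	return page_address(page);
  }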

Use the standard API for determining the page order needed for
allocating Jumbo-sized skbs.
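
(For illustration only: get_order() is the kernel's standard helper for
mapping a byte count to the smallest page order that covers it, replacing
the open-coded page arithmetic. The wrapper name below is hypothetical and
assumes the existing includes of dpaa_eth_sg.c.)

  /* Sketch only: reserve physically contiguous, compound pages large
   * enough for a Jumbo-sized skb of 'nsize' bytes, as in the hunk below.
   */
  static struct page *a010022_alloc_jumbo_pages(unsigned int nsize)
  {
  	return alloc_pages(GFP_ATOMIC | __GFP_COMP, get_order(nsize));
  }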

Explicitly remove the old skb outside the workaround, for both successful
and unsuccessful realignments. Make sure the old skb's memory isn't leaked.
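
(Caller-side pattern, paraphrased from the dpa_tx_extended() hunks below;
the wrapper name is hypothetical: the original skb is freed only after a
successful realignment, so on failure the existing error path still owns
it and frees it exactly once.)

  /* Sketch only: realign into a new skb and swap it in. On failure the
   * old skb is left untouched for the caller's error path; on success it
   * is dropped here, exactly once, so nothing leaks.
   */
  static struct sk_buff *a010022_swap_skb(struct sk_buff *skb,
  					  struct dpa_priv_s *priv)
  {
  	struct sk_buff *nskb = a010022_realign_skb(skb, priv);

  	if (!nskb)
  		return NULL;

  	dev_kfree_skb(skb);
  	return nskb;
  }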

Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
---
 .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 30 ++++++++++++++--------
 1 file changed, 19 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
@@ -809,8 +809,8 @@ static struct sk_buff *a010022_realign_s
 {
 	int trans_offset = skb_transport_offset(skb);
 	int net_offset = skb_network_offset(skb);
-	int nsize, headroom, npage_order;
 	struct sk_buff *nskb = NULL;
+	int nsize, headroom;
 	struct page *npage;
 	void *npage_addr;
 
@@ -825,8 +825,7 @@ static struct sk_buff *a010022_realign_s
 		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	/* Reserve enough memory to accommodate Jumbo frames */
-	npage_order = (nsize - 1) / PAGE_SIZE;
-	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, npage_order);
+	npage = alloc_pages(GFP_ATOMIC | __GFP_COMP, get_order(nsize));
 	if (unlikely(!npage)) {
 		WARN_ONCE(1, "Memory allocation failure\n");
 		return NULL;
@@ -869,7 +868,6 @@ static struct sk_buff *a010022_realign_s
 	/* We don't want the buffer to be recycled so we mark it accordingly */
 	nskb->mark = NONREC_MARK;
 
-	dev_kfree_skb(skb);
 	return nskb;
 
 err:
@@ -911,8 +909,13 @@ int __hot skb_to_sg_fd(struct dpa_priv_s
 	 * is in place and we need to avoid crossing a 4k boundary.
 	 */
 #ifndef CONFIG_PPC
-	if (unlikely(dpaa_errata_a010022))
-		sgt_buf = page_address(alloc_page(GFP_ATOMIC));
+	if (unlikely(dpaa_errata_a010022)) {
+		struct page *new_page = alloc_page(GFP_ATOMIC);
+
+		if (unlikely(!new_page))
+			return -ENOMEM;
+		sgt_buf = page_address(new_page);
+	}
 	else
 #endif
 		sgt_buf = netdev_alloc_frag(priv->tx_headroom + sgt_size);
@@ -1061,6 +1064,7 @@ int __hot dpa_tx_extended(struct sk_buff
 	int err = 0;
 	bool nonlinear;
 	int *countptr, offset = 0;
+	struct sk_buff *nskb;
 
 	priv = netdev_priv(net_dev);
 	/* Non-migratable context, safe to use raw_cpu_ptr */
@@ -1072,9 +1076,11 @@ int __hot dpa_tx_extended(struct sk_buff
 
 #ifndef CONFIG_PPC
 	if (unlikely(dpaa_errata_a010022) && a010022_check_skb(skb, priv)) {
-		skb = a010022_realign_skb(skb, priv);
-		if (!skb)
+		nskb = a010022_realign_skb(skb, priv);
+		if (!nskb)
 			goto skb_to_fd_failed;
+		dev_kfree_skb(skb);
+		skb = nskb;
 	}
 #endif
 
@@ -1130,15 +1136,17 @@ int __hot dpa_tx_extended(struct sk_buff
 
 	/* Code borrowed from skb_unshare(). */
 	if (skb_cloned(skb)) {
-		struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
+		nskb = skb_copy(skb, GFP_ATOMIC);
 		kfree_skb(skb);
 		skb = nskb;
 #ifndef CONFIG_PPC
 		if (unlikely(dpaa_errata_a010022) &&
 		    a010022_check_skb(skb, priv)) {
-			skb = a010022_realign_skb(skb, priv);
-			if (!skb)
+			nskb = a010022_realign_skb(skb, priv);
+			if (!nskb)
 				goto skb_to_fd_failed;
+			dev_kfree_skb(skb);
+			skb = nskb;
 		}
 #endif
 		/* skb_copy() has now linearized the skbuff. */