blob: 08be58909531aeda76eb1f301af08288fa50db21 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001From: Felix Fietkau <nbd@nbd.name>
2Date: Wed, 26 Aug 2020 16:55:54 +0200
3Subject: [PATCH] net: ethernet: mtk_eth_soc: fix unnecessary tx queue
4 stops
5
6When running short on descriptors, only stop the queue for the netdev that tx
7was attempted for. By the time something tries to send on the other netdev,
8the ring might have some more room already.
9
10Signed-off-by: Felix Fietkau <nbd@nbd.name>
11---
12
13--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
14+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
15@@ -1158,17 +1158,6 @@ static void mtk_wake_queue(struct mtk_et
16 }
17 }
18
19-static void mtk_stop_queue(struct mtk_eth *eth)
20-{
21- int i;
22-
23- for (i = 0; i < MTK_MAC_COUNT; i++) {
24- if (!eth->netdev[i])
25- continue;
26- netif_stop_queue(eth->netdev[i]);
27- }
28-}
29-
30 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
31 {
32 struct mtk_mac *mac = netdev_priv(dev);
33@@ -1189,7 +1178,7 @@ static int mtk_start_xmit(struct sk_buff
34
35 tx_num = mtk_cal_txd_req(skb);
36 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
37- mtk_stop_queue(eth);
38+ netif_stop_queue(dev);
39 netif_err(eth, tx_queued, dev,
40 "Tx Ring full when queue awake!\n");
41 spin_unlock(&eth->page_lock);
42@@ -1215,7 +1204,7 @@ static int mtk_start_xmit(struct sk_buff
43 goto drop;
44
45 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
46- mtk_stop_queue(eth);
47+ netif_stop_queue(dev);
48
49 spin_unlock(&eth->page_lock);
50