From e21cd0d3ef6f039c674b9e22581aaaabdf3a40f7 Mon Sep 17 00:00:00 2001
From: Camelia Groza <camelia.groza@nxp.com>
Date: Fri, 24 Nov 2017 11:55:51 +0200
Subject: [PATCH] sdk_dpaa: ceetm: coding style fixes and added comments

Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
---
 .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 38 ++++++++++++++--------
 1 file changed, 24 insertions(+), 14 deletions(-)

--- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
@@ -69,15 +69,17 @@ static void get_dcp_and_sp(struct net_de
 static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
                       const struct qm_mr_entry *msg)
 {
-        struct net_device *net_dev;
-        struct ceetm_class *cls;
+        struct dpa_percpu_priv_s *dpa_percpu_priv;
         struct ceetm_class_stats *cstats = NULL;
         const struct dpa_priv_s *dpa_priv;
-        struct dpa_percpu_priv_s *dpa_percpu_priv;
-        struct sk_buff *skb;
         struct qm_fd fd = msg->ern.fd;
+        struct net_device *net_dev;
+        struct ceetm_fq *ceetm_fq;
+        struct ceetm_class *cls;
+        struct sk_buff *skb;

-        net_dev = ((struct ceetm_fq *)fq)->net_dev;
+        ceetm_fq = container_of(fq, struct ceetm_fq, fq);
+        net_dev = ceetm_fq->net_dev;
         dpa_priv = netdev_priv(net_dev);
         dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv);

@@ -86,7 +88,7 @@ static void ceetm_ern(struct qman_portal
         dpa_percpu_priv->stats.tx_fifo_errors++;

         /* Increment CEETM counters */
-        cls = ((struct ceetm_fq *)fq)->ceetm_cls;
+        cls = ceetm_fq->ceetm_cls;
         switch (cls->type) {
         case CEETM_PRIO:
                 cstats = this_cpu_ptr(cls->prio.cstats);
@@ -99,11 +101,15 @@ static void ceetm_ern(struct qman_portal
         if (cstats)
                 cstats->ern_drop_count++;

+        /* Release the buffers that were supposed to be recycled. */
         if (fd.bpid != 0xff) {
                 dpa_fd_release(net_dev, &fd);
                 return;
         }

+        /* Release the frames that were supposed to return on the
+         * confirmation path.
+         */
         skb = _dpa_cleanup_tx_fd(dpa_priv, &fd);
         dev_kfree_skb_any(skb);
 }
@@ -1909,18 +1915,22 @@ static struct ceetm_class *ceetm_classif

 int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
 {
-        int ret;
-        bool act_drop = false;
+        const int queue_mapping = dpa_get_queue_mapping(skb);
         struct Qdisc *sch = net_dev->qdisc;
-        struct ceetm_class *cl;
+        struct ceetm_class_stats *cstats;
+        struct ceetm_qdisc_stats *qstats;
         struct dpa_priv_s *priv_dpa;
         struct ceetm_fq *ceetm_fq;
+        struct ceetm_qdisc *priv;
         struct qman_fq *conf_fq;
-        struct ceetm_qdisc *priv = qdisc_priv(sch);
-        struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats);
-        struct ceetm_class_stats *cstats;
-        const int queue_mapping = dpa_get_queue_mapping(skb);
-        spinlock_t *root_lock = qdisc_lock(sch);
+        struct ceetm_class *cl;
+        spinlock_t *root_lock;
+        bool act_drop = false;
+        int ret;
+
+        root_lock = qdisc_lock(sch);
+        priv = qdisc_priv(sch);
+        qstats = this_cpu_ptr(priv->root.qstats);

         spin_lock(root_lock);
         cl = ceetm_classify(skb, sch, &ret, &act_drop);