From 936ce2452068cb0f6d48ca7d77d6b975802c19ae Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Tue, 3 Sep 2019 14:13:32 +0300
Subject: [PATCH] dpaa2-eth: Add support for Rx traffic classes

The firmware reserves for each DPNI a number of RX frame queues
equal to the number of configured flows x number of configured
traffic classes.

Current driver configuration directs all incoming traffic to
FQs corresponding to TC0, leaving all other priority levels unused.

Start adding support for multiple ingress traffic classes, by
configuring the FQs associated with all priority levels, not just
TC0. All settings that are per-TC, such as those related to
hashing and flow steering, are also updated.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
 .../ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c |  7 ++-
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 70 +++++++++++++++-------
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |  4 +-
 .../net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 19 ++++--
 4 files changed, 68 insertions(+), 32 deletions(-)

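As a quick illustration of the FQ layout that the reworked setup_fqs() builds (one TxConf FQ per Tx flow, followed by one Rx FQ per traffic class/flow pair), here is a small standalone C sketch. The QUEUES and TCS values and the simplified struct fq are illustrative assumptions standing in for dpaa2_eth_queue_count(), dpaa2_eth_tc_count() and struct dpaa2_eth_fq; this is not driver code.

/*
 * Standalone sketch: builds the same FQ table shape as setup_fqs() does
 * after this patch. QUEUES/TCS are assumed example values.
 */
#include <stdio.h>

#define QUEUES	8	/* assumed stand-in for dpaa2_eth_queue_count() */
#define TCS	4	/* assumed stand-in for dpaa2_eth_tc_count() */

enum fq_type { TX_CONF_FQ, RX_FQ };

struct fq {
	enum fq_type type;
	int tc;
	int flowid;
};

int main(void)
{
	struct fq fq[QUEUES + TCS * QUEUES];
	int num_fqs = 0, i, j;

	/* One TxConf FQ per Tx flow, unchanged by this patch */
	for (i = 0; i < QUEUES; i++) {
		fq[num_fqs].type = TX_CONF_FQ;
		fq[num_fqs].tc = 0;
		fq[num_fqs++].flowid = i;
	}

	/* One Rx FQ per (traffic class, flow) pair instead of per flow only */
	for (j = 0; j < TCS; j++) {
		for (i = 0; i < QUEUES; i++) {
			fq[num_fqs].type = RX_FQ;
			fq[num_fqs].tc = j;
			fq[num_fqs++].flowid = i;
		}
	}

	printf("total FQs: %d (TxConf: %d, Rx: %d)\n",
	       num_fqs, QUEUES, TCS * QUEUES);
	return 0;
}

With a single traffic class the Rx layout is the same as before; with up to DPAA2_ETH_MAX_TCS classes the Rx FQ count grows to DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS, which is why the limit in dpaa2-eth.h is redefined below.
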
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq
 	int i, err;
 
 	seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
-	seq_printf(file, "%s%16s%16s%16s%16s\n",
-		   "VFQID", "CPU", "Type", "Frames", "Pending frames");
+	seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+		   "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
 
 	for (i = 0; i < priv->num_fqs; i++) {
 		fq = &priv->fq[i];
@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq
 		if (err)
 			fcnt = 0;
 
-		seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+		seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
 			   fq->fqid,
 			   fq->target_cpu,
+			   fq->tc,
 			   fq_type_to_str(fq),
 			   fq->stats.frames,
 			   fcnt);
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1231,6 +1231,7 @@ static void disable_ch_napi(struct dpaa2
 static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
 {
 	struct dpni_taildrop td = {0};
+	struct dpaa2_eth_fq *fq;
 	int i, err;
 
 	if (priv->rx_td_enabled == enable)
@@ -1240,11 +1241,12 @@ static void dpaa2_eth_set_rx_taildrop(st
 	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
 
 	for (i = 0; i < priv->num_fqs; i++) {
-		if (priv->fq[i].type != DPAA2_RX_FQ)
+		fq = &priv->fq[i];
+		if (fq->type != DPAA2_RX_FQ)
 			continue;
 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
-					priv->fq[i].flowid, &td);
+					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+					fq->tc, fq->flowid, &td);
 		if (err) {
 			netdev_err(priv->net_dev,
 				   "dpni_set_taildrop() failed\n");
@@ -2338,7 +2340,7 @@ static void set_fq_affinity(struct dpaa2
 
 static void setup_fqs(struct dpaa2_eth_priv *priv)
 {
-	int i;
+	int i, j;
 
 	/* We have one TxConf FQ per Tx flow.
 	 * The number of Tx and Rx queues is the same.
@@ -2350,10 +2352,13 @@ static void setup_fqs(struct dpaa2_eth_p
 		priv->fq[priv->num_fqs++].flowid = (u16)i;
 	}
 
-	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-		priv->fq[priv->num_fqs++].flowid = (u16)i;
+	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+			priv->fq[priv->num_fqs].tc = (u8)j;
+			priv->fq[priv->num_fqs++].flowid = (u16)i;
+		}
 	}
 
 	/* For each FQ, decide on which core to process incoming frames */
@@ -2701,7 +2706,7 @@ static int setup_rx_flow(struct dpaa2_et
 	int err;
 
 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
 	if (err) {
 		dev_err(dev, "dpni_get_queue(RX) failed\n");
 		return err;
@@ -2714,7 +2719,7 @@ static int setup_rx_flow(struct dpaa2_et
 	queue.destination.priority = 1;
 	queue.user_context = (u64)(uintptr_t)fq;
 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-			     DPNI_QUEUE_RX, 0, fq->flowid,
+			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
 			     &queue);
 	if (err) {
@@ -2723,6 +2728,10 @@ static int setup_rx_flow(struct dpaa2_et
 	}
 
 	/* xdp_rxq setup */
+	/* only once for each channel */
+	if (fq->tc > 0)
+		return 0;
+
 	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
 			       fq->flowid);
 	if (err) {
@@ -2860,7 +2869,7 @@ static int config_legacy_hash_key(struct
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_rx_tc_dist_cfg dist_cfg;
-	int err;
+	int i, err = 0;
 
 	memset(&dist_cfg, 0, sizeof(dist_cfg));
 
@@ -2868,9 +2877,14 @@ static int config_legacy_hash_key(struct
 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
 
-	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-	if (err)
-		dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
+					  i, &dist_cfg);
+		if (err) {
+			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+			break;
+		}
+	}
 
 	return err;
 }
@@ -2880,7 +2894,7 @@ static int config_hash_key(struct dpaa2_
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_rx_dist_cfg dist_cfg;
-	int err;
+	int i, err = 0;
 
 	memset(&dist_cfg, 0, sizeof(dist_cfg));
 
@@ -2888,9 +2902,15 @@ static int config_hash_key(struct dpaa2_
 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 	dist_cfg.enable = 1;
 
-	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
-	if (err)
-		dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		dist_cfg.tc = i;
+		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+					    &dist_cfg);
+		if (err) {
+			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+			break;
+		}
+	}
 
 	return err;
 }
@@ -2900,7 +2920,7 @@ static int config_cls_key(struct dpaa2_e
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_rx_dist_cfg dist_cfg;
-	int err;
+	int i, err = 0;
 
 	memset(&dist_cfg, 0, sizeof(dist_cfg));
 
@@ -2908,9 +2928,15 @@ static int config_cls_key(struct dpaa2_e
 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
 	dist_cfg.enable = 1;
 
-	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
-	if (err)
-		dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		dist_cfg.tc = i;
+		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
+					  &dist_cfg);
+		if (err) {
+			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+			break;
+		}
+	}
 
 	return err;
 }
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -291,7 +291,9 @@ struct dpaa2_eth_ch_stats {
 
 /* Maximum number of queues associated with a DPNI */
 #define DPAA2_ETH_MAX_TCS		8
-#define DPAA2_ETH_MAX_RX_QUEUES		16
+#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC	16
+#define DPAA2_ETH_MAX_RX_QUEUES \
+	(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
 #define DPAA2_ETH_MAX_TX_QUEUES		16
 #define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
 					DPAA2_ETH_MAX_TX_QUEUES)
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -502,7 +502,7 @@ static int do_cls_rule(struct net_device
 	dma_addr_t key_iova;
 	u64 fields = 0;
 	void *key_buf;
-	int err;
+	int i, err;
 
 	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
 	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
@@ -562,11 +562,18 @@ static int do_cls_rule(struct net_device
 			fs_act.options |= DPNI_FS_OPT_DISCARD;
 		else
 			fs_act.flow_id = fs->ring_cookie;
-		err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
-					fs->location, &rule_cfg, &fs_act);
-	} else {
-		err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
-					   &rule_cfg);
+	}
+	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+		if (add)
+			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+						i, fs->location, &rule_cfg,
+						&fs_act);
+		else
+			err = dpni_remove_fs_entry(priv->mc_io, 0,
+						   priv->mc_token, i,
+						   &rule_cfg);
+		if (err)
+			break;
 	}
 
 	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);