| 1 | From 68622c8bb029f9fd4c83ffa3bd979fa62a3599d0 Mon Sep 17 00:00:00 2001 |
| 2 | From: Bogdan Purcareata <bogdan.purcareata@nxp.com> |
| 3 | Date: Mon, 13 Nov 2017 17:26:13 +0200 |
| 4 | Subject: [PATCH] dpaa2-eth: Add CEETM qdisc support |
| 5 | |
| 6 | Features include: |
| 7 | - dual rate shaping support |
| 8 | - per-channel shaping and classification |
| 9 | - strict / weighted scheduling among num_tc classes |
| 10 | - taildrop (TD) enabled for configured class queues |
| 11 | - prio class (leaf) firmware statistics support |
| 12 | - weights normalized based on max |
| 13 | - tc filter-based classification |
| 14 | |
| 15 | Only one CEETM channel is supported; only channel shaping is supported. |
| 16 | |
| 17 | Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com> |
| 18 | Signed-off-by: Camelia Groza <camelia.groza@nxp.com> |
| 19 | --- |
| 20 | drivers/net/ethernet/freescale/dpaa2/Kconfig | 7 + |
| 21 | drivers/net/ethernet/freescale/dpaa2/Makefile | 1 + |
| 22 | .../net/ethernet/freescale/dpaa2/dpaa2-eth-ceetm.c | 1219 ++++++++++++++++++++ |
| 23 | .../net/ethernet/freescale/dpaa2/dpaa2-eth-ceetm.h | 207 ++++ |
| 24 | drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 53 +- |
| 25 | drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7 + |
| 26 | 6 files changed, 1482 insertions(+), 12 deletions(-) |
| 27 | create mode 100644 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-ceetm.c |
| 28 | create mode 100644 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-ceetm.h |
| 29 | |
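As a standalone illustration (userspace C, not part of the driver; helper names are made up) of the rate conversion used by dpaa2_eth_bps_to_mbit() below: tc passes rates in bytes per second, 1 Mbit/s equals 125000 Bps, and the driver approximates the division with a right shift by 17 (i.e. dividing by 131072), which under-reports the rate by roughly 4-5%:

#include <stdint.h>
#include <stdio.h>

/* Exact conversion: 1 Mbit/s = 125000 bytes/s */
static uint64_t bps_to_mbit_exact(uint64_t bps) { return bps / 125000; }
/* Driver approximation: divide by 2^17 = 131072 */
static uint64_t bps_to_mbit_shift(uint64_t bps) { return bps >> 17; }

int main(void)
{
	uint64_t rates_bps[] = { 12500000ULL /* 100 Mbit/s */, 125000000ULL /* 1 Gbit/s */ };
	unsigned int i;

	for (i = 0; i < sizeof(rates_bps) / sizeof(rates_bps[0]); i++)
		printf("%llu Bps -> exact %llu Mbit/s, shifted %llu Mbit/s\n",
		       (unsigned long long)rates_bps[i],
		       (unsigned long long)bps_to_mbit_exact(rates_bps[i]),
		       (unsigned long long)bps_to_mbit_shift(rates_bps[i]));
	return 0;
}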
| 30 | --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig |
| 31 | +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig |
| 32 | @@ -25,6 +25,13 @@ config FSL_DPAA2_ETH_USE_ERR_QUEUE |
| 33 | in hardware). |
| 34 | This may impact performance, recommended for debugging |
| 35 | purposes only. |
| 36 | + |
| 37 | +config FSL_DPAA2_ETH_CEETM |
| 38 | + depends on NET_SCHED |
| 39 | + bool "DPAA2 Ethernet CEETM QoS" |
| 40 | + default n |
| 41 | + help |
| 42 | + Enable QoS offloading support through the CEETM hardware block. |
| 43 | endif |
| 44 | |
| 45 | config FSL_DPAA2_PTP_CLOCK |
| 46 | --- a/drivers/net/ethernet/freescale/dpaa2/Makefile |
| 47 | +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile |
| 48 | @@ -8,6 +8,7 @@ obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl |
| 49 | |
| 50 | fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o |
| 51 | fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o |
| 52 | +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o |
| 53 | fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o |
| 54 | |
| 55 | # Needed by the tracing framework |
| 56 | --- /dev/null |
| 57 | +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-ceetm.c |
| 58 | @@ -0,0 +1,1219 @@ |
| 59 | +/* Copyright 2017 NXP |
| 60 | + * |
| 61 | + * Redistribution and use in source and binary forms, with or without |
| 62 | + * modification, are permitted provided that the following conditions are met: |
| 63 | + * * Redistributions of source code must retain the above copyright |
| 64 | + * notice, this list of conditions and the following disclaimer. |
| 65 | + * * Redistributions in binary form must reproduce the above copyright |
| 66 | + * notice, this list of conditions and the following disclaimer in the |
| 67 | + * documentation and/or other materials provided with the distribution. |
| 68 | + * * Neither the name of Freescale Semiconductor nor the |
| 69 | + * names of its contributors may be used to endorse or promote products |
| 70 | + * derived from this software without specific prior written permission. |
| 71 | + * |
| 72 | + * |
| 73 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
| 74 | + * GNU General Public License ("GPL") as published by the Free Software |
| 75 | + * Foundation, either version 2 of that License or (at your option) any |
| 76 | + * later version. |
| 77 | + * |
| 78 | + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
| 79 | + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 80 | + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 81 | + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
| 82 | + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 83 | + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 84 | + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
| 85 | + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 86 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 87 | + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 88 | + */ |
| 89 | + |
| 90 | +#include <linux/init.h> |
| 91 | +#include <linux/module.h> |
| 92 | + |
| 93 | +#include "dpaa2-eth-ceetm.h" |
| 94 | +#include "dpaa2-eth.h" |
| 95 | + |
| 96 | +#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc" |
| 97 | +/* Convert the rate passed by userspace (in Bps) to the Mbit value expected by the firmware */ |
| 98 | +#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17) |
| 99 | + |
| 100 | +static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = { |
| 101 | + [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) }, |
| 102 | + [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) }, |
| 103 | +}; |
| 104 | + |
| 105 | +struct Qdisc_ops dpaa2_ceetm_qdisc_ops; |
| 106 | + |
| 107 | +static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv, |
| 108 | + struct dpni_tx_shaping_cfg *scfg, |
| 109 | + struct dpni_tx_shaping_cfg *ecfg, |
| 110 | + int coupled, int ch_id) |
| 111 | +{ |
| 112 | + int err = 0; |
| 113 | + |
| 114 | + netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__, |
| 115 | + ch_id, scfg->rate_limit); |
| 116 | + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg, |
| 117 | + ecfg, coupled); |
| 118 | + if (err) |
| 119 | + netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n"); |
| 120 | + |
| 121 | + return err; |
| 122 | +} |
| 123 | + |
| 124 | +static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv, |
| 125 | + int ch_id) |
| 126 | +{ |
| 127 | + struct dpni_tx_shaping_cfg cfg = { 0 }; |
| 128 | + |
| 129 | + return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id); |
| 130 | +} |
| 131 | + |
| 132 | +static inline int |
| 133 | +dpaa2_eth_update_shaping_cfg(struct net_device *dev, |
| 134 | + struct dpaa2_ceetm_shaping_cfg cfg, |
| 135 | + struct dpni_tx_shaping_cfg *scfg, |
| 136 | + struct dpni_tx_shaping_cfg *ecfg) |
| 137 | +{ |
| 138 | + scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir); |
| 139 | + ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir); |
| 140 | + |
| 141 | + if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) { |
| 142 | + netdev_err(dev, "Committed burst size must not exceed %d\n", |
| 143 | + DPAA2_ETH_MAX_BURST_SIZE); |
| 144 | + return -EINVAL; |
| 145 | + } |
| 146 | + |
| 147 | + scfg->max_burst_size = cfg.cbs; |
| 148 | + |
| 149 | + if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) { |
| 150 | + netdev_err(dev, "Excess burst size must not exceed %d\n", |
| 151 | + DPAA2_ETH_MAX_BURST_SIZE); |
| 152 | + return -EINVAL; |
| 153 | + } |
| 154 | + |
| 155 | + ecfg->max_burst_size = cfg.ebs; |
| 156 | + |
| 157 | + if ((!cfg.cir || !cfg.eir) && cfg.coupled) { |
| 158 | + netdev_err(dev, "Coupling can only be set when both CIR and EIR are non-zero\n"); |
| 159 | + return -EINVAL; |
| 160 | + } |
| 161 | + |
| 162 | + return 0; |
| 163 | +} |
| 164 | + |
| 165 | +enum update_tx_prio { |
| 166 | + DPAA2_ETH_ADD_CQ, |
| 167 | + DPAA2_ETH_DEL_CQ, |
| 168 | +}; |
| 169 | + |
| 170 | +/* Normalize weights based on max passed value */ |
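+/* Each user-provided weight w is mapped to
+ *   DPAA2_CEETM_MIN_WEIGHT + w * increment,
+ * where increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) / weight_max,
+ * so the largest weight lands at (or just below) DPAA2_CEETM_MAX_WEIGHT and the
+ * relative ordering is preserved. For example, weights {1, 2, 4} map to
+ * delta_bandwidth values {6275, 12450, 24800}.
+ */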
| 171 | +static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv) |
| 172 | +{ |
| 173 | + struct dpni_tx_schedule_cfg *sched_cfg; |
| 174 | + struct dpaa2_ceetm_class *cl; |
| 175 | + u32 qpri; |
| 176 | + u16 weight_max = 0, increment; |
| 177 | + int i; |
| 178 | + |
| 179 | + /* Find the maximum weight among the provided values */ |
| 180 | + for (i = 0; i < priv->clhash.hashsize; i++) |
| 181 | + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) |
| 182 | + weight_max = (weight_max == 0 ? cl->prio.weight : |
| 183 | + (weight_max < cl->prio.weight ? |
| 184 | + cl->prio.weight : weight_max)); |
| 185 | + |
| 186 | + /* If there are no elements, there's nothing to do */ |
| 187 | + if (weight_max == 0) |
| 188 | + return 0; |
| 189 | + |
| 190 | + increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) / |
| 191 | + weight_max; |
| 192 | + |
| 193 | + for (i = 0; i < priv->clhash.hashsize; i++) { |
| 194 | + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) { |
| 195 | + if (cl->prio.mode == STRICT_PRIORITY) |
| 196 | + continue; |
| 197 | + |
| 198 | + qpri = cl->prio.qpri; |
| 199 | + sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri]; |
| 200 | + |
| 201 | + sched_cfg->delta_bandwidth = |
| 202 | + DPAA2_CEETM_MIN_WEIGHT + |
| 203 | + (cl->prio.weight * increment); |
| 204 | + |
| 205 | + pr_debug("%s: Normalized CQ qpri %d weight to %d\n", |
| 206 | + __func__, qpri, sched_cfg->delta_bandwidth); |
| 207 | + } |
| 208 | + } |
| 209 | + |
| 210 | + return 0; |
| 211 | +} |
| 212 | + |
| 213 | +static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv, |
| 214 | + struct dpaa2_ceetm_class *cl, |
| 215 | + enum update_tx_prio type) |
| 216 | +{ |
| 217 | + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent); |
| 218 | + struct dpni_tx_schedule_cfg *sched_cfg; |
| 219 | + struct dpni_taildrop td = {0}; |
| 220 | + u8 ch_id = 0, tc_id = 0; |
| 221 | + u32 qpri = 0; |
| 222 | + int err = 0; |
| 223 | + |
| 224 | + qpri = cl->prio.qpri; |
| 225 | + tc_id = DPNI_BUILD_CH_TC(ch_id, qpri); |
| 226 | + |
| 227 | + switch (type) { |
| 228 | + case DPAA2_ETH_ADD_CQ: |
| 229 | + /* Enable taildrop */ |
| 230 | + td.enable = 1; |
| 231 | + td.units = DPNI_CONGESTION_UNIT_FRAMES; |
| 232 | + td.threshold = DPAA2_CEETM_TD_THRESHOLD; |
| 233 | + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, |
| 234 | + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id, |
| 235 | + 0, &td); |
| 236 | + if (err) { |
| 237 | + netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n", |
| 238 | + err); |
| 239 | + return err; |
| 240 | + } |
| 241 | + break; |
| 242 | + case DPAA2_ETH_DEL_CQ: |
| 243 | + /* Disable taildrop */ |
| 244 | + td.enable = 0; |
| 245 | + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, |
| 246 | + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id, |
| 247 | + 0, &td); |
| 248 | + if (err) { |
| 249 | + netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n", |
| 250 | + err); |
| 251 | + return err; |
| 252 | + } |
| 253 | + break; |
| 254 | + } |
| 255 | + |
| 256 | + /* We can zero out the structure in the tx_prio_cfg array */ |
| 257 | + if (type == DPAA2_ETH_DEL_CQ) { |
| 258 | + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri]; |
| 259 | + memset(sched_cfg, 0, sizeof(*sched_cfg)); |
| 260 | + } |
| 261 | + |
| 262 | + /* Normalize priorities */ |
| 263 | + err = dpaa2_eth_normalize_tx_prio(sch); |
| 264 | + |
| 265 | + /* Dump the resulting Tx priorities configuration */ |
| 266 | + print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1, |
| 267 | + &sch->prio.tx_prio_cfg, |
| 268 | + sizeof(sch->prio.tx_prio_cfg), 0); |
| 269 | + |
| 270 | + /* Call dpni_set_tx_priorities for the entire prio qdisc */ |
| 271 | + err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token, |
| 272 | + &sch->prio.tx_prio_cfg); |
| 273 | + if (err) |
| 274 | + netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n", |
| 275 | + err); |
| 276 | + |
| 277 | + return err; |
| 278 | +} |
| 279 | + |
| 280 | +static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv) |
| 281 | +{ |
| 282 | + priv->ceetm_en = true; |
| 283 | +} |
| 284 | + |
| 285 | +static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv) |
| 286 | +{ |
| 287 | + priv->ceetm_en = false; |
| 288 | +} |
| 289 | + |
| 290 | +/* Find class in qdisc hash table using given handle */ |
| 291 | +static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle, |
| 292 | + struct Qdisc *sch) |
| 293 | +{ |
| 294 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 295 | + struct Qdisc_class_common *clc; |
| 296 | + |
| 297 | + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n", |
| 298 | + __func__, handle, sch->handle); |
| 299 | + |
| 300 | + clc = qdisc_class_find(&priv->clhash, handle); |
| 301 | + return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL; |
| 302 | +} |
| 303 | + |
| 304 | +/* Insert a class in the qdisc's class hash */ |
| 305 | +static void dpaa2_ceetm_link_class(struct Qdisc *sch, |
| 306 | + struct Qdisc_class_hash *clhash, |
| 307 | + struct Qdisc_class_common *common) |
| 308 | +{ |
| 309 | + sch_tree_lock(sch); |
| 310 | + qdisc_class_hash_insert(clhash, common); |
| 311 | + sch_tree_unlock(sch); |
| 312 | + qdisc_class_hash_grow(sch, clhash); |
| 313 | +} |
| 314 | + |
| 315 | +/* Destroy a ceetm class */ |
| 316 | +static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch, |
| 317 | + struct dpaa2_ceetm_class *cl) |
| 318 | +{ |
| 319 | + struct net_device *dev = qdisc_dev(sch); |
| 320 | + struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 321 | + |
| 322 | + if (!cl) |
| 323 | + return; |
| 324 | + |
| 325 | + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n", |
| 326 | + __func__, cl->common.classid, sch->handle); |
| 327 | + |
| 328 | + /* Recurse into child first */ |
| 329 | + if (cl->child) { |
| 330 | + qdisc_put(cl->child); |
| 331 | + cl->child = NULL; |
| 332 | + } |
| 333 | + |
| 334 | + switch (cl->type) { |
| 335 | + case CEETM_ROOT: |
| 336 | + if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id)) |
| 337 | + netdev_err(dev, "Error resetting channel shaping\n"); |
| 338 | + |
| 339 | + break; |
| 340 | + |
| 341 | + case CEETM_PRIO: |
| 342 | + if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ)) |
| 343 | + netdev_err(dev, "Error resetting tx_priorities\n"); |
| 344 | + |
| 345 | + if (cl->prio.cstats) |
| 346 | + free_percpu(cl->prio.cstats); |
| 347 | + |
| 348 | + break; |
| 349 | + } |
| 350 | + |
| 351 | + tcf_block_put(cl->block); |
| 352 | + kfree(cl); |
| 353 | +} |
| 354 | + |
| 355 | +/* Destroy a ceetm qdisc */ |
| 356 | +static void dpaa2_ceetm_destroy(struct Qdisc *sch) |
| 357 | +{ |
| 358 | + unsigned int i; |
| 359 | + struct hlist_node *next; |
| 360 | + struct dpaa2_ceetm_class *cl; |
| 361 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 362 | + struct net_device *dev = qdisc_dev(sch); |
| 363 | + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev); |
| 364 | + |
| 365 | + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n", |
| 366 | + __func__, sch->handle); |
| 367 | + |
| 368 | + /* All filters need to be removed before destroying the classes */ |
| 369 | + tcf_block_put(priv->block); |
| 370 | + |
| 371 | + for (i = 0; i < priv->clhash.hashsize; i++) { |
| 372 | + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) |
| 373 | + tcf_block_put(cl->block); |
| 374 | + } |
| 375 | + |
| 376 | + for (i = 0; i < priv->clhash.hashsize; i++) { |
| 377 | + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i], |
| 378 | + common.hnode) |
| 379 | + dpaa2_ceetm_cls_destroy(sch, cl); |
| 380 | + } |
| 381 | + |
| 382 | + qdisc_class_hash_destroy(&priv->clhash); |
| 383 | + |
| 384 | + switch (priv->type) { |
| 385 | + case CEETM_ROOT: |
| 386 | + dpaa2_eth_ceetm_disable(priv_eth); |
| 387 | + |
| 388 | + if (priv->root.qstats) |
| 389 | + free_percpu(priv->root.qstats); |
| 390 | + |
| 391 | + if (!priv->root.qdiscs) |
| 392 | + break; |
| 393 | + |
| 394 | + /* Destroy the pfifo qdiscs in case they haven't been attached |
| 395 | + * to the netdev queues yet. |
| 396 | + */ |
| 397 | + for (i = 0; i < dev->num_tx_queues; i++) |
| 398 | + if (priv->root.qdiscs[i]) |
| 399 | + qdisc_put(priv->root.qdiscs[i]); |
| 400 | + |
| 401 | + kfree(priv->root.qdiscs); |
| 402 | + break; |
| 403 | + |
| 404 | + case CEETM_PRIO: |
| 405 | + if (priv->prio.parent) |
| 406 | + priv->prio.parent->child = NULL; |
| 407 | + break; |
| 408 | + } |
| 409 | +} |
| 410 | + |
| 411 | +static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb) |
| 412 | +{ |
| 413 | + struct Qdisc *qdisc; |
| 414 | + unsigned int ntx, i; |
| 415 | + struct nlattr *nest; |
| 416 | + struct dpaa2_ceetm_tc_qopt qopt; |
| 417 | + struct dpaa2_ceetm_qdisc_stats *qstats; |
| 418 | + struct net_device *dev = qdisc_dev(sch); |
| 419 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 420 | + |
| 421 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 422 | + |
| 423 | + sch_tree_lock(sch); |
| 424 | + memset(&qopt, 0, sizeof(qopt)); |
| 425 | + qopt.type = priv->type; |
| 426 | + qopt.shaped = priv->shaped; |
| 427 | + |
| 428 | + switch (priv->type) { |
| 429 | + case CEETM_ROOT: |
| 430 | + /* Gather statistics from the underlying pfifo qdiscs */ |
| 431 | + sch->q.qlen = 0; |
| 432 | + memset(&sch->bstats, 0, sizeof(sch->bstats)); |
| 433 | + memset(&sch->qstats, 0, sizeof(sch->qstats)); |
| 434 | + |
| 435 | + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { |
| 436 | + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; |
| 437 | + sch->q.qlen += qdisc->q.qlen; |
| 438 | + sch->bstats.bytes += qdisc->bstats.bytes; |
| 439 | + sch->bstats.packets += qdisc->bstats.packets; |
| 440 | + sch->qstats.qlen += qdisc->qstats.qlen; |
| 441 | + sch->qstats.backlog += qdisc->qstats.backlog; |
| 442 | + sch->qstats.drops += qdisc->qstats.drops; |
| 443 | + sch->qstats.requeues += qdisc->qstats.requeues; |
| 444 | + sch->qstats.overlimits += qdisc->qstats.overlimits; |
| 445 | + } |
| 446 | + |
| 447 | + for_each_online_cpu(i) { |
| 448 | + qstats = per_cpu_ptr(priv->root.qstats, i); |
| 449 | + sch->qstats.drops += qstats->drops; |
| 450 | + } |
| 451 | + |
| 452 | + break; |
| 453 | + |
| 454 | + case CEETM_PRIO: |
| 455 | + qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A; |
| 456 | + qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B; |
| 457 | + qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups; |
| 458 | + break; |
| 459 | + |
| 460 | + default: |
| 461 | + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); |
| 462 | + sch_tree_unlock(sch); |
| 463 | + return -EINVAL; |
| 464 | + } |
| 465 | + |
| 466 | + nest = nla_nest_start_noflag(skb, TCA_OPTIONS); |
| 467 | + if (!nest) |
| 468 | + goto nla_put_failure; |
| 469 | + if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt)) |
| 470 | + goto nla_put_failure; |
| 471 | + nla_nest_end(skb, nest); |
| 472 | + |
| 473 | + sch_tree_unlock(sch); |
| 474 | + return skb->len; |
| 475 | + |
| 476 | +nla_put_failure: |
| 477 | + sch_tree_unlock(sch); |
| 478 | + nla_nest_cancel(skb, nest); |
| 479 | + return -EMSGSIZE; |
| 480 | +} |
| 481 | + |
| 482 | +static int dpaa2_ceetm_change_prio(struct Qdisc *sch, |
| 483 | + struct dpaa2_ceetm_qdisc *priv, |
| 484 | + struct dpaa2_ceetm_tc_qopt *qopt) |
| 485 | +{ |
| 486 | + /* TODO: Once LX2 support is added */ |
| 487 | + /* priv->shaped = parent_cl->shaped; */ |
| 488 | + priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A; |
| 489 | + priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B; |
| 490 | + priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups; |
| 491 | + |
| 492 | + return 0; |
| 493 | +} |
| 494 | + |
| 495 | +/* Edit a ceetm qdisc */ |
| 496 | +static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt, |
| 497 | + struct netlink_ext_ack *extack) |
| 498 | +{ |
| 499 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 500 | + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1]; |
| 501 | + struct dpaa2_ceetm_tc_qopt *qopt; |
| 502 | + int err; |
| 503 | + |
| 504 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 505 | + |
| 506 | + err = nla_parse_nested_deprecated(tb, DPAA2_CEETM_TCA_QOPS, opt, |
| 507 | + dpaa2_ceetm_policy, extack); |
| 508 | + if (err < 0) { |
| 509 | + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__, |
| 510 | + "nla_parse_nested_deprecated"); |
| 511 | + return err; |
| 512 | + } |
| 513 | + |
| 514 | + if (!tb[DPAA2_CEETM_TCA_QOPS]) { |
| 515 | + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__, |
| 516 | + "tb"); |
| 517 | + return -EINVAL; |
| 518 | + } |
| 519 | + |
| 520 | + if (TC_H_MIN(sch->handle)) { |
| 521 | + pr_err("CEETM: a qdisc should not have a minor\n"); |
| 522 | + return -EINVAL; |
| 523 | + } |
| 524 | + |
| 525 | + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]); |
| 526 | + |
| 527 | + if (priv->type != qopt->type) { |
| 528 | + pr_err("CEETM: qdisc %X is not of the provided type\n", |
| 529 | + sch->handle); |
| 530 | + return -EINVAL; |
| 531 | + } |
| 532 | + |
| 533 | + switch (priv->type) { |
| 534 | + case CEETM_PRIO: |
| 535 | + err = dpaa2_ceetm_change_prio(sch, priv, qopt); |
| 536 | + break; |
| 537 | + default: |
| 538 | + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); |
| 539 | + err = -EINVAL; |
| 540 | + } |
| 541 | + |
| 542 | + return err; |
| 543 | +} |
| 544 | + |
| 545 | +/* Configure a root ceetm qdisc */ |
| 546 | +static int dpaa2_ceetm_init_root(struct Qdisc *sch, |
| 547 | + struct dpaa2_ceetm_qdisc *priv, |
| 548 | + struct dpaa2_ceetm_tc_qopt *qopt, |
| 549 | + struct netlink_ext_ack *extack) |
| 550 | +{ |
| 551 | + struct net_device *dev = qdisc_dev(sch); |
| 552 | + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev); |
| 553 | + struct netdev_queue *dev_queue; |
| 554 | + unsigned int i, parent_id; |
| 555 | + struct Qdisc *qdisc; |
| 556 | + |
| 557 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 558 | + |
| 559 | + /* Validate inputs */ |
| 560 | + if (sch->parent != TC_H_ROOT) { |
| 561 | + pr_err("CEETM: a root ceetm qdisc must be root\n"); |
| 562 | + return -EINVAL; |
| 563 | + } |
| 564 | + |
| 565 | + /* Pre-allocate underlying pfifo qdiscs. |
| 566 | + * |
| 567 | + * We want to offload shaping and scheduling decisions to the hardware. |
| 568 | + * The pfifo qdiscs will be attached to the netdev queues and will |
| 569 | + * guide the traffic from the IP stack down to the driver with minimum |
| 570 | + * interference. |
| 571 | + * |
| 572 | + * The CEETM qdisc and class hierarchy is traversed when the traffic |
| 573 | + * reaches the driver. |
| 574 | + */ |
| 575 | + priv->root.qdiscs = kcalloc(dev->num_tx_queues, |
| 576 | + sizeof(priv->root.qdiscs[0]), |
| 577 | + GFP_KERNEL); |
| 578 | + if (!priv->root.qdiscs) |
| 579 | + return -ENOMEM; |
| 580 | + |
| 581 | + for (i = 0; i < dev->num_tx_queues; i++) { |
| 582 | + dev_queue = netdev_get_tx_queue(dev, i); |
| 583 | + parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle), |
| 584 | + TC_H_MIN(i + PFIFO_MIN_OFFSET)); |
| 585 | + |
| 586 | + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, |
| 587 | + parent_id, extack); |
| 588 | + if (!qdisc) |
| 589 | + return -ENOMEM; |
| 590 | + |
| 591 | + priv->root.qdiscs[i] = qdisc; |
| 592 | + qdisc->flags |= TCQ_F_ONETXQUEUE; |
| 593 | + } |
| 594 | + |
| 595 | + sch->flags |= TCQ_F_MQROOT; |
| 596 | + |
| 597 | + priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats); |
| 598 | + if (!priv->root.qstats) { |
| 599 | + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", |
| 600 | + __func__); |
| 601 | + return -ENOMEM; |
| 602 | + } |
| 603 | + |
| 604 | + dpaa2_eth_ceetm_enable(priv_eth); |
| 605 | + return 0; |
| 606 | +} |
| 607 | + |
| 608 | +/* Configure a prio ceetm qdisc */ |
| 609 | +static int dpaa2_ceetm_init_prio(struct Qdisc *sch, |
| 610 | + struct dpaa2_ceetm_qdisc *priv, |
| 611 | + struct dpaa2_ceetm_tc_qopt *qopt) |
| 612 | +{ |
| 613 | + struct net_device *dev = qdisc_dev(sch); |
| 614 | + struct dpaa2_ceetm_class *parent_cl; |
| 615 | + struct Qdisc *parent_qdisc; |
| 616 | + |
| 617 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 618 | + |
| 619 | + if (sch->parent == TC_H_ROOT) { |
| 620 | + pr_err("CEETM: a prio ceetm qdisc can not be root\n"); |
| 621 | + return -EINVAL; |
| 622 | + } |
| 623 | + |
| 624 | + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent)); |
| 625 | + if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) { |
| 626 | + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n"); |
| 627 | + return -EINVAL; |
| 628 | + } |
| 629 | + |
| 630 | + /* Obtain the parent root ceetm_class */ |
| 631 | + parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc); |
| 632 | + |
| 633 | + if (!parent_cl || parent_cl->type != CEETM_ROOT) { |
| 634 | + pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n"); |
| 635 | + return -EINVAL; |
| 636 | + } |
| 637 | + |
| 638 | + priv->prio.parent = parent_cl; |
| 639 | + parent_cl->child = sch; |
| 640 | + |
| 641 | + return dpaa2_ceetm_change_prio(sch, priv, qopt); |
| 642 | +} |
| 643 | + |
| 644 | +/* Configure a generic ceetm qdisc */ |
| 645 | +static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt, |
| 646 | + struct netlink_ext_ack *extack) |
| 647 | +{ |
| 648 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 649 | + struct net_device *dev = qdisc_dev(sch); |
| 650 | + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1]; |
| 651 | + struct dpaa2_ceetm_tc_qopt *qopt; |
| 652 | + int err; |
| 653 | + |
| 654 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 655 | + |
| 656 | + if (!netif_is_multiqueue(dev)) |
| 657 | + return -EOPNOTSUPP; |
| 658 | + |
| 659 | + err = tcf_block_get(&priv->block, &priv->filter_list, sch, extack); |
| 660 | + if (err) { |
| 661 | + pr_err("CEETM: unable to get tcf_block\n"); |
| 662 | + return err; |
| 663 | + } |
| 664 | + |
| 665 | + if (!opt) { |
| 666 | + pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n", |
| 667 | + __func__); |
| 668 | + return -EINVAL; |
| 669 | + } |
| 670 | + |
| 671 | + err = nla_parse_nested_deprecated(tb, DPAA2_CEETM_TCA_QOPS, opt, |
| 672 | + dpaa2_ceetm_policy, extack); |
| 673 | + if (err < 0) { |
| 674 | + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__, |
| 675 | + "nla_parse_nested_deprecated"); |
| 676 | + return err; |
| 677 | + } |
| 678 | + |
| 679 | + if (!tb[DPAA2_CEETM_TCA_QOPS]) { |
| 680 | + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__, |
| 681 | + "tb"); |
| 682 | + return -EINVAL; |
| 683 | + } |
| 684 | + |
| 685 | + if (TC_H_MIN(sch->handle)) { |
| 686 | + pr_err("CEETM: a qdisc should not have a minor\n"); |
| 687 | + return -EINVAL; |
| 688 | + } |
| 689 | + |
| 690 | + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]); |
| 691 | + |
| 692 | + /* Initialize the class hash list. Each qdisc has its own class hash */ |
| 693 | + err = qdisc_class_hash_init(&priv->clhash); |
| 694 | + if (err < 0) { |
| 695 | + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n", |
| 696 | + __func__); |
| 697 | + return err; |
| 698 | + } |
| 699 | + |
| 700 | + priv->type = qopt->type; |
| 701 | + priv->shaped = qopt->shaped; |
| 702 | + |
| 703 | + switch (priv->type) { |
| 704 | + case CEETM_ROOT: |
| 705 | + err = dpaa2_ceetm_init_root(sch, priv, qopt, extack); |
| 706 | + break; |
| 707 | + case CEETM_PRIO: |
| 708 | + err = dpaa2_ceetm_init_prio(sch, priv, qopt); |
| 709 | + break; |
| 710 | + default: |
| 711 | + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__); |
| 712 | + /* Note: dpaa2_ceetm_destroy() will be called by our caller */ |
| 713 | + err = -EINVAL; |
| 714 | + } |
| 715 | + |
| 716 | + return err; |
| 717 | +} |
| 718 | + |
| 719 | +/* Attach the underlying pfifo qdiscs */ |
| 720 | +static void dpaa2_ceetm_attach(struct Qdisc *sch) |
| 721 | +{ |
| 722 | + struct net_device *dev = qdisc_dev(sch); |
| 723 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 724 | + struct Qdisc *qdisc, *old_qdisc; |
| 725 | + unsigned int i; |
| 726 | + |
| 727 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 728 | + |
| 729 | + for (i = 0; i < dev->num_tx_queues; i++) { |
| 730 | + qdisc = priv->root.qdiscs[i]; |
| 731 | + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); |
| 732 | + if (old_qdisc) |
| 733 | + qdisc_put(old_qdisc); |
| 734 | + } |
| 735 | + |
| 736 | + /* Remove the references to the pfifo qdiscs since the kernel will |
| 737 | + * destroy them when needed. No cleanup on our part is required from |
| 738 | + * this point on. |
| 739 | + */ |
| 740 | + kfree(priv->root.qdiscs); |
| 741 | + priv->root.qdiscs = NULL; |
| 742 | +} |
| 743 | + |
| 744 | +static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid) |
| 745 | +{ |
| 746 | + struct dpaa2_ceetm_class *cl; |
| 747 | + |
| 748 | + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n", |
| 749 | + __func__, classid, sch->handle); |
| 750 | + cl = dpaa2_ceetm_find(classid, sch); |
| 751 | + |
| 752 | + return (unsigned long)cl; |
| 753 | +} |
| 754 | + |
| 755 | +static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl, |
| 756 | + struct dpaa2_ceetm_tc_copt *copt, |
| 757 | + struct net_device *dev) |
| 758 | +{ |
| 759 | + struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 760 | + struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 }; |
| 761 | + int err = 0; |
| 762 | + |
| 763 | + pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__, |
| 764 | + cl->common.classid); |
| 765 | + |
| 766 | + if (!cl->shaped) |
| 767 | + return 0; |
| 768 | + |
| 769 | + if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg, |
| 770 | + &scfg, &ecfg)) |
| 771 | + return -EINVAL; |
| 772 | + |
| 773 | + err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg, |
| 774 | + copt->shaping_cfg.coupled, |
| 775 | + cl->root.ch_id); |
| 776 | + if (err) |
| 777 | + return err; |
| 778 | + |
| 779 | + memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg, |
| 780 | + sizeof(struct dpaa2_ceetm_shaping_cfg)); |
| 781 | + |
| 782 | + return err; |
| 783 | +} |
| 784 | + |
| 785 | +static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl, |
| 786 | + struct dpaa2_ceetm_tc_copt *copt, |
| 787 | + struct net_device *dev) |
| 788 | +{ |
| 789 | + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent); |
| 790 | + struct dpni_tx_schedule_cfg *sched_cfg; |
| 791 | + struct dpaa2_eth_priv *priv = netdev_priv(dev); |
| 792 | + int err; |
| 793 | + |
| 794 | + pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n", |
| 795 | + __func__, cl->common.classid, copt->mode, copt->weight); |
| 796 | + |
| 797 | + if (!cl->prio.cstats) { |
| 798 | + cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats); |
| 799 | + if (!cl->prio.cstats) { |
| 800 | + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n", |
| 801 | + __func__); |
| 802 | + return -ENOMEM; |
| 803 | + } |
| 804 | + } |
| 805 | + |
| 806 | + cl->prio.mode = copt->mode; |
| 807 | + cl->prio.weight = copt->weight; |
| 808 | + |
| 809 | + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri]; |
| 810 | + |
| 811 | + switch (copt->mode) { |
| 812 | + case STRICT_PRIORITY: |
| 813 | + sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY; |
| 814 | + break; |
| 815 | + case WEIGHTED_A: |
| 816 | + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A; |
| 817 | + break; |
| 818 | + case WEIGHTED_B: |
| 819 | + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B; |
| 820 | + break; |
| 821 | + } |
| 822 | + |
| 823 | + err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ); |
| 824 | + |
| 825 | + return err; |
| 826 | +} |
| 827 | + |
| 828 | +/* Add a new ceetm class */ |
| 829 | +static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid, |
| 830 | + struct dpaa2_ceetm_tc_copt *copt, |
| 831 | + unsigned long *arg, |
| 832 | + struct netlink_ext_ack *extack) |
| 833 | +{ |
| 834 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 835 | + struct net_device *dev = qdisc_dev(sch); |
| 836 | + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev); |
| 837 | + struct dpaa2_ceetm_class *cl; |
| 838 | + int err; |
| 839 | + |
| 840 | + if (copt->type == CEETM_ROOT && |
| 841 | + priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) { |
| 842 | + pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n", |
| 843 | + dpaa2_eth_ch_count(priv_eth), |
| 844 | + dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s"); |
| 845 | + return -EINVAL; |
| 846 | + } |
| 847 | + |
| 848 | + if (copt->type == CEETM_PRIO && |
| 849 | + priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) { |
| 850 | + pr_err("CEETM: only %d queue%s per channel allowed, sorry\n", |
| 851 | + dpaa2_eth_tc_count(priv_eth), |
| 852 | + dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s"); |
| 853 | + return -EINVAL; |
| 854 | + } |
| 855 | + |
| 856 | + cl = kzalloc(sizeof(*cl), GFP_KERNEL); |
| 857 | + if (!cl) |
| 858 | + return -ENOMEM; |
| 859 | + |
| 860 | + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); |
| 861 | + if (err) { |
| 862 | + pr_err("%s: Unable to set new root class\n", __func__); |
| 863 | + goto out_free; |
| 864 | + } |
| 865 | + |
| 866 | + cl->common.classid = classid; |
| 867 | + cl->parent = sch; |
| 868 | + cl->child = NULL; |
| 869 | + |
| 870 | + /* Add class handle in Qdisc */ |
| 871 | + dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common); |
| 872 | + |
| 873 | + cl->shaped = copt->shaped; |
| 874 | + cl->type = copt->type; |
| 875 | + |
| 876 | + /* Claim a CEETM channel / tc. DPAA2 assumes a direct mapping from |
| 877 | + * classid to qdid / qpri, starting from qdid / qpri 0. |
| 878 | + */ |
| 879 | + switch (copt->type) { |
| 880 | + case CEETM_ROOT: |
| 881 | + cl->root.ch_id = classid - sch->handle - 1; |
| 882 | + err = dpaa2_ceetm_cls_change_root(cl, copt, dev); |
| 883 | + break; |
| 884 | + case CEETM_PRIO: |
| 885 | + cl->prio.qpri = classid - sch->handle - 1; |
| 886 | + err = dpaa2_ceetm_cls_change_prio(cl, copt, dev); |
| 887 | + break; |
| 888 | + } |
| 889 | + |
| 890 | + if (err) { |
| 891 | + pr_err("%s: Unable to set new %s class\n", __func__, |
| 892 | + (copt->type == CEETM_ROOT ? "root" : "prio")); |
| 893 | + goto out_free; |
| 894 | + } |
| 895 | + |
| 896 | + switch (copt->type) { |
| 897 | + case CEETM_ROOT: |
| 898 | + pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n", |
| 899 | + __func__, classid, cl->root.ch_id); |
| 900 | + break; |
| 901 | + case CEETM_PRIO: |
| 902 | + pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n", |
| 903 | + __func__, classid, cl->prio.qpri); |
| 904 | + break; |
| 905 | + } |
| 906 | + |
| 907 | + *arg = (unsigned long)cl; |
| 908 | + return 0; |
| 909 | + |
| 910 | +out_free: |
| 911 | + kfree(cl); |
| 912 | + return err; |
| 913 | +} |
| 914 | + |
| 915 | +/* Add or configure a ceetm class */ |
| 916 | +static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid, |
| 917 | + struct nlattr **tca, unsigned long *arg, |
| 918 | + struct netlink_ext_ack *extack) |
| 919 | +{ |
| 920 | + struct dpaa2_ceetm_qdisc *priv; |
| 921 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg; |
| 922 | + struct nlattr *opt = tca[TCA_OPTIONS]; |
| 923 | + struct nlattr *tb[DPAA2_CEETM_TCA_MAX]; |
| 924 | + struct dpaa2_ceetm_tc_copt *copt; |
| 925 | + struct net_device *dev = qdisc_dev(sch); |
| 926 | + int err; |
| 927 | + |
| 928 | + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n", |
| 929 | + __func__, classid, sch->handle); |
| 930 | + |
| 931 | + if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) { |
| 932 | + pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n"); |
| 933 | + return -EINVAL; |
| 934 | + } |
| 935 | + |
| 936 | + priv = qdisc_priv(sch); |
| 937 | + |
| 938 | + if (!opt) { |
| 939 | + pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__); |
| 940 | + return -EINVAL; |
| 941 | + } |
| 942 | + |
| 943 | + err = nla_parse_nested_deprecated(tb, DPAA2_CEETM_TCA_COPT, opt, |
| 944 | + dpaa2_ceetm_policy, extack); |
| 945 | + if (err < 0) { |
| 946 | + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__, |
| 947 | + "nla_parse_nested_deprecated"); |
| 948 | + return -EINVAL; |
| 949 | + } |
| 950 | + |
| 951 | + if (!tb[DPAA2_CEETM_TCA_COPT]) { |
| 952 | + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__, |
| 953 | + "tb"); |
| 954 | + return -EINVAL; |
| 955 | + } |
| 956 | + |
| 957 | + copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]); |
| 958 | + |
| 959 | + /* Configure an existing ceetm class */ |
| 960 | + if (cl) { |
| 961 | + if (copt->type != cl->type) { |
| 962 | + pr_err("CEETM: class %X is not of the provided type\n", |
| 963 | + cl->common.classid); |
| 964 | + return -EINVAL; |
| 965 | + } |
| 966 | + |
| 967 | + switch (copt->type) { |
| 968 | + case CEETM_ROOT: |
| 969 | + return dpaa2_ceetm_cls_change_root(cl, copt, dev); |
| 970 | + case CEETM_PRIO: |
| 971 | + return dpaa2_ceetm_cls_change_prio(cl, copt, dev); |
| 972 | + |
| 973 | + default: |
| 974 | + pr_err(KBUILD_BASENAME " : %s : invalid class\n", |
| 975 | + __func__); |
| 976 | + return -EINVAL; |
| 977 | + } |
| 978 | + } |
| 979 | + |
| 980 | + return dpaa2_ceetm_cls_add(sch, classid, copt, arg, extack); |
| 981 | +} |
| 982 | + |
| 983 | +static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
| 984 | +{ |
| 985 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 986 | + struct dpaa2_ceetm_class *cl; |
| 987 | + unsigned int i; |
| 988 | + |
| 989 | + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle); |
| 990 | + |
| 991 | + if (arg->stop) |
| 992 | + return; |
| 993 | + |
| 994 | + for (i = 0; i < priv->clhash.hashsize; i++) { |
| 995 | + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) { |
| 996 | + if (arg->count < arg->skip) { |
| 997 | + arg->count++; |
| 998 | + continue; |
| 999 | + } |
| 1000 | + if (arg->fn(sch, (unsigned long)cl, arg) < 0) { |
| 1001 | + arg->stop = 1; |
| 1002 | + return; |
| 1003 | + } |
| 1004 | + arg->count++; |
| 1005 | + } |
| 1006 | + } |
| 1007 | +} |
| 1008 | + |
| 1009 | +static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg, |
| 1010 | + struct sk_buff *skb, struct tcmsg *tcm) |
| 1011 | +{ |
| 1012 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg; |
| 1013 | + struct nlattr *nest; |
| 1014 | + struct dpaa2_ceetm_tc_copt copt; |
| 1015 | + |
| 1016 | + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", |
| 1017 | + __func__, cl->common.classid, sch->handle); |
| 1018 | + |
| 1019 | + sch_tree_lock(sch); |
| 1020 | + |
| 1021 | + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle; |
| 1022 | + tcm->tcm_handle = cl->common.classid; |
| 1023 | + |
| 1024 | + memset(&copt, 0, sizeof(copt)); |
| 1025 | + |
| 1026 | + copt.shaped = cl->shaped; |
| 1027 | + copt.type = cl->type; |
| 1028 | + |
| 1029 | + switch (cl->type) { |
| 1030 | + case CEETM_ROOT: |
| 1031 | + if (cl->child) |
| 1032 | + tcm->tcm_info = cl->child->handle; |
| 1033 | + |
| 1034 | + memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg, |
| 1035 | + sizeof(struct dpaa2_ceetm_shaping_cfg)); |
| 1036 | + |
| 1037 | + break; |
| 1038 | + |
| 1039 | + case CEETM_PRIO: |
| 1040 | + if (cl->child) |
| 1041 | + tcm->tcm_info = cl->child->handle; |
| 1042 | + |
| 1043 | + copt.mode = cl->prio.mode; |
| 1044 | + copt.weight = cl->prio.weight; |
| 1045 | + |
| 1046 | + break; |
| 1047 | + } |
| 1048 | + |
| 1049 | + nest = nla_nest_start_noflag(skb, TCA_OPTIONS); |
| 1050 | + if (!nest) |
| 1051 | + goto nla_put_failure; |
| 1052 | + if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt)) |
| 1053 | + goto nla_put_failure; |
| 1054 | + nla_nest_end(skb, nest); |
| 1055 | + sch_tree_unlock(sch); |
| 1056 | + return skb->len; |
| 1057 | + |
| 1058 | +nla_put_failure: |
| 1059 | + sch_tree_unlock(sch); |
| 1060 | + nla_nest_cancel(skb, nest); |
| 1061 | + return -EMSGSIZE; |
| 1062 | +} |
| 1063 | + |
| 1064 | +static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg) |
| 1065 | +{ |
| 1066 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 1067 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg; |
| 1068 | + |
| 1069 | + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", |
| 1070 | + __func__, cl->common.classid, sch->handle); |
| 1071 | + |
| 1072 | + sch_tree_lock(sch); |
| 1073 | + qdisc_class_hash_remove(&priv->clhash, &cl->common); |
| 1074 | + sch_tree_unlock(sch); |
| 1075 | + return 0; |
| 1076 | +} |
| 1077 | + |
| 1078 | +/* Get the class' child qdisc, if any */ |
| 1079 | +static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg) |
| 1080 | +{ |
| 1081 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg; |
| 1082 | + |
| 1083 | + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", |
| 1084 | + __func__, cl->common.classid, sch->handle); |
| 1085 | + |
| 1086 | + switch (cl->type) { |
| 1087 | + case CEETM_ROOT: |
| 1088 | + case CEETM_PRIO: |
| 1089 | + return cl->child; |
| 1090 | + } |
| 1091 | + |
| 1092 | + return NULL; |
| 1093 | +} |
| 1094 | + |
| 1095 | +static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg, |
| 1096 | + struct Qdisc *new, struct Qdisc **old, |
| 1097 | + struct netlink_ext_ack *extack) |
| 1098 | +{ |
| 1099 | + if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) { |
| 1100 | + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n"); |
| 1101 | + return -EOPNOTSUPP; |
| 1102 | + } |
| 1103 | + |
| 1104 | + return 0; |
| 1105 | +} |
| 1106 | + |
| 1107 | +static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg, |
| 1108 | + struct gnet_dump *d) |
| 1109 | +{ |
| 1110 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg; |
| 1111 | + struct gnet_stats_basic_packed tmp_bstats; |
| 1112 | + struct dpaa2_ceetm_tc_xstats xstats; |
| 1113 | + union dpni_statistics dpni_stats; |
| 1114 | + struct net_device *dev = qdisc_dev(sch); |
| 1115 | + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev); |
| 1116 | + u8 ch_id = 0; |
| 1117 | + int err; |
| 1118 | + |
| 1119 | + memset(&xstats, 0, sizeof(xstats)); |
| 1120 | + memset(&tmp_bstats, 0, sizeof(tmp_bstats)); |
| 1121 | + |
| 1122 | + if (cl->type == CEETM_ROOT) |
| 1123 | + return 0; |
| 1124 | + |
| 1125 | + err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3, |
| 1126 | + DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri), |
| 1127 | + &dpni_stats); |
| 1128 | + if (err) |
| 1129 | + netdev_warn(dev, "dpni_get_statistics(%d) failed - %d\n", 3, err); |
| 1130 | + |
| 1131 | + xstats.ceetm_dequeue_bytes = dpni_stats.page_3.egress_dequeue_bytes; |
| 1132 | + xstats.ceetm_dequeue_frames = dpni_stats.page_3.egress_dequeue_frames; |
| 1133 | + xstats.ceetm_reject_bytes = dpni_stats.page_3.egress_reject_bytes; |
| 1134 | + xstats.ceetm_reject_frames = dpni_stats.page_3.egress_reject_frames; |
| 1135 | + |
| 1136 | + return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
| 1137 | +} |
| 1138 | + |
| 1139 | +static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch, |
| 1140 | + unsigned long arg, |
| 1141 | + struct netlink_ext_ack *extack) |
| 1142 | +{ |
| 1143 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 1144 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg; |
| 1145 | + |
| 1146 | + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, |
| 1147 | + cl ? cl->common.classid : 0, sch->handle); |
| 1148 | + return cl ? cl->block : priv->block; |
| 1149 | +} |
| 1150 | + |
| 1151 | +static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch, |
| 1152 | + unsigned long parent, |
| 1153 | + u32 classid) |
| 1154 | +{ |
| 1155 | + struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch); |
| 1156 | + |
| 1157 | + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, |
| 1158 | + cl ? cl->common.classid : 0, sch->handle); |
| 1159 | + return (unsigned long)cl; |
| 1160 | +} |
| 1161 | + |
| 1162 | +static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg) |
| 1163 | +{ |
| 1164 | + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg; |
| 1165 | + |
| 1166 | + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__, |
| 1167 | + cl ? cl->common.classid : 0, sch->handle); |
| 1168 | +} |
| 1169 | + |
| 1170 | +const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = { |
| 1171 | + .graft = dpaa2_ceetm_cls_graft, |
| 1172 | + .leaf = dpaa2_ceetm_cls_leaf, |
| 1173 | + .find = dpaa2_ceetm_cls_find, |
| 1174 | + .change = dpaa2_ceetm_cls_change, |
| 1175 | + .delete = dpaa2_ceetm_cls_delete, |
| 1176 | + .walk = dpaa2_ceetm_cls_walk, |
| 1177 | + .tcf_block = dpaa2_ceetm_tcf_block, |
| 1178 | + .bind_tcf = dpaa2_ceetm_tcf_bind, |
| 1179 | + .unbind_tcf = dpaa2_ceetm_tcf_unbind, |
| 1180 | + .dump = dpaa2_ceetm_cls_dump, |
| 1181 | + .dump_stats = dpaa2_ceetm_cls_dump_stats, |
| 1182 | +}; |
| 1183 | + |
| 1184 | +struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = { |
| 1185 | + .id = "ceetm", |
| 1186 | + .priv_size = sizeof(struct dpaa2_ceetm_qdisc), |
| 1187 | + .cl_ops = &dpaa2_ceetm_cls_ops, |
| 1188 | + .init = dpaa2_ceetm_init, |
| 1189 | + .destroy = dpaa2_ceetm_destroy, |
| 1190 | + .change = dpaa2_ceetm_change, |
| 1191 | + .dump = dpaa2_ceetm_dump, |
| 1192 | + .attach = dpaa2_ceetm_attach, |
| 1193 | + .owner = THIS_MODULE, |
| 1194 | +}; |
| 1195 | + |
| 1196 | +/* Run the filters and classifiers attached to the qdisc on the provided skb */ |
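+/* The walk is two-level: filters on the root qdisc select a root (channel)
+ * class, which gives the qdid; classification then recurses into that class'
+ * child prio qdisc, whose filters select a prio class and thus the qpri.
+ * If no class matches, qdid and qpri are left at their default values.
+ */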
| 1197 | +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch, |
| 1198 | + int *qdid, u8 *qpri) |
| 1199 | +{ |
| 1200 | + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch); |
| 1201 | + struct dpaa2_ceetm_class *cl = NULL; |
| 1202 | + struct tcf_result res; |
| 1203 | + struct tcf_proto *tcf; |
| 1204 | + int result; |
| 1205 | + |
| 1206 | + tcf = rcu_dereference_bh(priv->filter_list); |
| 1207 | + while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) { |
| 1208 | +#ifdef CONFIG_NET_CLS_ACT |
| 1209 | + switch (result) { |
| 1210 | + case TC_ACT_QUEUED: |
| 1211 | + case TC_ACT_STOLEN: |
| 1212 | + case TC_ACT_SHOT: |
| 1213 | + /* No valid class found due to action */ |
| 1214 | + return -1; |
| 1215 | + } |
| 1216 | +#endif |
| 1217 | + cl = (void *)res.class; |
| 1218 | + if (!cl) { |
| 1219 | + /* The filter leads to the qdisc */ |
| 1220 | + if (res.classid == sch->handle) |
| 1221 | + return 0; |
| 1222 | + |
| 1223 | + cl = dpaa2_ceetm_find(res.classid, sch); |
| 1224 | + /* The filter leads to an invalid class */ |
| 1225 | + if (!cl) |
| 1226 | + break; |
| 1227 | + } |
| 1228 | + |
| 1229 | + /* The class might have its own filters attached */ |
| 1230 | + tcf = rcu_dereference_bh(cl->filter_list); |
| 1231 | + } |
| 1232 | + |
| 1233 | + /* No valid class found */ |
| 1234 | + if (!cl) |
| 1235 | + return 0; |
| 1236 | + |
| 1237 | + switch (cl->type) { |
| 1238 | + case CEETM_ROOT: |
| 1239 | + *qdid = cl->root.ch_id; |
| 1240 | + |
| 1241 | + /* The root class does not have a child prio qdisc */ |
| 1242 | + if (!cl->child) |
| 1243 | + return 0; |
| 1244 | + |
| 1245 | + /* Run the prio qdisc classifiers */ |
| 1246 | + return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri); |
| 1247 | + |
| 1248 | + case CEETM_PRIO: |
| 1249 | + *qpri = cl->prio.qpri; |
| 1250 | + break; |
| 1251 | + } |
| 1252 | + |
| 1253 | + return 0; |
| 1254 | +} |
| 1255 | + |
| 1256 | +int __init dpaa2_ceetm_register(void) |
| 1257 | +{ |
| 1258 | + int err = 0; |
| 1259 | + |
| 1260 | + pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n"); |
| 1261 | + |
| 1262 | + err = register_qdisc(&dpaa2_ceetm_qdisc_ops); |
| 1263 | + if (unlikely(err)) |
| 1264 | + pr_err(KBUILD_MODNAME |
| 1265 | + ": %s:%hu:%s(): register_qdisc() = %d\n", |
| 1266 | + KBUILD_BASENAME ".c", __LINE__, __func__, err); |
| 1267 | + |
| 1268 | + return err; |
| 1269 | +} |
| 1270 | + |
| 1271 | +void __exit dpaa2_ceetm_unregister(void) |
| 1272 | +{ |
| 1273 | + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", |
| 1274 | + KBUILD_BASENAME ".c", __func__); |
| 1275 | + |
| 1276 | + unregister_qdisc(&dpaa2_ceetm_qdisc_ops); |
| 1277 | +} |
| 1278 | --- /dev/null |
| 1279 | +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-ceetm.h |
| 1280 | @@ -0,0 +1,207 @@ |
| 1281 | +/* Copyright 2017 NXP |
| 1282 | + * |
| 1283 | + * Redistribution and use in source and binary forms, with or without |
| 1284 | + * modification, are permitted provided that the following conditions are met: |
| 1285 | + * * Redistributions of source code must retain the above copyright |
| 1286 | + * notice, this list of conditions and the following disclaimer. |
| 1287 | + * * Redistributions in binary form must reproduce the above copyright |
| 1288 | + * notice, this list of conditions and the following disclaimer in the |
| 1289 | + * documentation and/or other materials provided with the distribution. |
| 1290 | + * * Neither the name of Freescale Semiconductor nor the |
| 1291 | + * names of its contributors may be used to endorse or promote products |
| 1292 | + * derived from this software without specific prior written permission. |
| 1293 | + * |
| 1294 | + * |
| 1295 | + * ALTERNATIVELY, this software may be distributed under the terms of the |
| 1296 | + * GNU General Public License ("GPL") as published by the Free Software |
| 1297 | + * Foundation, either version 2 of that License or (at your option) any |
| 1298 | + * later version. |
| 1299 | + * |
| 1300 | + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
| 1301 | + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 1302 | + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 1303 | + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
| 1304 | + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 1305 | + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 1306 | + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
| 1307 | + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 1308 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 1309 | + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 1310 | + */ |
| 1311 | + |
| 1312 | +#ifndef __DPAA2_ETH_CEETM_H |
| 1313 | +#define __DPAA2_ETH_CEETM_H |
| 1314 | + |
| 1315 | +#include <net/pkt_sched.h> |
| 1316 | +#include <net/pkt_cls.h> |
| 1317 | +#include <net/netlink.h> |
| 1318 | + |
| 1319 | +#include "dpaa2-eth.h" |
| 1320 | + |
| 1321 | +/* Functionally, there are num_tx_queues pfifo qdiscs through which |
| 1322 | + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20 |
| 1323 | + * are reserved for the maximum 32 CEETM channels (majors and minors are in |
| 1324 | + * hex). |
| 1325 | + */ |
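+/* For example, under a ceetm root qdisc with handle 1:, the pfifo attached to
+ * Tx queue 0 gets parent 1:21, Tx queue 1 gets 1:22, and so on, while class
+ * minors 1:1 - 1:20 identify CEETM channels.
+ */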
| 1326 | +#define PFIFO_MIN_OFFSET 0x21 |
| 1327 | + |
| 1328 | +#define DPAA2_CEETM_MIN_WEIGHT 100 |
| 1329 | +#define DPAA2_CEETM_MAX_WEIGHT 24800 |
| 1330 | + |
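+/* Tail drop threshold applied to each configured class queue, expressed in
+ * frames (DPNI_CONGESTION_UNIT_FRAMES in dpaa2_eth_update_tx_prio()).
+ */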
| 1331 | +#define DPAA2_CEETM_TD_THRESHOLD 1000 |
| 1332 | + |
| 1333 | +enum wbfs_group_type { |
| 1334 | + WBFS_GRP_A, |
| 1335 | + WBFS_GRP_B, |
| 1336 | + WBFS_GRP_LARGE |
| 1337 | +}; |
| 1338 | + |
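+/* Netlink attribute types carried inside TCA_OPTIONS; these values are
+ * presumably shared with the matching userspace tc qdisc/class module and
+ * must be kept in sync with it.
+ */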
| 1339 | +enum { |
| 1340 | + DPAA2_CEETM_TCA_UNSPEC, |
| 1341 | + DPAA2_CEETM_TCA_COPT, |
| 1342 | + DPAA2_CEETM_TCA_QOPS, |
| 1343 | + DPAA2_CEETM_TCA_MAX, |
| 1344 | +}; |
| 1345 | + |
| 1346 | +/* CEETM configuration types */ |
| 1347 | +enum dpaa2_ceetm_type { |
| 1348 | + CEETM_ROOT = 1, |
| 1349 | + CEETM_PRIO, |
| 1350 | +}; |
| 1351 | + |
| 1352 | +enum { |
| 1353 | + STRICT_PRIORITY = 0, |
| 1354 | + WEIGHTED_A, |
| 1355 | + WEIGHTED_B, |
| 1356 | +}; |
| 1357 | + |
| 1358 | +struct dpaa2_ceetm_shaping_cfg { |
| 1359 | + __u64 cir; /* committed information rate */ |
| 1360 | + __u64 eir; /* excess information rate */ |
| 1361 | + __u16 cbs; /* committed burst size */ |
| 1362 | + __u16 ebs; /* excess burst size */ |
| 1363 | + __u8 coupled; /* shaper coupling */ |
| 1364 | +}; |
| 1365 | + |
| 1366 | +extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX]; |
| 1367 | + |
| 1368 | +struct dpaa2_ceetm_class; |
| 1369 | +struct dpaa2_ceetm_qdisc_stats; |
| 1370 | +struct dpaa2_ceetm_class_stats; |
| 1371 | + |
| 1372 | +/* corresponds to CEETM shaping at LNI level */ |
| 1373 | +struct dpaa2_root_q { |
| 1374 | + struct Qdisc **qdiscs; |
| 1375 | + struct dpaa2_ceetm_qdisc_stats __percpu *qstats; |
| 1376 | +}; |
| 1377 | + |
| 1378 | +/* corresponds to the number of priorities a channel serves */ |
| 1379 | +struct dpaa2_prio_q { |
| 1380 | + struct dpaa2_ceetm_class *parent; |
| 1381 | + struct dpni_tx_priorities_cfg tx_prio_cfg; |
| 1382 | +}; |
| 1383 | + |
| 1384 | +struct dpaa2_ceetm_qdisc { |
| 1385 | + struct Qdisc_class_hash clhash; |
| 1386 | + struct tcf_proto *filter_list; /* qdisc attached filters */ |
| 1387 | + struct tcf_block *block; |
| 1388 | + |
| 1389 | + enum dpaa2_ceetm_type type; /* ROOT/PRIO */ |
| 1390 | + bool shaped; |
| 1391 | + union { |
| 1392 | + struct dpaa2_root_q root; |
| 1393 | + struct dpaa2_prio_q prio; |
| 1394 | + }; |
| 1395 | +}; |
| 1396 | + |
| 1397 | +/* CEETM Qdisc configuration parameters */ |
| 1398 | +struct dpaa2_ceetm_tc_qopt { |
| 1399 | + enum dpaa2_ceetm_type type; |
| 1400 | + __u16 shaped; |
| 1401 | + __u8 prio_group_A; |
| 1402 | + __u8 prio_group_B; |
| 1403 | + __u8 separate_groups; |
| 1404 | +}; |
| 1405 | + |
| 1406 | +/* root class - corresponds to a channel */ |
| 1407 | +struct dpaa2_root_c { |
| 1408 | + struct dpaa2_ceetm_shaping_cfg shaping_cfg; |
| 1409 | + u32 ch_id; |
| 1410 | +}; |
| 1411 | + |
| 1412 | +/* prio class - corresponds to a single class queue (strict or weighted) */ |
| 1413 | +struct dpaa2_prio_c { |
| 1414 | + struct dpaa2_ceetm_class_stats __percpu *cstats; |
| 1415 | + u32 qpri; |
| 1416 | + u8 mode; |
| 1417 | + u16 weight; |
| 1418 | +}; |
| 1419 | + |
| 1420 | +struct dpaa2_ceetm_class { |
| 1421 | + struct Qdisc_class_common common; |
| 1422 | + struct tcf_proto *filter_list; /* class attached filters */ |
| 1423 | + struct tcf_block *block; |
| 1424 | + struct Qdisc *parent; |
| 1425 | + struct Qdisc *child; |
| 1426 | + |
| 1427 | + enum dpaa2_ceetm_type type; /* ROOT/PRIO */ |
| 1428 | + bool shaped; |
| 1429 | + union { |
| 1430 | + struct dpaa2_root_c root; |
| 1431 | + struct dpaa2_prio_c prio; |
| 1432 | + }; |
| 1433 | +}; |
| 1434 | + |
| 1435 | +/* CEETM Class configuration parameters */ |
| 1436 | +struct dpaa2_ceetm_tc_copt { |
| 1437 | + enum dpaa2_ceetm_type type; |
| 1438 | + struct dpaa2_ceetm_shaping_cfg shaping_cfg; |
| 1439 | + __u16 shaped; |
| 1440 | + __u8 mode; |
| 1441 | + __u16 weight; |
| 1442 | +}; |
| 1443 | + |
| 1444 | +/* CEETM stats */ |
| 1445 | +struct dpaa2_ceetm_qdisc_stats { |
| 1446 | + __u32 drops; |
| 1447 | +}; |
| 1448 | + |
| 1449 | +struct dpaa2_ceetm_class_stats { |
| 1450 | + /* Software counters */ |
| 1451 | + struct gnet_stats_basic_packed bstats; |
| 1452 | + __u32 ern_drop_count; |
| 1453 | +}; |
| 1454 | + |
| 1455 | +struct dpaa2_ceetm_tc_xstats { |
| 1456 | + __u64 ceetm_dequeue_bytes; |
| 1457 | + __u64 ceetm_dequeue_frames; |
| 1458 | + __u64 ceetm_reject_bytes; |
| 1459 | + __u64 ceetm_reject_frames; |
| 1460 | +}; |
| 1461 | + |
| 1462 | +#ifdef CONFIG_FSL_DPAA2_ETH_CEETM |
| 1463 | +int __init dpaa2_ceetm_register(void); |
| 1464 | +void __exit dpaa2_ceetm_unregister(void); |
| 1465 | +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch, |
| 1466 | + int *qdid, u8 *qpri); |
| 1467 | +#else |
| 1468 | +static inline int dpaa2_ceetm_register(void) |
| 1469 | +{ |
| 1470 | + return 0; |
| 1471 | +} |
| 1472 | + |
| 1473 | +static inline void dpaa2_ceetm_unregister(void) {} |
| 1474 | + |
| 1475 | +static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch, |
| 1476 | + int *qdid, u8 *qpri) |
| 1477 | +{ |
| 1478 | + return 0; |
| 1479 | +} |
| 1480 | +#endif |
| 1481 | + |
| 1482 | +static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv) |
| 1483 | +{ |
| 1484 | + return priv->ceetm_en; |
| 1485 | +} |
| 1486 | + |
| 1487 | +#endif |
| 1488 | --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c |
| 1489 | +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c |
| 1490 | @@ -18,6 +18,7 @@ |
| 1491 | #include <net/sock.h> |
| 1492 | |
| 1493 | #include "dpaa2-eth.h" |
| 1494 | +#include "dpaa2-eth-ceetm.h" |
| 1495 | |
| 1496 | /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files |
| 1497 | * using trace events only need to #include <trace/events/sched.h> |
| 1498 | @@ -816,7 +817,7 @@ static netdev_tx_t dpaa2_eth_tx(struct s |
| 1499 | unsigned int needed_headroom; |
| 1500 | u32 fd_len; |
| 1501 | u8 prio = 0; |
| 1502 | - int err, i; |
| 1503 | + int err, i, ch_id = 0; |
| 1504 | |
| 1505 | percpu_stats = this_cpu_ptr(priv->percpu_stats); |
| 1506 | percpu_extras = this_cpu_ptr(priv->percpu_extras); |
| 1507 | @@ -887,6 +888,15 @@ static netdev_tx_t dpaa2_eth_tx(struct s |
| 1508 | } |
| 1509 | fq = &priv->fq[queue_mapping]; |
| 1510 | |
| 1511 | + if (dpaa2_eth_ceetm_is_enabled(priv)) { |
| 1512 | + err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio); |
| 1513 | + if (err) { |
| 1514 | + free_tx_fd(priv, fq, &fd, false); |
| 1515 | + percpu_stats->tx_dropped++; |
| 1516 | + return NETDEV_TX_OK; |
| 1517 | + } |
| 1518 | + } |
| 1519 | + |
| 1520 | fd_len = dpaa2_fd_get_len(&fd); |
| 1521 | nq = netdev_get_tx_queue(net_dev, queue_mapping); |
| 1522 | netdev_tx_sent_queue(nq, fd_len); |
| 1523 | @@ -2075,17 +2085,13 @@ static int update_xps(struct dpaa2_eth_p |
| 1524 | return err; |
| 1525 | } |
| 1526 | |
| 1527 | -static int dpaa2_eth_setup_tc(struct net_device *net_dev, |
| 1528 | - enum tc_setup_type type, void *type_data) |
| 1529 | +static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, |
| 1530 | + struct tc_mqprio_qopt *mqprio) |
| 1531 | { |
| 1532 | struct dpaa2_eth_priv *priv = netdev_priv(net_dev); |
| 1533 | - struct tc_mqprio_qopt *mqprio = type_data; |
| 1534 | u8 num_tc, num_queues; |
| 1535 | int i; |
| 1536 | |
| 1537 | - if (type != TC_SETUP_QDISC_MQPRIO) |
| 1538 | - return -EOPNOTSUPP; |
| 1539 | - |
| 1540 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
| 1541 | num_queues = dpaa2_eth_queue_count(priv); |
| 1542 | num_tc = mqprio->num_tc; |
| 1543 | @@ -2117,6 +2123,20 @@ out: |
| 1544 | return 0; |
| 1545 | } |
| 1546 | |
| 1547 | +static int dpaa2_eth_setup_tc(struct net_device *net_dev, |
| 1548 | + enum tc_setup_type type, |
| 1549 | + void *type_data) |
| 1550 | +{ |
| 1551 | + switch (type) { |
| 1552 | + case TC_SETUP_BLOCK: |
| 1553 | + return 0; |
| 1554 | + case TC_SETUP_QDISC_MQPRIO: |
| 1555 | + return dpaa2_eth_setup_mqprio(net_dev, type_data); |
| 1556 | + default: |
| 1557 | + return -EOPNOTSUPP; |
| 1558 | + } |
| 1559 | +} |
| 1560 | + |
| 1561 | static const struct net_device_ops dpaa2_eth_ops = { |
| 1562 | .ndo_open = dpaa2_eth_open, |
| 1563 | .ndo_start_xmit = dpaa2_eth_tx, |
| 1564 | @@ -4173,18 +4193,27 @@ static int __init dpaa2_eth_driver_init( |
| 1565 | |
| 1566 | dpaa2_eth_dbg_init(); |
| 1567 | err = fsl_mc_driver_register(&dpaa2_eth_driver); |
| 1568 | - if (err) { |
| 1569 | - dpaa2_eth_dbg_exit(); |
| 1570 | - return err; |
| 1571 | - } |
| 1572 | + if (err) |
| 1573 | + goto out_debugfs_err; |
| 1574 | + |
| 1575 | + err = dpaa2_ceetm_register(); |
| 1576 | + if (err) |
| 1577 | + goto out_ceetm_err; |
| 1578 | |
| 1579 | return 0; |
| 1580 | + |
| 1581 | +out_ceetm_err: |
| 1582 | + fsl_mc_driver_unregister(&dpaa2_eth_driver); |
| 1583 | +out_debugfs_err: |
| 1584 | + dpaa2_eth_dbg_exit(); |
| 1585 | + return err; |
| 1586 | } |
| 1587 | |
| 1588 | static void __exit dpaa2_eth_driver_exit(void) |
| 1589 | { |
| 1590 | - dpaa2_eth_dbg_exit(); |
| 1591 | + dpaa2_ceetm_unregister(); |
| 1592 | fsl_mc_driver_unregister(&dpaa2_eth_driver); |
| 1593 | + dpaa2_eth_dbg_exit(); |
| 1594 | } |
| 1595 | |
| 1596 | module_init(dpaa2_eth_driver_init); |
| 1597 | --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |
| 1598 | +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |
| 1599 | @@ -454,6 +454,8 @@ struct dpaa2_eth_priv { |
| 1600 | struct dpaa2_debugfs dbg; |
| 1601 | #endif |
| 1602 | struct dpni_tx_shaping_cfg shaping_cfg; |
| 1603 | + |
| 1604 | + bool ceetm_en; |
| 1605 | }; |
| 1606 | |
| 1607 | #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ |
| 1608 | @@ -574,6 +576,11 @@ static inline unsigned int dpaa2_eth_rx_ |
| 1609 | return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE; |
| 1610 | } |
| 1611 | |
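+/* Only one CEETM channel per DPNI is supported for now (see the commit
+ * message); this helper keeps that limit in one place for the class-count
+ * checks in dpaa2-eth-ceetm.c.
+ */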
| 1612 | +static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv) |
| 1613 | +{ |
| 1614 | + return 1; |
| 1615 | +} |
| 1616 | + |
| 1617 | int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); |
| 1618 | int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key); |
| 1619 | int dpaa2_eth_cls_key_size(u64 key); |
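For reference, a minimal userspace sketch (not part of the patch; variable names are arbitrary) of the weight normalization performed by dpaa2_eth_normalize_tx_prio() in dpaa2-eth-ceetm.c, using the DPAA2_CEETM_MIN_WEIGHT / DPAA2_CEETM_MAX_WEIGHT bounds from dpaa2-eth-ceetm.h; it prints the delta_bandwidth values that would be programmed for a sample set of class weights:

#include <stdio.h>

#define DPAA2_CEETM_MIN_WEIGHT 100
#define DPAA2_CEETM_MAX_WEIGHT 24800

int main(void)
{
	unsigned int weights[] = { 1, 2, 4 };
	unsigned int n = sizeof(weights) / sizeof(weights[0]);
	unsigned int weight_max = 0, increment, i;

	/* Find the largest user-provided weight */
	for (i = 0; i < n; i++)
		if (weights[i] > weight_max)
			weight_max = weights[i];

	/* Scale so the largest weight lands near DPAA2_CEETM_MAX_WEIGHT */
	increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) / weight_max;

	for (i = 0; i < n; i++)
		printf("weight %u -> delta_bandwidth %u\n", weights[i],
		       DPAA2_CEETM_MIN_WEIGHT + weights[i] * increment);

	return 0;
}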