From 73b0aa73b401810424afa90bf58663a56ad9d51a Mon Sep 17 00:00:00 2001
From: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Date: Fri, 5 May 2017 19:07:50 +0300
Subject: [PATCH] dpaa2-eth: Add Rx error queue

Until now, all error frames on the ingress path were discarded
in hardware. For debug purposes, add an option to have these
frames delivered to the CPU on a dedicated queue.

TODO: Remove the Kconfig option and find another way to enable
Rx error queue support.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
---
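A minimal way to exercise this path, assuming a typical build of this
driver, is to enable the new Kconfig symbol:

    CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE=y

Frames landing on the error queue should then show up in the interface's
rx_errors counter (e.g. via "ip -s link show <netdev>"), with the FD/FAS
error bits printed as rate-limited netdev_dbg() messages when dynamic
debug is enabled for the driver.
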
 drivers/net/ethernet/freescale/dpaa2/Kconfig     | 10 +++
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 97 ++++++++++++++++++++++++
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h |  5 +-
 3 files changed, 111 insertions(+), 1 deletion(-)

--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -15,6 +15,16 @@ config FSL_DPAA2_ETH_DCB
 	depends on DCB
 	help
 	  Enable Priority-Based Flow Control (PFC) support in the driver
+
+config FSL_DPAA2_ETH_USE_ERR_QUEUE
+	bool "Enable Rx error queue"
+	default n
+	help
+	  Allow Rx error frames to be enqueued on an error queue
+	  and processed by the driver (by default they are dropped
+	  in hardware).
+	  This may impact performance, recommended for debugging
+	  purposes only.
 endif
 
 config FSL_DPAA2_PTP_CLOCK
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -449,6 +449,53 @@ err_frame_format:
 	percpu_stats->rx_dropped++;
 }
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+/* Processing of Rx frames received on the error FQ
+ * We check and print the error bits and then free the frame
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_channel *ch,
+			     const struct dpaa2_fd *fd,
+			     struct dpaa2_eth_fq *fq __always_unused)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	dma_addr_t addr = dpaa2_fd_get_addr(fd);
+	void *vaddr;
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_fas *fas;
+	u32 status = 0;
+	u32 fd_errors;
+	bool has_fas_errors = false;
+
+	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
+	/* check frame errors in the FD field */
+	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
+	if (likely(fd_errors)) {
+		has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
+				 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
+				   fd_errors);
+	}
+
+	/* check frame errors in the FAS field */
+	if (has_fas_errors) {
+		fas = dpaa2_get_fas(vaddr, false);
+		status = le32_to_cpu(fas->status);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
+				   status & DPAA2_FAS_RX_ERR_MASK);
+	}
+	free_rx_fd(priv, fd, vaddr);
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_stats->rx_errors++;
+	ch->buf_count--;
+}
+#endif
+
 /* Consume all frames pull-dequeued into the store. This is the simplest way to
  * make sure we don't accidentally issue another volatile dequeue which would
  * overwrite (leak) frames already in the store.
@@ -2351,6 +2398,7 @@ static void set_fq_affinity(struct dpaa2
 		fq = &priv->fq[i];
 		switch (fq->type) {
 		case DPAA2_RX_FQ:
+		case DPAA2_RX_ERR_FQ:
 			fq->target_cpu = rx_cpu;
 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
 			if (rx_cpu >= nr_cpu_ids)
@@ -2394,6 +2442,12 @@ static void setup_fqs(struct dpaa2_eth_p
 		}
 	}
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+	/* We have exactly one Rx error queue per DPNI */
+	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+#endif
+
 	/* For each FQ, decide on which core to process incoming frames */
 	set_fq_affinity(priv);
 }
@@ -2946,6 +3000,40 @@ static int setup_tx_flow(struct dpaa2_et
 	return 0;
 }
 
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+			     struct dpaa2_eth_fq *fq)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpni_queue q = { { 0 } };
+	struct dpni_queue_id qid;
+	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+	int err;
+
+	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+	if (err) {
+		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+		return err;
+	}
+
+	fq->fqid = qid.fqid;
+
+	q.destination.id = fq->channel->dpcon_id;
+	q.destination.type = DPNI_DEST_DPCON;
+	q.destination.priority = 1;
+	q.user_context = (u64)fq;
+	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+	if (err) {
+		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+		return err;
+	}
+
+	return 0;
+}
+#endif
+
 /* Supported header fields for Rx hash distribution key */
 static const struct dpaa2_eth_dist_fields dist_fields[] = {
 	{
@@ -3315,7 +3403,11 @@ static int bind_dpni(struct dpaa2_eth_pr
 	/* Configure handling of error frames */
 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
 	err_cfg.set_frame_annotation = 1;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+	err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+#else
 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+#endif
 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
 				       &err_cfg);
 	if (err) {
@@ -3332,6 +3424,11 @@ static int bind_dpni(struct dpaa2_eth_pr
 		case DPAA2_TX_CONF_FQ:
 			err = setup_tx_flow(priv, &priv->fq[i]);
 			break;
+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
+		case DPAA2_RX_ERR_FQ:
+			err = setup_rx_err_flow(priv, &priv->fq[i]);
+			break;
+#endif
 		default:
 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
 			return -EINVAL;
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -318,8 +318,10 @@ struct dpaa2_eth_ch_stats {
 #define DPAA2_ETH_MAX_RX_QUEUES		\
 	(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
 #define DPAA2_ETH_MAX_TX_QUEUES		16
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES	1
 #define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
-					DPAA2_ETH_MAX_TX_QUEUES)
+					DPAA2_ETH_MAX_TX_QUEUES + \
+					DPAA2_ETH_MAX_RX_ERR_QUEUES)
 #define DPAA2_ETH_MAX_NETDEV_QUEUES	\
 	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
 
@@ -328,6 +330,7 @@ struct dpaa2_eth_ch_stats {
 enum dpaa2_eth_fq_type {
 	DPAA2_RX_FQ = 0,
 	DPAA2_TX_CONF_FQ,
+	DPAA2_RX_ERR_FQ
 };
 
 struct dpaa2_eth_priv;