blob: c90d0b18f3a9878441ca4592261306130a35a79b [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * asr emac driver
4 *
5 * Copyright (C) 2019 ASR Micro Limited
6 *
7 */
8
9#include <linux/bitops.h>
10#include <linux/kernel.h>
11#include <linux/clk.h>
12#include <linux/clk-provider.h>
13#include <linux/delay.h>
14#include <linux/dma-mapping.h>
15#include <linux/etherdevice.h>
16#include <linux/ethtool.h>
17#include <linux/in.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/ip.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/of_net.h>
26#include <linux/of_mdio.h>
27#include <linux/of_irq.h>
28#include <linux/of_device.h>
29#include <linux/phy.h>
30#include <linux/platform_device.h>
31#include <linux/tcp.h>
32#include <linux/timer.h>
33#include <linux/types.h>
34#include <linux/udp.h>
35#include <linux/workqueue.h>
36#include <linux/phy_fixed.h>
37#include <linux/pm_qos.h>
38#include <asm/cacheflush.h>
39#include <linux/cputype.h>
40#include <linux/iopoll.h>
41#include <linux/genalloc.h>
b.liub17525e2025-05-14 17:22:29 +080042#include <linux/regulator/consumer.h>
b.liue9582032025-04-17 19:18:16 +080043
44#ifdef CONFIG_DEBUG_FS
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#endif /* CONFIG_DEBUG_FS */
48#include <asm/atomic.h>
49#include "emac_eth.h"
50#include <linux/skbrb.h>
51
52#ifdef WAN_LAN_AUTO_ADAPT
53#include <linux/if_vlan.h>
54#include <linux/if_ether.h>
55#include <linux/kobject.h>
56#endif
57
58#define DRIVER_NAME "asr_emac"
59
b.liub17525e2025-05-14 17:22:29 +080060#define CLOSE_AIB_POWER_DOMAIN 1
b.liue9582032025-04-17 19:18:16 +080061#define AXI_PHYS_BASE 0xd4200000
62
63#define AIB_GMAC_IO_REG 0xD401E804
64#define APBC_ASFAR 0xD4015050
65#define AKEY_ASFAR 0xbaba
66#define AKEY_ASSAR 0xeb10
67
68#define EMAC_DIRECT_MAP
69#define TUNING_CMD_LEN 50
70#define CLK_PHASE_CNT 8
71#define TXCLK_PHASE_DEFAULT 0
72#define RXCLK_PHASE_DEFAULT 0
73#define TX_PHASE 1
74#define RX_PHASE 0
75
76#define EMAC_DMA_REG_CNT 16
77#define EMAC_MAC_REG_CNT 61
78#define EMAC_EMPTY_FROM_DMA_TO_MAC 48
79#define EMAC_REG_SPACE_SIZE ((EMAC_DMA_REG_CNT + \
80 EMAC_MAC_REG_CNT + EMAC_EMPTY_FROM_DMA_TO_MAC) * 4)
81#define EMAC_ETHTOOL_STAT(x) { #x, \
82 offsetof(struct emac_hw_stats, x) / sizeof(u32) }
83
84#define EMAC_SKBRB_SLOT_SIZE 1600
85#define EMAC_EXTRA_ROOM 72
86#define EMAC_SKBRB_MAX_PAYLOAD (EMAC_SKBRB_SLOT_SIZE - EMAC_EXTRA_ROOM - NET_IP_ALIGN)
87
88#define EMAC_RX_FILL_TIMER_US 0
89#define EMAC_TX_COAL_TIMER_US (1000)
90#define EMAC_TX_FRAMES (64)
91
92#ifdef WAN_LAN_AUTO_ADAPT
93#define DHCP_DISCOVER 1
94#define DHCP_OFFER 2
95#define DHCP_REQUEST 3
96#define DHCP_ACK 5
97#define IP175D_PHY_ID 0x02430d80
98
99enum emac_SIG {
100 CARRIER_DOWN = 0,
101 CARRIER_UP,
102 DHCP_EVENT_CLIENT,
103 DHCP_EVENT_SERVER,
104 PHY_IP175D_CONNECT,
105 CARRIER_DOWN_IP175D,
106 CARRIER_UP_IP175D,
107};
108
109enum emac_DHCP {
110 DHCP_SEND_REQ = 1,
111 DHCP_REC_RESP = 2,
112};
113
114struct emac_event {
115 const char *name;
116 char *action;
117 int port;
118 struct sk_buff *skb;
119 struct work_struct work;
120};
121
122extern u64 uevent_next_seqnum(void);
123static int emac_sig_workq(int event, int port);
124#endif
125
126static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable);
127static int clk_phase_set(struct emac_priv *priv, bool is_tx);
128#ifdef CONFIG_ASR_EMAC_NAPI
129static int emac_rx_clean_desc(struct emac_priv *priv, int budget);
130#else
131static int emac_rx_clean_desc(struct emac_priv *priv);
132#endif
133static void emac_alloc_rx_desc_buffers(struct emac_priv *priv);
134static int emac_phy_connect(struct net_device *dev);
135
/* Handle for the 3.3V GMAC supply regulator; acquired during probe — TODO confirm */
struct regulator *g_vcc3v3_gmac= NULL;
b.liue9582032025-04-17 19:18:16 +0800137/* for falcon */
/*
 * Per-SoC register layout tables: bit positions inside the APMU
 * clock/reset control register and the RGMII delay-line registers.
 * A value of -1 appears to mark a field that does not exist on that
 * SoC revision -- NOTE(review): confirm users guard against -1 before
 * shifting by these values.
 */
/* for falcon */
struct emac_regdata asr_emac_regdata_v1 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 17,
	.phy_intr_enable_shift = 16,
	.int_clk_src_sel_shift = -1,
	.rgmii_tx_clk_src_sel_shift = 5,
	.rgmii_rx_clk_src_sel_shift = 4,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = -1,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = -1,
	.rgmii_tx_delay_code_shift = -1,
	.rgmii_tx_delay_code_mask =-1,
	.rgmii_tx_delay_step_shift = -1,
	.rgmii_tx_delay_step_mask = -1,
	.rgmii_tx_delay_enable_shift = -1,
	.rgmii_rx_dline_reg_offset = -1,
	.rgmii_rx_delay_code_shift = -1,
	.rgmii_rx_delay_code_mask = -1,
	.rgmii_rx_delay_step_shift = -1,
	.rgmii_rx_delay_step_mask = -1,
	.rgmii_rx_delay_enable_shift = -1,
};

/* for kagu */
struct emac_regdata asr_emac_regdata_v2 = {
	.support_dual_vol_power = 0,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x178,
	.rgmii_tx_delay_code_shift = 24,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 20,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 16,
	.rgmii_rx_dline_reg_offset = 0x178,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 4,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 0,
};

/* for lapwing */
struct emac_regdata asr_emac_regdata_v3 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 1,
	.clk_rst_ctrl_reg_offset = 0x164,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x16c,
	.rgmii_tx_delay_code_shift = 8,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 0,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 31,
	.rgmii_rx_dline_reg_offset = 0x168,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 0,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 31,
};

/* Device-tree match table: compatible string selects the regdata above. */
static const struct of_device_id emac_of_match[] = {
	{
		.compatible = "asr,asr-eth",
		.data = (void *)&asr_emac_regdata_v1,
	},
	{
		.compatible = "asr,asr-eth-v2",
		.data = (void *)&asr_emac_regdata_v2,
	},
	{
		.compatible = "asr,asr-eth-v3",
		.data = (void *)&asr_emac_regdata_v3,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);
237
238#ifdef EMAC_DIRECT_MAP
/*
 * Fast-path TX mapping: translate a CPU virtual address to a physical
 * address by hand and flush the D-cache, bypassing dma_map_single().
 * Only used when EMAC_DIRECT_MAP is defined.
 */
dma_addr_t inline emac_map_direct(unsigned buf, unsigned len)
{
	unsigned ret;
	ret = mv_cp_virtual_to_physical(buf);
	/* Translation returning the input unchanged means no mapping exists. */
	BUG_ON(ret == buf);
	/*
	 * Flush the buffer from the D-cache, widened to 32-byte cache-line
	 * boundaries on both ends, so the device reads up-to-date data.
	 */
	__cpuc_flush_dcache_area((void *)(buf & ~ 31),
			((len + (buf & 31) + 31) & ~ 31));
	return (dma_addr_t)ret;
}
248#endif
249
250static inline void emac_unmap_single(struct device *dev, dma_addr_t handle,
251 size_t size, enum dma_data_direction dir)
252{
253#ifdef EMAC_DIRECT_MAP
254 if (dir == DMA_TO_DEVICE)
255 return;
256#endif
257 dma_unmap_single(dev, handle, size ,dir);
258}
259
260static inline dma_addr_t emac_map_single(struct device *dev, void *ptr,
261 size_t size,enum dma_data_direction dir)
262{
263 if (dir == DMA_FROM_DEVICE)
264 return dma_map_single(dev, ptr, size, dir);
265#ifndef EMAC_DIRECT_MAP
266 return dma_map_single(dev, ptr, size, dir);
267#else
268 return emac_map_direct((unsigned)ptr, (unsigned)size);
269#endif
270}
271
272#ifdef CONFIG_DDR_DEVFREQ
/*
 * Workqueue handler that applies the DDR QoS decision made by
 * emac_ddr_clk_scaling().  A boost value is applied with a 2 second
 * timeout so it auto-expires; the default value is applied without one.
 */
static void emac_ddr_qos_work(struct work_struct *work)
{
	struct emac_priv *priv;
	int val;

	priv = container_of(work, struct emac_priv, qos_work);
	val = priv->clk_scaling.qos_val;

	if (val == PM_QOS_DEFAULT_VALUE)
		pm_qos_update_request(&priv->clk_scaling.ddr_qos, val);
	else
		pm_qos_update_request_timeout(
			&priv->clk_scaling.ddr_qos, val, (2 * USEC_PER_SEC));
}
287
/*
 * Periodically compare the TX/RX throughput over the last polling
 * window against the configured up/down thresholds and request a DDR
 * frequency boost (or release it) via the qos workqueue.
 *
 * NOTE(review): assumes polling_delay_ms > 0; if a full window has
 * elapsed with polling_delay_ms == 0, total_time_ms could be 0 and the
 * rate divisions below would divide by zero -- confirm configuration.
 */
static void emac_ddr_clk_scaling(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	unsigned long rx_bytes, tx_bytes;
	unsigned long last_rx_bytes, last_tx_bytes;
	unsigned long total_time_ms = 0;
	unsigned int cur_rx_threshold, cur_tx_threshold;
	unsigned long polling_jiffies;
	int qos_val;

	/* Rate-limit: do nothing until a full polling window has elapsed. */
	polling_jiffies = msecs_to_jiffies(priv->clk_scaling.polling_delay_ms);
	if (time_is_after_jiffies(priv->clk_scaling.window_time +
				  polling_jiffies))
		return;

	total_time_ms = jiffies_to_msecs((long)jiffies -
			(long)priv->clk_scaling.window_time);

	if (!ndev) {
		pr_err("%s: dev or net is not ready\n", __func__);
		return;
	}

	qos_val = priv->clk_scaling.qos_val;
	last_rx_bytes = priv->clk_scaling.rx_bytes;
	last_tx_bytes = priv->clk_scaling.tx_bytes;
	/* First window after (re)start: just record the counters. */
	if (!last_rx_bytes && !last_tx_bytes)
		goto out;

	/* Byte deltas, accounting for counter wraparound at ULONG_MAX. */
	if (likely(ndev->stats.rx_bytes > last_rx_bytes))
		rx_bytes = ndev->stats.rx_bytes - last_rx_bytes;
	else
		rx_bytes = ULONG_MAX - last_rx_bytes + ndev->stats.rx_bytes + 1;

	if (likely(ndev->stats.tx_bytes > last_tx_bytes))
		tx_bytes = ndev->stats.tx_bytes - last_tx_bytes;
	else
		tx_bytes = ULONG_MAX - last_tx_bytes + ndev->stats.tx_bytes + 1;

	/* Rates in Mbps: bytes * 8 / (ms * 1000). */
	cur_tx_threshold = tx_bytes * 8 / (total_time_ms * 1000);
	pr_debug("%s: tx_rate=%dMbps, up_threshold=%dMbps\n",
		 __func__, cur_tx_threshold, priv->clk_scaling.tx_up_threshold);
	if (cur_tx_threshold >= priv->clk_scaling.tx_up_threshold) {
		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
		goto out;
	}

	cur_rx_threshold = rx_bytes * 8 / (total_time_ms * 1000);
	pr_debug("%s: rx_rate=%dMbps, up_threshold=%dMbps\n",
		 __func__, cur_rx_threshold, priv->clk_scaling.rx_up_threshold);
	if (cur_rx_threshold >= priv->clk_scaling.rx_up_threshold) {
		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
		goto out;
	}

	/* Both directions below their down thresholds: release the boost. */
	if (cur_tx_threshold < priv->clk_scaling.tx_down_threshold &&
	    cur_rx_threshold < priv->clk_scaling.rx_down_threshold)
		qos_val = PM_QOS_DEFAULT_VALUE;

out:
	priv->clk_scaling.rx_bytes = ndev->stats.rx_bytes;
	priv->clk_scaling.tx_bytes = ndev->stats.tx_bytes;
	priv->clk_scaling.window_time = jiffies;

	/* Apply the new QoS value asynchronously (see emac_ddr_qos_work). */
	if (qos_val != priv->clk_scaling.qos_val) {
		priv->clk_scaling.qos_val = qos_val;
		schedule_work(&priv->qos_work);
	}

	return;
}
359#endif
360
/* strings used by ethtool */
static const struct emac_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;	/* field position within struct emac_hw_stats, in u32 units */
} emac_ethtool_stats[] = {
	EMAC_ETHTOOL_STAT(tx_ok_pkts),
	EMAC_ETHTOOL_STAT(tx_total_pkts),
	EMAC_ETHTOOL_STAT(tx_ok_bytes),
	EMAC_ETHTOOL_STAT(tx_err_pkts),
	EMAC_ETHTOOL_STAT(tx_singleclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_multiclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_lateclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_excessclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_unicast_pkts),
	EMAC_ETHTOOL_STAT(tx_multicast_pkts),
	EMAC_ETHTOOL_STAT(tx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(tx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_pkts),
	EMAC_ETHTOOL_STAT(rx_total_pkts),
	EMAC_ETHTOOL_STAT(rx_crc_err_pkts),
	EMAC_ETHTOOL_STAT(rx_align_err_pkts),
	EMAC_ETHTOOL_STAT(rx_err_total_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_bytes),
	EMAC_ETHTOOL_STAT(rx_total_bytes),
	EMAC_ETHTOOL_STAT(rx_unicast_pkts),
	EMAC_ETHTOOL_STAT(rx_multicast_pkts),
	EMAC_ETHTOOL_STAT(rx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(rx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_len_err_pkts),
	EMAC_ETHTOOL_STAT(rx_len_undersize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_oversize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_fragment_pkts),
	EMAC_ETHTOOL_STAT(rx_len_jabber_pkts),
	EMAC_ETHTOOL_STAT(rx_64_pkts),
	EMAC_ETHTOOL_STAT(rx_65_127_pkts),
	EMAC_ETHTOOL_STAT(rx_128_255_pkts),
	EMAC_ETHTOOL_STAT(rx_256_511_pkts),
	EMAC_ETHTOOL_STAT(rx_512_1023_pkts),
	EMAC_ETHTOOL_STAT(rx_1024_1518_pkts),
	EMAC_ETHTOOL_STAT(rx_1519_plus_pkts),
	EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_dma_missed_frame_cnt),
	EMAC_ETHTOOL_STAT(tx_tso_pkts),
	EMAC_ETHTOOL_STAT(tx_tso_bytes),
};
407
/*
 * Program MAC_GLOBAL_CONTROL from priv->speed / priv->duplex.
 * Always returns 0; an unrecognized speed is only logged.
 *
 * NOTE(review): the speed bits are OR-ed in without clearing the
 * existing speed field first, so speed changes rely on the register
 * having been reset beforehand -- confirm against the MREGBIT_SPEED_*
 * field layout.
 */
static int emac_set_speed_duplex(struct emac_priv *priv)
{
	u32 ctrl;

	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
	if (priv->duplex)
		ctrl |= MREGBIT_FULL_DUPLEX_MODE;
	else
		ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;

	switch (priv->speed) {
	case SPEED_1000:
		ctrl |= MREGBIT_SPEED_1000M;
		break;
	case SPEED_100:
		ctrl |= MREGBIT_SPEED_100M;
		break;
	case SPEED_10:
		ctrl |= MREGBIT_SPEED_10M;
		break;
	default:
		pr_err("broken speed: %d\n", priv->speed);
		return 0;
	}
	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	pr_info("emac: force link speed:%dM duplex:%s\n",
		priv->speed, priv->duplex ? "Full": "Half");

	return 0;
}
438
/*
 * Parse fixed-link configuration from the device tree and apply it.
 *
 * Three forms are recognized, in priority order:
 *  1. managed = "in-band-status"  -> link parameters stay zeroed;
 *  2. a "fixed-link" child node (new binding) with speed/duplex/pause;
 *  3. a 5-cell "fixed-link" property (old binding).
 *
 * Returns 0 on success (via emac_set_speed_duplex()), -EINVAL when the
 * new binding lacks a "speed" property, -ENODEV when no fixed-link
 * configuration is present at all.
 */
static int emac_set_fixed_link(struct device_node *np, struct emac_priv *priv)
{
	struct fixed_phy_status status = {};
	struct device_node *fixed_link_node;
	u32 fixed_link_prop[5];
	const char *managed;
	int interface;

	if (of_property_read_string(np, "managed", &managed) == 0 &&
	    strcmp(managed, "in-band-status") == 0) {
		/* status is zeroed, namely its .link member */
		goto fix_link;
	}

	/* New binding */
	fixed_link_node = of_get_child_by_name(np, "fixed-link");
	if (fixed_link_node) {
		status.link = 1;
		status.duplex = of_property_read_bool(fixed_link_node,
						      "full-duplex");
		if (of_property_read_u32(fixed_link_node, "speed",
					 &status.speed)) {
			of_node_put(fixed_link_node);
			return -EINVAL;
		}
		status.pause = of_property_read_bool(fixed_link_node, "pause");
		status.asym_pause = of_property_read_bool(fixed_link_node,
							  "asym-pause");
		/* Optional phy-mode in the child node; default to RGMII. */
		interface = of_get_phy_mode(fixed_link_node);
		if (interface < 0) {
			priv->interface = PHY_INTERFACE_MODE_RGMII;
			pr_info("no interface for fix-link, use RGMII\n");
		} else {
			priv->interface = interface;
		}

		of_node_put(fixed_link_node);
		goto fix_link;
	}

	/* Old binding */
	if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
				       ARRAY_SIZE(fixed_link_prop)) == 0) {
		status.link = 1;
		status.duplex = fixed_link_prop[1];
		status.speed = fixed_link_prop[2];
		status.pause = fixed_link_prop[3];
		status.asym_pause = fixed_link_prop[4];
		goto fix_link;
	}

	return -ENODEV;

fix_link:
	priv->speed = status.speed;
	priv->duplex = status.duplex;

	return emac_set_speed_duplex(priv);
}
498
499void register_dump(struct emac_priv *priv)
500{
501 int i;
502 void __iomem *base = priv->iobase;
503
504 for (i = 0; i < 16; i++) {
505 pr_info("DMA:0x%x:0x%x\n",
506 DMA_CONFIGURATION + i * 4,
507 readl(base + DMA_CONFIGURATION + i * 4));
508 }
509 for (i = 0; i < 60; i++) {
510 pr_info("MAC:0x%x:0x%x\n",
511 MAC_GLOBAL_CONTROL + i * 4,
512 readl(base + MAC_GLOBAL_CONTROL + i * 4));
513 }
514
515 for (i = 0; i < 4; i++) {
516 pr_info("1588:0x%x:0x%x\n",
517 PTP_1588_CTRL + i * 4,
518 readl(base + PTP_1588_CTRL + i * 4));
519 }
520
521 for (i = 0; i < 6; i++) {
522 pr_info("1588:0x%x:0x%x\n",
523 SYS_TIME_GET_LOW + i * 4,
524 readl(base + SYS_TIME_GET_LOW + i * 4));
525 }
526 for (i = 0; i < 5; i++) {
527 pr_info("1588:0x%x:0x%x\n",
528 RX_TIMESTAMP_LOW + i * 4,
529 readl(base + RX_TIMESTAMP_LOW + i * 4));
530 }
531 for (i = 0; i < 2; i++) {
532 pr_info("1588:0x%x:0x%x\n",
533 PTP_1588_IRQ_STS + i * 4,
534 readl(base + PTP_1588_IRQ_STS + i * 4));
535 }
536
537 if (priv->tso) {
538 for (i = 0; i < 18; i++) {
539 pr_info("TSO:0x%x:0x%x\n", i * 4,
540 emac_rd_tso(priv, i * 4));
541 }
542 }
543}
544
/*
 * print_pkt - hex-dump a packet buffer at debug log level
 * @buf: start of packet data
 * @len: number of valid bytes in @buf
 *
 * Prints up to eight bytes per line.  Fixes two defects in the old
 * version: it read up to seven bytes past @buf + @len whenever @len
 * was not a multiple of eight, and it printed the buffer address by
 * truncating the pointer through (unsigned int), which is broken on
 * 64-bit builds (%p is used instead).
 */
void print_pkt(unsigned char *buf, int len)
{
	int i, j;
	/* room for 8 bytes formatted as "0x%02x " plus the terminator */
	char line[8 * 5 + 1];

	pr_debug("data len = %d byte, buf addr: %p\n", len, buf);
	for (i = 0; i < len; i += 8) {
		int pos = 0;

		/* never step past the end of the valid data */
		for (j = i; j < len && j < i + 8; j++)
			pos += scnprintf(line + pos, sizeof(line) - pos,
					 "0x%02x ", buf[j]);
		pr_debug("%s\n", line);
	}
}
564
#ifdef EMAC_DEBUG
/* Debug build: dump a descriptor as little-endian 32-bit words. */
void print_desc(unsigned char *buf, int len)
{
	int i;

	pr_info("descriptor len = %d byte, buf addr: 0x%x\n",
		len, (unsigned int)buf);
	for (i = 0; i < len; i = i + 4) {
		/* bytes printed high-to-low so each line reads as one word */
		pr_info("0x%02x%02x%02x%02x\n",
			*(buf + i + 3),
			*(buf + i + 2),
			*(buf + i + 1),
			*(buf + i));
	}
}
#else
/* Non-debug build: descriptor dumping compiled out. */
void print_desc(unsigned char *buf, int len)
{

}
#endif
586
/* Name		emac_reset_hw
 * Arguments	priv : pointer to hardware data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Quiesce and reset the controller: mask all interrupts,
 *		stop the TX/RX units and the DMA, reset the MAC and its
 *		statistic counters, then restore the MDIO clock divider.
 *		Serialized against MDIO access via priv->mii_mutex.
 */
int emac_reset_hw(struct emac_priv *priv)
{
	mutex_lock(&priv->mii_mutex);
	/* disable all the interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);

	/* disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* stop the DMA */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	/* reset mac, statistic counters */
	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018);

	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000);

	/* the reset clears the MDIO divider; program it again */
	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
	mutex_unlock(&priv->mii_mutex);
	return 0;
}
616
/* Name		emac_init_hw
 * Arguments	priv : pointer to hardware data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Assumes that the controller has previously been reset
 *		and is in a post-reset uninitialized state.
 *		Initializes the address filtering, multicast hash table,
 *		FIFO thresholds and DMA configuration, then programs the
 *		MDIO clock divider.  Leaves the transmit and receive
 *		units disabled.
 */
int emac_init_hw(struct emac_priv *priv)
{
	u32 val = 0, threshold;

	mutex_lock(&priv->mii_mutex);
	/* MAC Init
	 * disable transmit and receive units
	 */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* enable mac address 1 filtering */
	//emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0001);
	emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0100);

	/* zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0000);

	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, EMAC_TX_FIFO_DWORDS - 8);

	/* TX start threshold scales with link speed; otherwise fall back
	 * to store-and-forward mode.
	 */
	if (priv->speed == SPEED_1000)
		threshold = 1024;
	else if (priv->speed == SPEED_100)
		threshold = 256;
	else
		threshold = TX_STORE_FORWARD_MODE;
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, threshold);

	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, 0xc);

	/* reset dma */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	/* pulse the DMA soft-reset bit, settling 10ms on each edge */
	emac_wr(priv, DMA_CONFIGURATION, 0x01);
	mdelay(10);
	emac_wr(priv, DMA_CONFIGURATION, 0x00);
	mdelay(10);

	val |= MREGBIT_WAIT_FOR_DONE;
	val |= MREGBIT_STRICT_BURST;
	val |= MREGBIT_DMA_64BIT_MODE;
	val |= MREGBIT_BURST_16WORD; //MREGBIT_BURST_1WORD;

	emac_wr(priv, DMA_CONFIGURATION, val);

	/* MDC Clock Division: AXI-312M/96 = 3.25M */
	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);

	mutex_unlock(&priv->mii_mutex);

	printk("MDIO clock div: 0x%x\n", emac_rd(priv, MAC_MDIO_CLK_DIV));
	return 0;
}
687
688int emac_set_mac_addr(struct emac_priv *priv, unsigned char *addr)
689{
690 emac_wr(priv, MAC_ADDRESS1_HIGH, (addr[1] << 8 | addr[0]));
691 emac_wr(priv, MAC_ADDRESS1_MED, (addr[3] << 8 | addr[2]));
692 emac_wr(priv, MAC_ADDRESS1_LOW, (addr[5] << 8 | addr[4]));
693
694 return 0;
695}
696
697void emac_set_fc_source_addr(struct emac_priv *priv, unsigned char *addr)
698{
699 emac_wr(priv, MAC_FC_SOURCE_ADDRESS_HIGH, (addr[1] << 8 | addr[0]));
700 emac_wr(priv, MAC_FC_SOURCE_ADDRESS_MED, (addr[3] << 8 | addr[2]));
701 emac_wr(priv, MAC_FC_SOURCE_ADDRESS_LOW, (addr[5] << 8 | addr[4]));
702
703 return;
704}
705
/* Write the TX poll-demand register to kick the transmit DMA engine. */
static inline void emac_dma_start_transmit(struct emac_priv *priv)
{
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF);
}
710
/* Write the RX poll-demand register to kick the receive DMA engine. */
static inline void emac_dma_start_receive(struct emac_priv *priv)
{
	emac_wr(priv, DMA_RECEIVE_POLL_DEMAND, 0xFF);
}
715
716#ifdef CONFIG_ASR_EMAC_NAPI
/*
 * Re-enable DMA interrupts for one direction (NAPI poll completion).
 * @tx: non-zero enables the TX transfer-done interrupt; zero enables
 *      the RX transfer-done and missed-frame interrupts and, when TSO
 *      is active, the TSO RX checksum interrupts as well.
 */
void emac_enable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
		       MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
				    TSO_AP_RX_INTR_ENA_CSUM_DONE |
				    TSO_AP_RX_INTR_ENA_CSUM_ERR);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}
736
/*
 * Mask DMA interrupts for one direction before NAPI polling.
 * Mirror image of emac_enable_interrupt().
 */
void emac_disable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val &= ~(MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			 MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA, 0x0);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}
754#endif
755
756bool emac_is_rmii_interface(struct emac_priv *priv)
757{
758 const struct emac_regdata *regdata = priv->regdata;
759 void __iomem* apmu;
760 u32 val;
761
762 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
763 if (apmu == NULL) {
764 pr_err("error to ioremap APMU base\n");
765 return -ENOMEM;
766 }
767
768 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
769 val &= (0x1 << regdata->mac_intf_sel_shift);
770 if (val)
771 return false;
772 else
773 return true;
774}
775
/*
 * Enable or disable the PHY interrupt at the APMU level by toggling
 * the phy_intr_enable bit in the clock/reset control register.
 */
void emac_config_phy_interrupt(struct emac_priv *priv, int enable)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem* apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("error to ioremap APMU base\n");
		return;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	if (enable)
		val |= 0x1 << regdata->phy_intr_enable_shift;
	else
		val &= ~(0x1 << regdata->phy_intr_enable_shift);
	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
	iounmap(apmu);
	return;
}
797
798void emac_phy_interface_config(struct emac_priv *priv, int phy_interface)
799{
800 const struct emac_regdata *regdata = priv->regdata;
801 void __iomem* apmu;
802 u32 val;
803
804 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
805 if (apmu == NULL) {
806 pr_err("error to ioremap APMU base\n");
807 return;
808 }
809
810 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
811 if (PHY_INTERFACE_MODE_RMII == phy_interface) {
812 val &= ~(0x1 << regdata->mac_intf_sel_shift);
813 printk("===> set eamc interface: rmii\n");
814 } else {
815 val |= 0x1 << regdata->mac_intf_sel_shift;
816 printk("===> set eamc interface: rgmii\n");
817 }
818 val |= 0x1 << regdata->axi_mst_single_id_shift;
819 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
820
821 iounmap(apmu);
822 priv->interface = phy_interface;
823 return;
824}
825
/*
 * Configure the EMAC I/O pad voltage through the AIB register, on SoCs
 * that support dual-voltage power (regdata->support_dual_vol_power).
 * Each AIB access must be preceded by writing the two unlock keys to
 * the APBC_ASFAR/ASSAR pair.
 */
static void emac_set_aib_power_domain(struct emac_priv *priv)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *aib_emac_io;
	void __iomem *apbc_asfar;
	u32 tmp;

	if (!regdata->support_dual_vol_power)
		return;

	aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
	apbc_asfar = ioremap(APBC_ASFAR, 8);

	/* unlock the AIB register window before the read */
	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	/*
	 * NOTE(review): power_domain is forced to 0 here, so the 3.3V
	 * branch below is dead and the pads are always set to 1.8V --
	 * presumably deliberate (cf. CLOSE_AIB_POWER_DOMAIN); confirm.
	 */
	priv->power_domain = 0;
	/* 0= power down, only set power down when vol = 0 */
	if (priv->power_domain) {
		tmp &= ~(0x1 << 2); /* 3.3v */
		printk("===> emac set io to 3.3v\n");
	} else {
		tmp |= 0x1 << 2; /* 1.8v */
		printk("===> emac set io to 1.8v\n");
	}

	/* unlock again for the write-back */
	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	writel(tmp, aib_emac_io);

	/* and once more to read back and log the final value */
	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	printk("===> emac AIB read back: 0x%x\n", tmp);

	iounmap(apbc_asfar);
	iounmap(aib_emac_io);
}
864
/*
 * Delayed-work handler that keeps the link partner paused: programs
 * the maximum pause time once, transmits a pause frame, and
 * reschedules itself until cancelled by
 * emac_check_ring_and_send_pause().
 */
static void emac_pause_generate_work_fuc(struct work_struct *work)
{
	struct emac_priv *priv= container_of(work, struct emac_priv, emac_pause_work.work);
	int time_nxt = 0;
	/* because pause time value = 0XFFFF,equal to stopping for 336ms(100M)/34ms(1000M) to transmit */
	/* by a repeated testing, delay 20ms(1000M)/300ms(100M) satisfy making the neighbor stop transmission */
	time_nxt = (priv->speed == SPEED_1000) ? 20 : 300;
	if (!priv->pause.pause_time_max) {
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0xffff);
		priv->pause.pause_time_max = 1;
	}

	emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
	schedule_delayed_work(&priv->emac_pause_work, msecs_to_jiffies(time_nxt));
	return;
}
881
/*
 * Software flow control based on RX ring occupancy.  Looks at the
 * descriptors high_water/low_water slots ahead of the next-to-clean
 * position: if the high-water descriptor is already CPU-owned (ring
 * filling up) start the periodic pause-frame work; once the low-water
 * descriptor is hardware-owned again, stop it and send a zero-time
 * pause frame to resume the partner.
 */
static inline void emac_check_ring_and_send_pause(struct emac_priv *priv)
{
	int pos;
	int high_water;
	int low_water;
	struct emac_rx_desc *rx_desc;
	struct emac_desc_ring *rx_ring;

	rx_ring = &priv->rx_ring;
	pos = rx_ring->nxt_clean;
	high_water = (pos + priv->pause.high_water) % priv->rx_ring.total_cnt;
	low_water = (pos + priv->pause.low_water) % priv->rx_ring.total_cnt;

	rx_desc = emac_get_rx_desc(priv, high_water);
	if (priv->pause.pause_sending == 0 && rx_desc->OWN == 0) {
		schedule_delayed_work(&priv->emac_pause_work, 0);
		priv->pause.pause_sending = 1;
	}

	rx_desc = emac_get_rx_desc(priv, low_water);
	if (rx_desc->OWN && priv->pause.pause_sending) {
		cancel_delayed_work_sync(&priv->emac_pause_work);
		/* zero pause time tells the partner to resume immediately */
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0);
		emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
		priv->pause.pause_time_max = 0;
		priv->pause.pause_sending = 0;
	}
}
910
/* Name		emac_sw_init
 * Arguments	priv : pointer to driver private data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Initializes driver software state: the default RX buffer
 *		length and the mutex/spinlocks used by the driver.
 */
static int emac_sw_init(struct emac_priv *priv)
{
	/* largest payload one RX ring-buffer slot can hold */
	priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;

	mutex_init(&priv->mii_mutex);
	spin_lock_init(&priv->spStatsLock);
	spin_lock_init(&priv->spTxLock);
	spin_lock_init(&priv->intr_lock);

	return 0;
}
929
/*
 * Inspect a PTP packet (over raw Ethernet or UDP/IPv4) and retarget
 * the hardware timestamping unit at the message type expected next.
 * @txrx: non-zero when called on the TX path, zero on the RX path.
 *
 * Returns the packet's transportSpecific/messageType byte, or -1 when
 * the packet is not a PTP event packet.
 */
static int emac_check_ptp_packet(struct emac_priv *priv,
				 struct sk_buff *skb, int txrx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct ptp_header *ptph = NULL;
	struct iphdr *iph;
	struct udphdr *udph;
	int msg_type, msg_id;
	int ts;

	/* Locate the PTP header: either directly after the Ethernet
	 * header (ETH_P_1588) or inside a UDP datagram on the PTP
	 * event port.
	 */
	if (eth->h_proto == htons(ETH_P_1588)) {
		netdev_dbg(priv->ndev, "get PTP packet over ETH\n");
		ptph = (struct ptp_header *)((u8 *)eth + sizeof(struct ethhdr));
	} else if (eth->h_proto == htons(ETH_P_IP)) {
		iph = (struct iphdr *)((u8 *)eth + sizeof(struct ethhdr));
		if (iph->protocol != IPPROTO_UDP)
			return -1;

		udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
		if ((htons(udph->dest) != PTP_EVENT_PORT ||
		     htons(udph->source) != PTP_EVENT_PORT))
			return -1;

		netdev_dbg(priv->ndev, "get PTP packet over UDP\n");
		ptph = (struct ptp_header *)((u8 *)udph + sizeof(struct udphdr));
	} else {
		return -1;
	}

	msg_id = -1;
	/* high nibble: transportSpecific; low nibble: messageType */
	ts = ptph->tsmt & 0xF0;
	msg_type = (ptph->tsmt) & 0x0F;
	if (txrx) {
		/* TX path: predict which message we will receive next */
		if (msg_type == MSG_SYNC) {
			if (ts)
				msg_id = MSG_PDELAY_REQ;
			else
				msg_id = MSG_DELAY_REQ;
		} else if (msg_type == MSG_DELAY_REQ) {
			msg_id = MSG_SYNC;
		} else if (msg_type == MSG_PDELAY_REQ) {
			msg_id = MSG_PDELAY_RESP;
			/* remember our identity for the RX-side match below */
			memcpy(&priv->sourcePortIdentity,
			       &ptph->sourcePortIdentity,
			       sizeof(struct PortIdentity));
		} else if (msg_type == MSG_PDELAY_RESP) {
			msg_id = MSG_PDELAY_REQ;
		}
	} else {
		netdev_dbg(priv->ndev, "RX timestamp for message type %d\n",
			   ptph->tsmt);

		if (msg_type == MSG_PDELAY_RESP) {
			struct pdelay_resp_msg *presp = (struct pdelay_resp_msg *)ptph;

			/*
			 * Change to monitor SYNC packet if pdelay response
			 * received for same clock identity.
			 */
			if (!memcmp(&presp->requestingPortIdentity.clockIdentity,
				    &priv->sourcePortIdentity.clockIdentity,
				    sizeof(struct ClockIdentity))) {
				msg_id = MSG_SYNC;
			}
		}
	}

	/*
	 * Since some platform not support to timestamp two or more
	 * message type, so change here.
	 */
	if (msg_id >= 0) {
		if (priv->regdata->ptp_rx_ts_all_events) {
			/* replicate the transportSpecific nibble per byte */
			msg_id = ALL_EVENTS;
			msg_id |= ts | ts << 8 | ts << 16 | ts << 24;
		} else {
			msg_id |= ts;
		}

		priv->hwptp->config_hw_tstamping(priv, 1, PTP_V2_L2_L4, msg_id);
	}

	return ptph->tsmt;
}
1014
/* emac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the register & pass it to stack.
 * and also perform some sanity checks.
 */
static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* retarget the timestamp unit at the next expected message */
	emac_check_ptp_packet(priv, skb, 1);

	/* get the valid tstamp */
	ns = priv->hwptp->get_tx_timestamp(priv);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);

	wmb();
	netdev_dbg(priv->ndev, "get valid TX hw timestamp %llu\n", ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);

	return;
}
1049
/* emac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p,
				 struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (p->ptp_pkt && p->rx_timestamp) {
		/* retarget the timestamp unit, then fetch the timestamp */
		emac_check_ptp_packet(priv, skb, 0);
		ns = priv->hwptp->get_rx_timestamp(priv);
		netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n");
	}
}
1079
1080/**
1081 * emac_hwtstamp_set - control hardware timestamping.
1082 * @dev: device pointer.
1083 * @ifr: An IOCTL specific structure, that can contain a pointer to
1084 * a proprietary structure used to pass information to the driver.
1085 * Description:
1086 * This function configures the MAC to enable/disable both outgoing(TX)
1087 * and incoming(RX) packets time stamping based on user input.
1088 * Return Value:
1089 * 0 on success and an appropriate -ve integer on failure.
1090 */
static int emac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 ns_ptp;
	u32 ptp_event_msg_id = 0;
	u32 rx_ptp_type = 0;

	/* Without PTP hardware support, force both directions off. */
	if (!priv->ptp_support) {
		netdev_alert(priv->ndev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		__func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Map the requested RX filter onto the PTP message type and
	 * transport (L2/L4, v1/v2) the hardware should timestamp. */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* time stamp no incoming packet at all */
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* PTP v1, UDP, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V1_L4_ONLY;
		break;

	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* PTP v1, UDP, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V1_L4_ONLY;
		break;

	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		/* PTP v2, UDP, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		/* PTP v2, UDP, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		/* PTP v2/802.AS1 any layer, any kind of event packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		/*
		 * IF not support ALL EVENTS, default timestamp SYNC packet,
		 * changed to MSG_DELAY_REQ automactically if needed
		 */
		if (priv->regdata->ptp_rx_ts_all_events)
			ptp_event_msg_id = ALL_EVENTS;
		else
			ptp_event_msg_id = MSG_SYNC;

		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		/* PTP v2/802.AS1, any layer, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* PTP v2/802.AS1, any layer, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V2_L2_L4;
		break;
	default:
		return -ERANGE;
	}

	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hwptp->config_hw_tstamping(priv, 0, 0, 0);
	else {

		priv->hwptp->config_hw_tstamping(priv, 1,
				rx_ptp_type, ptp_event_msg_id);

		/* initialize system time */
		ktime_get_real_ts64(&now);
		priv->hwptp->init_systime(priv, timespec64_to_ns(&now));

		/* program Increment reg */
		priv->hwptp->config_systime_increment(priv);

		/* Re-read both clocks; if the PHC drifted more than 5us
		 * from wall time during setup, reinitialize it. */
		ns_ptp = priv->hwptp->get_phc_time(priv);
		ktime_get_real_ts64(&now);
		/* check the diff between ptp timer and system time */
		if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000)
			priv->hwptp->init_systime(priv,
						  timespec64_to_ns(&now));
	}

	/* Remember the accepted config so SIOCGHWTSTAMP can report it. */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
1226
1227/**
1228 * emac_hwtstamp_get - read hardware timestamping.
1229 * @dev: device pointer.
1230 * @ifr: An IOCTL specific structure, that can contain a pointer to
1231 * a proprietary structure used to pass information to the driver.
1232 * Description:
1233 * This function obtain the current hardware timestamping settings
1234 as requested.
1235 */
1236static int emac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1237{
1238 struct emac_priv *priv = netdev_priv(dev);
1239 struct hwtstamp_config *config = &priv->tstamp_config;
1240
1241 if (!priv->ptp_support)
1242 return -EOPNOTSUPP;
1243
1244 return copy_to_user(ifr->ifr_data, config,
1245 sizeof(*config)) ? -EFAULT : 0;
1246}
1247
1248/* Name emac_ioctl
1249 * Arguments pstNetdev : pointer to net_device structure
1250 * pstIfReq : pointer to interface request structure used.
1251 * u32Cmd : IOCTL command number
1252 * Return Status: 0 - Success; non-zero - Fail
1253 * Description It is called by upper layer and
1254 * handling various task IOCTL commands.
1255 */
1256static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1257{
1258 int ret = -EOPNOTSUPP;
1259
1260 if (!netif_running(ndev))
1261 return -EINVAL;
1262
1263 switch (cmd) {
1264 case SIOCGMIIPHY:
1265 case SIOCGMIIREG:
1266 case SIOCSMIIREG:
1267 if (!ndev->phydev)
1268 return -EINVAL;
1269 ret = phy_mii_ioctl(ndev->phydev, rq, cmd);
1270 break;
1271 case SIOCSHWTSTAMP:
1272 ret = emac_hwtstamp_set(ndev, rq);
1273 break;
1274 case SIOCGHWTSTAMP:
1275 ret = emac_hwtstamp_get(ndev, rq);
1276 break;
1277 default:
1278 break;
1279 }
1280
1281 return ret;
1282}
1283
1284static irqreturn_t emac_wakeup_handler(int irq, void *dev_id)
1285{
1286 struct net_device *ndev = (struct net_device *)dev_id;
1287 struct emac_priv *priv = netdev_priv(ndev);
1288 u32 ctrl;
1289
1290 emac_set_axi_bus_clock(priv, 1);
1291 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
1292 if (!(ctrl & (MREGBIT_UNICAST_WAKEUP_MODE |
1293 MREGBIT_MAGIC_PACKET_WAKEUP_MODE)))
1294 return IRQ_NONE;
1295
1296 ctrl &= ~(MREGBIT_UNICAST_WAKEUP_MODE |
1297 MREGBIT_MAGIC_PACKET_WAKEUP_MODE);
1298 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
1299 return IRQ_HANDLED;
1300}
1301
/* ISR for the TSO/checksum-offload engine: acknowledges RX checksum
 * completion (deferring cleanup to NAPI when enabled), restarts TX on
 * TSO/checksum completion, and logs/acks error interrupts.
 */
static irqreturn_t emac_irq_tso(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;

	/* handle rx */
	status = emac_rd_tso(priv, TSO_AP_RX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_RX_INTR_STS=0x%x", status);

		if (status & TSO_AP_RX_INTR_ENA_CSUM_DONE) {
#ifdef CONFIG_ASR_EMAC_NAPI
			/* Mask RX interrupts and let NAPI drain the ring. */
			if (likely(napi_schedule_prep(&priv->rx_napi))) {
				unsigned long flags;

				spin_lock_irqsave(&priv->intr_lock, flags);
				emac_disable_interrupt(priv, 0);
				spin_unlock_irqrestore(&priv->intr_lock, flags);
				__napi_schedule(&priv->rx_napi);
			}
#else
			emac_rx_clean_desc(priv);
#endif
		}

#ifdef EMAC_DEBUG
		if (status & TSO_AP_RX_INTR_ENA_CSUM_ERR)
			pr_err("rx checksum err irq\n");
#endif
		/* clear rx status */
		emac_wr_tso(priv, TSO_AP_RX_INTR_STS, status);
	}

	/* handle tx */
	status = emac_rd_tso(priv, TSO_AP_TX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_TX_INTR_STS=0x%x\n", status);
		if (status & TSO_AP_TX_INTR_TSO_DONE) {
			emac_print("TX TSO done\n");
			emac_dma_start_transmit(priv);
		}

		if (status & TSO_AP_TX_INTR_CSUM_DONE) {
			emac_print("TX checksum done\n");
			emac_dma_start_transmit(priv);
		}

		/* clear tx status */
		emac_wr_tso(priv, TSO_AP_TX_INTR_STS, status);
	}

	/* handle err */
	status = emac_rd_tso(priv, TSO_ERR_INTR_STS);
	if (status) {
		pr_err("TSO: TX/RX ERR, status=0x%x\n", status);
		emac_wr_tso(priv, TSO_ERR_INTR_STS, status);
	}

	return IRQ_HANDLED;
}
1363
1364
1365/* Name emac_interrupt_handler
1366 * Arguments irq : irq number for which the interrupt is fired
1367 * dev_id : pointer was passed to request_irq and same pointer is passed
1368 * back to handler
1369 * Return irqreturn_t : integer value
1370 * Description Interrupt handler routine for interrupts from target for RX packets indication.
1371 */
static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;
	u32 clr = 0;

	/* read the status register for IRQ received */
	status = emac_rd(priv, DMA_STATUS_IRQ);

	/* Check if emac is up */
	if (test_bit(EMAC_DOWN, &priv->state)) {
		/* Interface is down: just ack everything and bail.
		 * NOTE(review): 0x1F7 presumably covers all handled DMA
		 * status bits — confirm against the register layout. */
		emac_wr(priv, DMA_STATUS_IRQ, status & 0x1F7);
		return IRQ_HANDLED;
	}

	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
#ifdef CONFIG_ASR_EMAC_NAPI
		/* Mask TX interrupts and defer cleanup to TX NAPI. */
		if (likely(napi_schedule_prep(&priv->tx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 1);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->tx_napi);
		}
#else
		emac_tx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;

	if (status & (MREGBIT_RECEIVE_TRANSFER_DONE_IRQ |
		      MREGBIT_RECEIVE_MISSED_FRAME_IRQ)) {
		if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ)
			clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;

		if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
			clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;

		/* With TSO the offload engine must be poked to resume RX. */
		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);

#ifdef CONFIG_ASR_EMAC_NAPI
		/* Mask RX interrupts and defer cleanup to RX NAPI. */
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 0);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->rx_napi);
		}
#else
		emac_rx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;

	/* Acknowledge only the bits we actually handled. */
	emac_wr(priv, DMA_STATUS_IRQ, clr);

	return IRQ_HANDLED;
}
1445
1446/* Name emac_command_options
1447 * Arguments priv : pointer to driver private data structure
1448 * Return none
1449 * Description This function actually handles the command line para passed
1450 * when the driver is loaded at the command prompt.
1451 * It parses the parameters and validates them for valid values.
1452 */
1453void emac_command_options(struct emac_priv *priv)
1454{
1455 int pages = totalram_pages();
1456
1457 if (pages <= (EMAC_SMALL_RING_MEM_LIMIT >> PAGE_SHIFT))
1458 priv->rx_ring.total_cnt = EMAC_SMALL_RX_RING_SIZE;
1459 else
1460 priv->rx_ring.total_cnt = EMAC_RX_RING_SIZE;
1461 priv->tx_ring.total_cnt = EMAC_TX_RING_SIZE;
1462
1463 pr_info("emac: rx_ring=%d, tx_ring=%d, pages=%d\n",
1464 priv->rx_ring.total_cnt, priv->tx_ring.total_cnt, pages);
1465}
1466
1467/* Name emac_configure_tx
1468 * Arguments priv : pointer to driver private data structure
1469 * Return none
1470 * Description Configures the transmit unit of the device
1471 */
1472static void emac_configure_tx(struct emac_priv *priv)
1473{
1474 u32 val;
1475
1476 /* set the transmit base address */
1477 val = (u32)(priv->tx_ring.desc_dma_addr);
1478
1479 emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
1480
1481 /* Tx Inter Packet Gap value and enable the transmit */
1482 val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
1483 val &= (~MREGBIT_IFG_LEN);
1484 val |= MREGBIT_TRANSMIT_ENABLE;
1485 val |= MREGBIT_TRANSMIT_AUTO_RETRY;
1486 emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
1487
1488 emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00);
1489
1490 /* start tx dma */
1491 val = emac_rd(priv, DMA_CONTROL);
1492 val |= MREGBIT_START_STOP_TRANSMIT_DMA;
1493 emac_wr(priv, DMA_CONTROL, val);
1494}
1495
1496/* Name emac_configure_rx
1497 * Arguments priv : pointer to driver private data structure
1498 * Return none
1499 * Description Configures the receive unit of the device
1500 */
1501static void emac_configure_rx(struct emac_priv *priv)
1502{
1503 u32 val;
1504
1505 /* set the receive base address */
1506 val = (u32)(priv->rx_ring.desc_dma_addr);
1507 emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
1508
1509 /* enable the receive */
1510 val = emac_rd(priv, MAC_RECEIVE_CONTROL);
1511 val |= MREGBIT_RECEIVE_ENABLE;
1512 val |= MREGBIT_STORE_FORWARD;
1513 val |= MREGBIT_ACOOUNT_VLAN;
1514 emac_wr(priv, MAC_RECEIVE_CONTROL, val);
1515
1516 /* start rx dma */
1517 val = emac_rd(priv, DMA_CONTROL);
1518 val |= MREGBIT_START_STOP_RECEIVE_DMA;
1519 emac_wr(priv, DMA_CONTROL, val);
1520}
1521
1522/* Name emac_clean_tx_desc_ring
1523 * Arguments priv : pointer to driver private data structure
1524 * Return none
1525 * Description Freeing the TX resources allocated earlier.
1526 */
1527static void emac_clean_tx_desc_ring(struct emac_priv *priv)
1528{
1529 struct emac_desc_ring *tx_ring = &priv->tx_ring;
1530 struct emac_desc_buffer *tx_buf;
1531 u32 i;
1532
1533 /* Free all the Tx ring sk_buffs */
1534 for (i = 0; i < tx_ring->total_cnt; i++) {
1535 tx_buf = &tx_ring->desc_buf[i];
1536
1537 if (tx_buf->dma_addr) {
1538 dma_unmap_page(&priv->pdev->dev,
1539 tx_buf->dma_addr,
1540 tx_buf->dma_len,
1541 DMA_TO_DEVICE);
1542 tx_buf->dma_addr = 0;
1543 }
1544
1545 if (tx_buf->skb) {
1546 dev_kfree_skb_any(tx_buf->skb);
1547 tx_buf->skb = NULL;
1548 }
1549 }
1550
1551 tx_ring->nxt_use = 0;
1552 tx_ring->nxt_clean = 0;
1553}
1554
1555/* Name emac_clean_rx_desc_ring
1556 * Arguments priv : pointer to driver private data structure
1557 * Return none
1558 * Description Freeing the RX resources allocated earlier.
1559 */
1560static void emac_clean_rx_desc_ring(struct emac_priv *priv)
1561{
1562 struct emac_desc_ring *rx_ring;
1563 struct emac_desc_buffer *rx_buf;
1564 u32 i;
1565
1566 rx_ring = &priv->rx_ring;
1567
1568 /* Free all the Rx ring sk_buffs */
1569 for (i = 0; i < rx_ring->total_cnt; i++) {
1570 rx_buf = &rx_ring->desc_buf[i];
1571 if (rx_buf->skb) {
1572 emac_unmap_single(&priv->pdev->dev,
1573 rx_buf->dma_addr,
1574 rx_buf->dma_len,
1575 DMA_FROM_DEVICE);
1576 dev_kfree_skb(rx_buf->skb);
1577 rx_buf->skb = NULL;
1578 }
1579
1580 if (rx_buf->buff_addr) {
1581#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
1582 kfree(rx_buf->buff_addr);
1583#endif
1584 rx_buf->buff_addr = NULL;
1585 }
1586 }
1587
1588 rx_ring->nxt_clean = 0;
1589 rx_ring->nxt_use = 0;
1590}
1591
1592void emac_ptp_init(struct emac_priv *priv)
1593{
1594 int ret;
1595
1596 if (priv->ptp_support) {
1597 ret = clk_prepare_enable(priv->ptp_clk);
1598 if (ret < 0) {
1599 pr_warning("ptp clock failed to enable \n");
1600 priv->ptp_clk = NULL;
1601 }
1602
1603 emac_ptp_register(priv);
1604
1605 if (IS_ERR_OR_NULL(priv->ptp_clock)) {
1606 priv->ptp_support = 0;
1607 pr_warning("disable PTP due to clock not enabled\n");
1608 }
1609 }
1610}
1611
1612void emac_ptp_deinit(struct emac_priv *priv)
1613{
1614 if (priv->ptp_support) {
1615 if (priv->ptp_clk)
1616 clk_disable_unprepare(priv->ptp_clk);
1617
1618 emac_ptp_unregister(priv);
1619 }
1620}
1621
1622static void emac_rx_timer_arm(struct emac_priv *priv)
1623{
1624 u32 rx_fill_timer = EMAC_RX_FILL_TIMER_US;
1625
1626 if (!rx_fill_timer)
1627 return;
1628
1629 if (hrtimer_is_queued(&priv->rx_timer))
1630 return;
1631
1632 hrtimer_start(&priv->rx_timer,
1633 ns_to_ktime(rx_fill_timer) * NSEC_PER_USEC,
1634 HRTIMER_MODE_REL);
1635}
1636
1637static enum hrtimer_restart emac_rx_timer(struct hrtimer *t)
1638{
1639 struct emac_priv *priv = container_of(t, struct emac_priv, rx_timer);
1640 struct napi_struct *napi = &priv->rx_napi;
1641
1642 if (likely(napi_schedule_prep(napi))) {
1643 unsigned long flags;
1644
1645 spin_lock_irqsave(&priv->intr_lock, flags);
1646 emac_disable_interrupt(priv, 0);
1647 spin_unlock_irqrestore(&priv->intr_lock, flags);
1648 __napi_schedule(napi);
1649 }
1650
1651 return HRTIMER_NORESTART;
1652}
1653
1654static void emac_tx_timer_arm(struct emac_priv *priv)
1655{
1656 u32 tx_coal_timer = EMAC_TX_COAL_TIMER_US;
1657
1658 if (!tx_coal_timer)
1659 return;
1660
1661 if (hrtimer_is_queued(&priv->tx_timer))
1662 return;
1663
1664 hrtimer_start(&priv->tx_timer,
1665 ns_to_ktime(tx_coal_timer) * NSEC_PER_USEC,
1666 HRTIMER_MODE_REL);
1667}
1668
1669static enum hrtimer_restart emac_tx_timer(struct hrtimer *t)
1670{
1671 struct emac_priv *priv = container_of(t, struct emac_priv, tx_timer);
1672 struct napi_struct *napi = &priv->tx_napi;
1673
1674 if (priv->tso) {
1675 emac_dma_start_transmit(priv);
1676 return HRTIMER_NORESTART;
1677 }
1678
1679 if (likely(napi_schedule_prep(napi))) {
1680 unsigned long flags;
1681
1682 spin_lock_irqsave(&priv->intr_lock, flags);
1683 emac_disable_interrupt(priv, 1);
1684 spin_unlock_irqrestore(&priv->intr_lock, flags);
1685 __napi_schedule(napi);
1686 }
1687
1688 return HRTIMER_NORESTART;
1689}
1690
1691
/* Reset and configure the TSO/checksum-offload engine: program the RX/TX
 * descriptor ring bases and the TX header staging area, then enable the
 * engine and its interrupts. Returns 0 on success, -ENOMEM if the header
 * staging buffer cannot be allocated.
 */
static int emac_tso_config(struct emac_priv *priv)
{
	struct emac_desc_ring * tx_ring = &priv->tx_ring;
	u32 val = 0;

	/* reset */
	emac_wr_tso(priv, TSO_CONFIG, TSO_CONFIG_RST);
	mdelay(1);
	emac_wr_tso(priv, TSO_CONFIG, 0x0);

	emac_wr_tso(priv, TSO_DMA_CONFIG, 0x2 << 8);

	/* rx */
	/* set the receive descriptor base address.
	 * NOTE(review): the >> 1 suggests the register takes the address
	 * in half-word units — confirm against the TSO register spec. */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_RX_DESC_BA, val >> 1);
	emac_wr_tso(priv, TSO_RX_AUTO_POLL_CNT, 0x0);

	/* tx */
	val = (u32)(priv->tx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_TX_DESC_BA, val >> 1);

	/* One 0x80-byte header staging slot per TX descriptor. */
	priv->tso_hdr = dma_alloc_coherent(&priv->pdev->dev,
					   tx_ring->total_cnt * 0x80,
					   &priv->tso_hdr_addr,
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->tso_hdr) {
		pr_err("Memory allocation failed for tso_hdr\n");
		return -ENOMEM;
	}

	val = (u32)(priv->tso_hdr_addr);
	emac_wr_tso(priv, TSO_TX_HDR_BA, val >> 1);
	emac_wr_tso(priv, TSO_TX_HDR_CTR, tx_ring->total_cnt);
	emac_wr_tso(priv, TSO_TX_AUTO_POLL_CNT, 0x0);

	/* enable tx/rx tso/coe */
	emac_wr_tso(priv, TSO_CONFIG,
		TSO_CONFIG_RX_EN | TSO_CONFIG_TX_EN | TSO_CONFIG_RX_CSUM_EN);

	/* enable tx/rx/err interrupt */
	emac_wr_tso(priv, TSO_ERR_INTR_ENA, 0xF0007);
	emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
		TSO_AP_RX_INTR_ENA_CSUM_DONE | TSO_AP_RX_INTR_ENA_CSUM_ERR);
#if 1
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA,
		TSO_AP_TX_INTR_ENA_TSO_DONE | TSO_AP_TX_INTR_ENA_CSUM_DONE);
#else
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA, 0x0);
#endif
	return 0;
}
1744
1745/* Name emac_up
1746 * Arguments priv : pointer to driver private data structure
1747 * Return Status: 0 - Success; non-zero - Fail
1748 * Description This function is called from emac_open and
1749 * performs the things when net interface is about to up.
1750 * It configues the Tx and Rx unit of the device and
1751 * registers interrupt handler.
1752 * It also starts one watchdog timer to monitor
1753 * the net interface link status.
1754 */
1755int emac_up(struct emac_priv *priv)
1756{
1757 struct net_device *ndev = priv->ndev;
1758 int ret, val;
b.liub17525e2025-05-14 17:22:29 +08001759#if CLOSE_AIB_POWER_DOMAIN
1760 void __iomem *aib_emac_io;
1761 void __iomem *apbc_asfar;
1762 u32 tmp;
1763#endif
b.liue9582032025-04-17 19:18:16 +08001764#ifdef WAN_LAN_AUTO_ADAPT
1765 u32 phy_id;
1766#endif
1767
1768 priv->hw_stats->tx_tso_pkts = 0;
1769 priv->hw_stats->tx_tso_bytes = 0;
1770
1771 ret = emac_phy_connect(ndev);
1772 if (ret) {
1773 pr_err("%s phy_connet failed\n", __func__);
b.liub17525e2025-05-14 17:22:29 +08001774#if CLOSE_AIB_POWER_DOMAIN
1775 printk("===> enter emac_close_aib_power_domain\n");
1776 aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
1777 apbc_asfar = ioremap(APBC_ASFAR, 8);
1778 writel(AKEY_ASFAR, apbc_asfar);
1779 writel(AKEY_ASSAR, apbc_asfar + 4);
1780 writel(0x81, aib_emac_io);
1781 writel(AKEY_ASFAR, apbc_asfar);
1782 writel(AKEY_ASSAR, apbc_asfar + 4);
1783 tmp = readl(aib_emac_io);
1784 iounmap(apbc_asfar);
1785 iounmap(aib_emac_io);
1786 printk("===> exit emac_close_aib_power_domain = 0x%x\n", tmp);
1787#endif
b.liue9582032025-04-17 19:18:16 +08001788 return ret;
1789 }
1790
1791 if (!priv->en_suspend)
1792 pm_stay_awake(&priv->pdev->dev);
1793 pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
1794
1795 clk_phase_set(priv, TX_PHASE);
1796 clk_phase_set(priv, RX_PHASE);
1797
1798 /* init hardware */
1799 emac_init_hw(priv);
1800
1801 emac_ptp_init(priv);
1802
1803 emac_set_mac_addr(priv, ndev->dev_addr);
1804
1805 emac_set_fc_source_addr(priv, ndev->dev_addr);
1806
1807 /* configure transmit unit */
1808 emac_configure_tx(priv);
1809 /* configure rx unit */
1810 emac_configure_rx(priv);
1811
1812 /* allocate buffers for receive descriptors */
1813 emac_alloc_rx_desc_buffers(priv);
1814
1815 if (ndev->phydev)
1816 phy_start(ndev->phydev);
1817
1818 /* allocates interrupt resources and
1819 * enables the interrupt line and IRQ handling
1820 */
1821 ret = request_irq(priv->irq, emac_interrupt_handler,
1822 IRQF_SHARED, ndev->name, ndev);
1823 if (ret) {
1824 pr_err("request_irq failed, ret=%d\n", ret);
1825 goto request_irq_failed;
1826 }
1827
1828 if (priv->irq_wakeup) {
1829 ret = request_irq(priv->irq_wakeup, emac_wakeup_handler,
1830 IRQF_SHARED, ndev->name, ndev);
1831 if (ret) {
1832 pr_err("request wakeup_irq failed, ret=%d\\n", ret);
1833 goto request_wakeup_irq_failed;
1834 }
1835 }
1836
1837 if (priv->irq_tso) {
1838 ret = request_irq(priv->irq_tso, emac_irq_tso,
1839 IRQF_SHARED, "emac_tso", ndev);
1840 if (ret) {
1841 pr_err("request tso failed, ret=%d\\n", ret);
1842 goto request_tso_irq_failed;
1843 }
1844 }
1845
1846 if (priv->fix_link)
1847 emac_set_speed_duplex(priv);
1848
1849 clear_bit(EMAC_DOWN, &priv->state);
1850
1851 /* enable mac interrupt */
1852 emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
1853
1854 /* both rx tx */
1855 val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
1856 MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
1857 MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
1858#if 0
1859 val |= MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
1860 MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
1861 MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE;
1862#endif
1863 emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
1864
1865#ifdef CONFIG_ASR_EMAC_NAPI
1866 napi_enable(&priv->rx_napi);
1867 napi_enable(&priv->tx_napi);
1868#endif
1869
1870 if (priv->fix_link && !netif_carrier_ok(ndev))
1871 netif_carrier_on(ndev);
1872
1873#ifdef WAN_LAN_AUTO_ADAPT
1874 phy_id = ndev->phydev->phy_id;
1875 if(phy_id == IP175D_PHY_ID)
1876 emac_sig_workq(CARRIER_UP_IP175D, 0);
1877 else
1878 emac_sig_workq(CARRIER_UP, 0);
1879#endif
1880
1881 hrtimer_init(&priv->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1882 priv->tx_timer.function = emac_tx_timer;
1883 hrtimer_init(&priv->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1884 priv->rx_timer.function = emac_rx_timer;
1885
1886 if (priv->tso)
1887 emac_tso_config(priv);
1888
1889 netif_tx_start_all_queues(ndev);
1890 return 0;
1891
1892request_tso_irq_failed:
1893 if (priv->irq_wakeup)
1894 free_irq(priv->irq_wakeup, ndev);
1895
1896request_wakeup_irq_failed:
1897 free_irq(priv->irq, ndev);
1898
1899request_irq_failed:
1900 if (ndev->phydev) {
1901 phy_stop(ndev->phydev);
1902 phy_disconnect(ndev->phydev);
1903 }
1904
1905 return ret;
1906}
1907
1908/* Name emac_down
1909 * Arguments priv : pointer to driver private data structure
1910 * Return Status: 0 - Success; non-zero - Fail
1911 * Description This function is called from emac_close and
1912 * performs the things when net interface is about to down.
1913 * It frees the irq, removes the various timers.
1914 * It sets the net interface off and
1915 * resets the hardware. Cleans the Tx and Rx
1916 * ring descriptor.
1917 */
1918int emac_down(struct emac_priv *priv)
1919{
1920 struct net_device *ndev = priv->ndev;
1921#ifdef WAN_LAN_AUTO_ADAPT
1922 u32 phy_id;
1923
1924 priv->dhcp = 0;
1925 priv->vlan_port = -1;
1926 priv->link = 0;
1927 phy_id = ndev->phydev->phy_id;
1928 if(priv->dhcp_delaywork){
1929 cancel_delayed_work(&priv->dhcp_work);
1930 priv->dhcp_delaywork = 0;
1931 }
1932#endif
1933 set_bit(EMAC_DOWN, &priv->state);
1934
1935 netif_tx_disable(ndev);
1936
1937 hrtimer_cancel(&priv->tx_timer);
1938 hrtimer_cancel(&priv->rx_timer);
1939 /* Stop and disconnect the PHY */
1940 if (ndev->phydev) {
1941 phy_stop(ndev->phydev);
1942 phy_disconnect(ndev->phydev);
1943 }
1944
1945 if (!priv->fix_link) {
1946 priv->duplex = DUPLEX_UNKNOWN;
1947 priv->speed = SPEED_UNKNOWN;
1948 }
1949
1950#ifdef CONFIG_ASR_EMAC_NAPI
1951 napi_disable(&priv->rx_napi);
1952 napi_disable(&priv->tx_napi);
1953#endif
1954 emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
1955 emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);
1956
1957 free_irq(priv->irq, ndev);
1958 if (priv->irq_wakeup)
1959 free_irq(priv->irq_wakeup, ndev);
1960
1961 emac_ptp_deinit(priv);
1962
1963 emac_reset_hw(priv);
1964 netif_carrier_off(ndev);
1965
1966#ifdef WAN_LAN_AUTO_ADAPT
1967 if(phy_id == IP175D_PHY_ID)
1968 emac_sig_workq(CARRIER_DOWN_IP175D, 0);
1969 else
1970 emac_sig_workq(CARRIER_DOWN, 0);
1971#endif
1972
1973#ifdef CONFIG_ASR_EMAC_DDR_QOS
1974 flush_work(&priv->qos_work);
1975 pm_qos_update_request(&priv->clk_scaling.ddr_qos, PM_QOS_DEFAULT_VALUE);
1976#endif
1977 pm_qos_update_request(&priv->pm_qos_req,
1978 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
1979
1980 if (!priv->en_suspend)
1981 pm_relax(&priv->pdev->dev);
1982
1983 if (priv->tso) {
1984 dma_free_coherent(&priv->pdev->dev,
1985 priv->tx_ring.total_cnt * 0x80,
1986 priv->tso_hdr,
1987 priv->tso_hdr_addr);
1988 }
1989
1990 return 0;
1991}
1992
1993/* Name emac_alloc_tx_resources
1994 * Arguments priv : pointer to driver private data structure
1995 * Return Status: 0 - Success; non-zero - Fail
1996 * Description Allocates TX resources and getting virtual & physical address.
1997 */
1998int emac_alloc_tx_resources(struct emac_priv *priv)
1999{
2000 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2001 struct platform_device *pdev = priv->pdev;
2002 u32 size;
2003
2004 size = sizeof(struct emac_desc_buffer) * tx_ring->total_cnt;
2005
2006 /* allocate memory */
2007 tx_ring->desc_buf = kzalloc(size, GFP_KERNEL);
2008 if (!tx_ring->desc_buf) {
2009 pr_err("Memory allocation failed for the Transmit descriptor buffer\n");
2010 return -ENOMEM;
2011 }
2012
2013 memset(tx_ring->desc_buf, 0, size);
2014
2015 tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc);
2016
2017 EMAC_ROUNDUP(tx_ring->total_size, 1024);
2018
2019 if (priv->sram_pool) {
2020 tx_ring->desc_addr =
2021 (void *)gen_pool_dma_alloc(
2022 priv->sram_pool, tx_ring->total_size,
2023 &tx_ring->desc_dma_addr);
2024 tx_ring->in_sram = true;
2025 }
2026
2027 if (!tx_ring->desc_addr) {
2028 tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2029 tx_ring->total_size,
2030 &tx_ring->desc_dma_addr,
2031 GFP_KERNEL | __GFP_ZERO);
2032 if (!tx_ring->desc_addr) {
2033 pr_err("Memory allocation failed for the Transmit descriptor ring\n");
2034 kfree(tx_ring->desc_buf);
2035 return -ENOMEM;
2036 }
2037
2038 if (priv->sram_pool) {
2039 pr_err("sram pool left size not enough, tx fallback\n");
2040 tx_ring->in_sram = false;
2041 }
2042 }
2043
2044 memset(tx_ring->desc_addr, 0, tx_ring->total_size);
2045
2046 tx_ring->nxt_use = 0;
2047 tx_ring->nxt_clean = 0;
2048
2049 return 0;
2050}
2051
2052/* Name emac_alloc_rx_resources
2053 * Arguments priv : pointer to driver private data structure
2054 * Return Status: 0 - Success; non-zero - Fail
2055 * Description Allocates RX resources and getting virtual & physical address.
2056 */
2057int emac_alloc_rx_resources(struct emac_priv *priv)
2058{
2059 struct emac_desc_ring *rx_ring = &priv->rx_ring;
2060 struct platform_device *pdev = priv->pdev;
2061 u32 buf_len;
2062
2063 buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt;
2064
2065 rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL);
2066 if (!rx_ring->desc_buf) {
2067 pr_err("Memory allocation failed for the Receive descriptor buffer\n");
2068 return -ENOMEM;
2069 }
2070
2071 memset(rx_ring->desc_buf, 0, buf_len);
2072
2073 /* round up to nearest 4K */
2074 rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc);
2075
2076 EMAC_ROUNDUP(rx_ring->total_size, 1024);
2077
2078 if (priv->sram_pool) {
2079 rx_ring->desc_addr =
2080 (void *)gen_pool_dma_alloc(
2081 priv->sram_pool, rx_ring->total_size,
2082 &rx_ring->desc_dma_addr);
2083 rx_ring->in_sram = true;
2084 }
2085
2086 if (!rx_ring->desc_addr) {
2087 rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2088 rx_ring->total_size,
2089 &rx_ring->desc_dma_addr,
2090 GFP_KERNEL | __GFP_ZERO);
2091 if (!rx_ring->desc_addr) {
2092 pr_err("Memory allocation failed for the Receive descriptor ring\n");
2093 kfree(rx_ring->desc_buf);
2094 return -ENOMEM;
2095 }
2096
2097 if (priv->sram_pool) {
2098 pr_err("sram pool left size not enough, rx fallback\n");
2099 rx_ring->in_sram = false;
2100 }
2101 }
2102
2103 memset(rx_ring->desc_addr, 0, rx_ring->total_size);
2104
2105 rx_ring->nxt_use = 0;
2106 rx_ring->nxt_clean = 0;
2107
2108 return 0;
2109}
2110
2111/* Name emac_free_tx_resources
2112 * Arguments priv : pointer to driver private data structure
2113 * Return none
2114 * Description Frees the Tx resources allocated
2115 */
2116void emac_free_tx_resources(struct emac_priv *priv)
2117{
2118 emac_clean_tx_desc_ring(priv);
2119 kfree(priv->tx_ring.desc_buf);
2120 priv->tx_ring.desc_buf = NULL;
2121 if (priv->tx_ring.in_sram)
2122 gen_pool_free(priv->sram_pool,
2123 (unsigned long) priv->tx_ring.desc_addr,
2124 priv->tx_ring.total_size);
2125 else
2126 dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size,
2127 priv->tx_ring.desc_addr,
2128 priv->tx_ring.desc_dma_addr);
2129 priv->tx_ring.desc_addr = NULL;
2130}
2131
2132/* Name emac_free_rx_resources
2133 * Arguments priv : pointer to driver private data structure
2134 * Return none
2135 * Description Frees the Rx resources allocated
2136 */
2137void emac_free_rx_resources(struct emac_priv *priv)
2138{
2139 emac_clean_rx_desc_ring(priv);
2140 kfree(priv->rx_ring.desc_buf);
2141 priv->rx_ring.desc_buf = NULL;
2142 if (priv->rx_ring.in_sram)
2143 gen_pool_free(priv->sram_pool,
2144 (unsigned long) priv->rx_ring.desc_addr,
2145 priv->rx_ring.total_size);
2146 else
2147 dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size,
2148 priv->rx_ring.desc_addr,
2149 priv->rx_ring.desc_dma_addr);
2150 priv->rx_ring.desc_addr = NULL;
2151}
2152
2153/* Name emac_open
2154 * Arguments pstNetdev : pointer to net_device structure
2155 * Return Status: 0 - Success; non-zero - Fail
2156 * Description This function is called when net interface is made up.
2157 * Setting up Tx and Rx
2158 * resources and making the interface up.
2159 */
/* Name		emac_open
 * Arguments	ndev : pointer to net_device structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	ndo_open callback: allocates TX then RX ring resources and
 *		brings the interface up, unwinding on any failure.
 *		Fix: "net intrface" typo corrected in the error message.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ret;

	ret = emac_alloc_tx_resources(priv);
	if (ret) {
		pr_err("Error in setting up the Tx resources\n");
		goto emac_alloc_tx_resource_fail;
	}

	ret = emac_alloc_rx_resources(priv);
	if (ret) {
		pr_err("Error in setting up the Rx resources\n");
		goto emac_alloc_rx_resource_fail;
	}

	ret = emac_up(priv);
	if (ret) {
		pr_err("Error in making the net interface up\n");
		goto emac_up_fail;
	}
	return 0;

emac_up_fail:
	emac_free_rx_resources(priv);
emac_alloc_rx_resource_fail:
	emac_free_tx_resources(priv);
emac_alloc_tx_resource_fail:
	emac_reset_hw(priv);
	return ret;
}
2192
2193/* Name emac_close
2194 * Arguments pstNetdev : pointer to net_device structure
2195 * Return Status: 0 - Success; non-zero - Fail
2196 * Description This function is called when net interface is made down.
2197 * It calls the appropriate functions to
2198 * free Tx and Rx resources.
2199 */
/* ndo_stop: quiesce the MAC first, then release both rings. */
static int emac_close(struct net_device *ndev)
{
	struct emac_priv *priv;

	priv = netdev_priv(ndev);

	emac_down(priv);
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
2210
2211/* Name emac_tx_clean_desc
2212 * Arguments priv : pointer to driver private data structure
 * Return		number of Tx descriptors reclaimed this call
2214 * Description
2215 */
/*
 * Reclaim TX descriptors released by the DMA engine: unmap buffers,
 * free the transmitted skbs and wake the queue if enough room opened up.
 * Returns the number of descriptors cleaned (clamped to budget in NAPI
 * builds).
 */
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_tx_clean_desc(struct emac_priv *priv, int budget)
#else
static int emac_tx_clean_desc(struct emac_priv *priv)
#endif
{
	struct emac_desc_ring *tx_ring;
	struct emac_tx_desc *tx_desc, *end_desc;
	struct emac_desc_buffer *tx_buf;
	struct net_device *ndev = priv->ndev;
	u32 i, u32LastIndex;	/* i: index being cleaned; u32LastIndex: last desc of current packet */
	u8 u8Cleaned;		/* true once the whole packet has been reclaimed */
	unsigned int count = 0;	/* descriptors cleaned this call */

	tx_ring = &priv->tx_ring;
	i = tx_ring->nxt_clean;
	do {
		/* Nothing pending between nxt_clean and nxt_use. */
		if (i == tx_ring->nxt_use)
			break;

		/*
		 * nxt_watch on a packet's first descriptor records the index
		 * of its last descriptor; the packet is only reclaimable once
		 * the hardware has released that final descriptor.
		 */
		u32LastIndex = tx_ring->desc_buf[i].nxt_watch;
		end_desc = emac_get_tx_desc(priv, u32LastIndex);
		if (end_desc->OWN == 1 ||
		    (priv->tso && (end_desc->tso || end_desc->coe)))
			break;

		u8Cleaned = false;
		/* Reclaim every descriptor of this packet, counting each. */
		for ( ; !u8Cleaned; count++) {
			tx_desc = emac_get_tx_desc(priv, i);
			tx_buf = &tx_ring->desc_buf[i];

			/* Deliver a hardware TX timestamp if one was taken. */
			emac_get_tx_hwtstamp(priv, tx_buf->skb);

			/* own bit will be reset to 0 by dma
			 * once packet is transmitted
			 */
			if (tx_buf->dma_addr) {
				/* NOTE(review): linear buffers were mapped via
				 * emac_map_single but are unmapped here with
				 * dma_unmap_page — presumably equivalent on
				 * this platform; confirm.
				 */
				dma_unmap_page(&priv->pdev->dev,
					       tx_buf->dma_addr,
					       tx_buf->dma_len,
					       DMA_TO_DEVICE);
				tx_buf->dma_addr = 0;
			}
			if (tx_buf->skb) {
				dev_kfree_skb_any(tx_buf->skb);
				tx_buf->skb = NULL;
			}
			if (tx_buf->buff_addr)
				tx_buf->buff_addr = NULL;

			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
			u8Cleaned = (i == u32LastIndex);
			if (++i == tx_ring->total_cnt)
				i = 0;	/* ring wrap */
		}

#ifdef CONFIG_ASR_EMAC_NAPI
		/* Budget is checked per packet, so count may overshoot it;
		 * clamp before reporting back to NAPI.
		 */
		if (count >= budget) {
			count = budget;
			break;
		}
#endif
	} while (1);
	tx_ring->nxt_clean = i;

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_lock(&priv->spTxLock);
#endif
	/* Restart the stack's queue once enough descriptors are free. */
	if (unlikely(count && netif_queue_stopped(ndev) &&
	    netif_carrier_ok(ndev) &&
	    EMAC_DESC_UNUSED(tx_ring) >= EMAC_TX_WAKE_THRESHOLD))
		netif_wake_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock(&priv->spTxLock);
#endif
	return count;
}
2293
2294static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc)
2295{
2296 /* if last descritpor isn't set, so we drop it*/
2297 if (!dsc->LastDescriptor) {
2298 netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n");
2299 return frame_discard;
2300 }
2301
2302 /*
2303 * A Frame that is less than 64-bytes (from DA thru the FCS field)
2304 * is considered as Runt Frame.
2305 * Most of the Runt Frames happen because of collisions.
2306 */
2307 if (dsc->ApplicationStatus & EMAC_RX_FRAME_RUNT) {
2308 netdev_dbg(priv->ndev, "rx frame less than 64.\n");
2309 return frame_discard;
2310 }
2311
2312 /*
2313 * When the frame fails the CRC check,
2314 * the frame is assumed to have the CRC error
2315 */
2316 if (dsc->ApplicationStatus & EMAC_RX_FRAME_CRC_ERR) {
2317 netdev_dbg(priv->ndev, "rx frame crc error\n");
2318 return frame_discard;
2319 }
2320
2321 if (priv->tso && dsc->csum_res == EMAC_CSUM_FAIL) {
2322 netdev_dbg(priv->ndev, "COE: rx frame checksum error\n");
2323 return frame_discard;
2324 }
2325
2326 /*
2327 * When the length of the frame exceeds
2328 * the Programmed Max Frame Length
2329 */
2330 if (dsc->ApplicationStatus & EMAC_RX_FRAME_MAX_LEN_ERR) {
2331 netdev_dbg(priv->ndev, "rx frame too long\n");
2332 return frame_discard;
2333 }
2334
2335 /*
2336 * frame reception is truncated at that point and
2337 * frame is considered to have Jabber Error
2338 */
2339 if (dsc->ApplicationStatus & EMAC_RX_FRAME_JABBER_ERR) {
2340 netdev_dbg(priv->ndev, "rx frame has been truncated\n");
2341 return frame_discard;
2342 }
2343
2344 /* this bit is only for 802.3 Type Frames */
2345 if (dsc->ApplicationStatus & EMAC_RX_FRAME_LENGTH_ERR) {
2346 netdev_dbg(priv->ndev, "rx frame length err for 802.3\n");
2347 return frame_discard;
2348 }
2349
2350 if (dsc->FramePacketLength <= ETHERNET_FCS_SIZE ||
2351 dsc->FramePacketLength > EMAC_RX_BUFFER_2048) {
2352 netdev_dbg(priv->ndev, "rx frame len too small or too long\n");
2353 return frame_discard;
2354 }
2355 return frame_ok;
2356}
2357
2358/* Name emac_rx_clean_desc
2359 * Arguments priv : pointer to driver private data structure
 * Return		number of packets delivered to the stack (NAPI builds), else 0
2361 * Description
2362 */
/*
 * Harvest frames from descriptors the DMA engine has released, queue the
 * good ones internally, refill the ring, then deliver the queued skbs to
 * the stack. Delivery happens after the refill so descriptors return to
 * the hardware as early as possible. Returns packets delivered (NAPI
 * builds) or 0.
 */
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
#else
static int emac_rx_clean_desc(struct emac_priv *priv)
#endif
{
	struct emac_desc_ring *rx_ring;
	struct emac_desc_buffer *rx_buf;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc *rx_desc;
	struct sk_buff *skb = NULL;
	int status;
#ifdef CONFIG_ASR_EMAC_NAPI
	u32 receive_packet = 0;	/* frames handed to the stack this poll */
#endif
	u32 i;			/* ring index currently being cleaned */
	u32 u32Len;		/* frame length minus trailing FCS */
	u32 u32Size;
	u8 *pu8Data;
#ifdef WAN_LAN_AUTO_ADAPT
	int port = -1, vlan = -1;
	struct vlan_hdr *vhdr;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
#endif

	rx_ring = &priv->rx_ring;
	i = rx_ring->nxt_clean;
	rx_desc = emac_get_rx_desc(priv, i);
	u32Size = 0;

	/* Manual flow control: emit a pause frame when the ring fills up. */
	if (priv->pause.tx_pause && !priv->pause.fc_auto)
		emac_check_ring_and_send_pause(priv);

	/* Walk descriptors the hardware has given back (OWN cleared). */
	while (rx_desc->OWN == 0) {
		/* With RX checksum offload, wait for the csum verdict. */
		if (priv->tso && !rx_desc->csum_done)
			break;

		/* Cap the deferred-delivery queue at one ring's worth. */
		if (skb_queue_len(&priv->rx_skb) > priv->rx_ring.total_cnt)
			break;

		rx_buf = &rx_ring->desc_buf[i];
		if (!rx_buf->skb)
			break;

		emac_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				  rx_buf->dma_len, DMA_FROM_DEVICE);
		status = emac_rx_frame_status(priv, rx_desc);
		if (unlikely(status == frame_discard)) {
			ndev->stats.rx_dropped++;
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		} else {
			skb = rx_buf->skb;
			u32Len = rx_desc->FramePacketLength - ETHERNET_FCS_SIZE;

			pu8Data = skb_put(skb, u32Len);
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
			/* Copy mode: DMA landed in a bounce buffer; copy
			 * the payload into the skb.
			 */
			memcpy(pu8Data, (u8 *)rx_buf->buff_addr, u32Len);
#endif
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			emac_get_rx_hwtstamp(priv, rx_desc, skb);

			skb->protocol = eth_type_trans(skb, ndev);
			if (priv->tso)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

#ifdef WAN_LAN_AUTO_ADAPT
			{/* Special tag format: DA-SA-0x81-xx-data.
			    Bit 7-3 Packet Information
				- bit 4: Reserved
				- bit 3: Reserved
				- bit 2: Miss address table
				- bit 1: Security violation
				- bit 0: VLAN violation
			    Bit 2-0 Ingress Port number
				- b000: Disabled
				- b001: Port 0
				- b010: Port 1
				- b011: Port 2
				- b100: Port 3
				- b101: Port 4
				- Other: Reserved */
			/* Switch-tagged frame: recover ingress port, then
			 * treat the frame as 802.1Q.
			 */
			if(ntohs(skb->protocol)>>8 == 0x81) {
				port = ntohs(skb->protocol) & 0x7;
				if(port > 0 && port <= 0x5) {
					skb->protocol = htons(ETH_P_8021Q);
					port = port - 1;
				}
			}
			if (skb->protocol == htons(ETH_P_8021Q)) {
				vhdr = (struct vlan_hdr *) skb->data;
				vlan = ntohs(vhdr->h_vlan_TCI);
				iph = (struct iphdr *)(skb->data + VLAN_HLEN);
			} else if (skb->protocol == htons(ETH_P_IP))
				iph = (struct iphdr *)skb->data;

			/* Snoop DHCP server->client replies (sport 67,
			 * dport 68) to learn which port the WAN is on.
			 * Offset 242 is where the DHCP message-type option
			 * value sits in these frames.
			 */
			if (iph && iph->protocol == IPPROTO_UDP) {
				udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
				if ((htons(udph->dest) == 68 && htons(udph->source) == 67)) {
					u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
					u8 dhcp_type = *(udp_data + 242);
					if ((DHCP_ACK == dhcp_type || DHCP_OFFER == dhcp_type)
						&& (DHCP_SEND_REQ == priv->dhcp)) {
						priv->dhcp = DHCP_REC_RESP;
						if (ndev->phydev->phy_id == IP175D_PHY_ID)
							priv->vlan_port = port;
						else
							priv->vlan_port = -1;
					}
				}
			}
			}
#endif
			/* Defer delivery; see comment above the dequeue loop. */
			skb_queue_tail(&priv->rx_skb, skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;

		rx_desc = emac_get_rx_desc(priv, i);

		/* restart RX COE */
		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
	}

	rx_ring->nxt_clean = i;

	emac_alloc_rx_desc_buffers(priv);

	/*
	 * Since netif_rx may consume too much time, put this after
	 * emac_alloc_rx_desc_buffers so that RX DMA desc refill ASAP,
	 * reduce packet loss probability.
	 */
	while ((skb = skb_dequeue(&priv->rx_skb))) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += skb->len;
#ifdef CONFIG_ASR_EMAC_NAPI
		napi_gro_receive(&priv->rx_napi, skb);
#else
		netif_rx(skb);
#endif

#ifdef CONFIG_ASR_EMAC_NAPI
		receive_packet++;
		if (receive_packet >= budget)
			break;
#endif
	}

#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif

#ifdef CONFIG_ASR_EMAC_NAPI
	return receive_packet;
#else
	return 0;
#endif
}
2530
2531/* Name emac_alloc_rx_desc_buffers
2532 * Arguments priv : pointer to driver private data structure
 * Return		none
2534 * Description
2535 */
/*
 * Refill the RX ring: allocate an skb (and, in copy mode, a DMA bounce
 * buffer) for every empty slot starting at nxt_use, reinitialize the
 * descriptor and hand it back to the DMA engine by setting OWN.
 * Records allocation failures in priv->refill so a retry can be armed.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc_buffer *rx_buf;
	struct sk_buff *skb;
	struct emac_rx_desc *rx_desc;
	u32 i;
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
	void *buff;
#endif
	u32 buff_len;
	int fail_cnt = 0;	/* set when an skb allocation failed */

	i = rx_ring->nxt_use;
	rx_buf = &rx_ring->desc_buf[i];

	buff_len = priv->u32RxBufferLen;

	/* Fill slots until we reach one that already holds an skb. */
	while (!rx_buf->skb) {
		skb = emac_skbrb_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
		if (!skb) {
			/* Ring-buffer pool exhausted: fall back to the
			 * regular allocator, but only for the full-size ring.
			 */
			if (priv->rx_ring.total_cnt == EMAC_RX_RING_SIZE)
				skb = dev_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
			if (!skb) {
				fail_cnt++;
				pr_warn_ratelimited("emac sk_buff allocation failed\n");
				break;
			}
		}

		/* make buffer alignment */
		skb_reserve(skb, NET_IP_ALIGN + EMAC_EXTRA_ROOM);
		skb->dev = ndev;

#ifdef CONFIG_ASR_EMAC_RX_NO_COPY
		/* Zero-copy mode: DMA straight into the skb data area. */
		rx_buf->buff_addr = skb->data;
#else
		/* Copy mode: DMA into a dedicated bounce buffer that is
		 * reused across refills of this slot.
		 */
		if (!rx_buf->buff_addr) {
			buff = kmalloc(buff_len, GFP_ATOMIC | GFP_DMA);
			if (!buff) {
				pr_err("kmalloc failed\n");
				dev_kfree_skb(skb);
				break;
			}
			rx_buf->buff_addr = buff;
		}
#endif
		rx_buf->skb = skb;
		rx_buf->dma_len = buff_len;
		/* NOTE(review): the mapping result is not checked with
		 * dma_mapping_error() — confirm emac_map_single cannot
		 * fail on this platform.
		 */
		rx_buf->dma_addr = emac_map_single(&priv->pdev->dev,
						   rx_buf->buff_addr,
						   buff_len,
						   DMA_FROM_DEVICE);

		rx_desc = emac_get_rx_desc(priv, i);
		rx_desc->BufferAddr1 = rx_buf->dma_addr;
		rx_desc->BufferSize1 = rx_buf->dma_len;
		rx_desc->rx_timestamp = 0;
		rx_desc->ptp_pkt = 0;
		rx_desc->FirstDescriptor = 0;
		rx_desc->LastDescriptor = 0;
		rx_desc->FramePacketLength = 0;
		rx_desc->ApplicationStatus = 0;
		if (++i == rx_ring->total_cnt) {
			rx_desc->EndRing = 1;	/* last slot wraps the ring */
			i = 0;
		}

		/* Publish all descriptor fields before granting ownership
		 * to the DMA engine.
		 */
		wmb();
		rx_desc->OWN = 1;
		if (priv->tso)
			rx_desc->csum_done = 0;

		rx_buf = &rx_ring->desc_buf[i];
	}
	rx_ring->nxt_use = i;

	/* Remember allocation pressure so the poll path can arm a retry. */
	if (fail_cnt)
		priv->refill = 1;
	else
		priv->refill = 0;
	emac_dma_start_receive(priv);
}
2620
2621#ifdef CONFIG_ASR_EMAC_NAPI
2622static int emac_rx_poll(struct napi_struct *napi, int budget)
2623{
2624 struct emac_priv *priv = container_of(napi, struct emac_priv, rx_napi);
2625 int work_done;
2626
2627 work_done = emac_rx_clean_desc(priv, budget);
2628 if (work_done < budget && napi_complete_done(napi, work_done)) {
2629 unsigned long flags;
2630
2631 spin_lock_irqsave(&priv->intr_lock, flags);
2632 emac_enable_interrupt(priv, 0);
2633 spin_unlock_irqrestore(&priv->intr_lock, flags);
2634
2635 if (priv->refill)
2636 emac_rx_timer_arm(priv);
2637 }
2638
2639 return work_done;
2640}
2641
2642static int emac_tx_poll(struct napi_struct *napi, int budget)
2643{
2644 struct emac_priv *priv = container_of(napi, struct emac_priv, tx_napi);
2645 int work_done;
2646
2647 work_done = emac_tx_clean_desc(priv, budget);
2648 if (work_done < budget && napi_complete_done(napi, work_done)) {
2649 unsigned long flags;
2650
2651 spin_lock_irqsave(&priv->intr_lock, flags);
2652 emac_enable_interrupt(priv, 1);
2653 spin_unlock_irqrestore(&priv->intr_lock, flags);
2654 }
2655
2656 return work_done;
2657}
2658#endif
2659
2660/* Name emac_tx_mem_map
2661 * Arguments priv : pointer to driver private data structure
2662 * pstSkb : pointer to sk_buff structure passed by upper layer
2663 * max_tx_len : max data len per descriptor
2664 * frag_num : number of fragments in the packet
2665 * Return number of descriptors needed for transmitting packet
2666 * Description
2667 */
static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb,
			   u32 max_tx_len, u32 frag_num, int ioc)
{
	struct emac_desc_ring *tx_ring;
	struct emac_desc_buffer *tx_buf;
	struct emac_tx_desc *tx_desc, *first_desc;
	u32 skb_len;		/* remaining bytes of the current segment */
	u32 u32Offset, u32Size, i;
	u32 use_desc_cnt;	/* buffers consumed; two share one descriptor */
	u32 f;
	void *pvPtr;
	u32 cur_desc_addr;	/* read below but otherwise unused */
	u32 cur_desc_idx;	/* index of the packet's first descriptor */
	u8 do_tx_timestamp = 0;
	bool use_buf2 = 0;	/* next buffer goes into BufferAddr2 of the current desc */

	u32Offset = 0;
	use_desc_cnt = 0;

	skb_tx_timestamp(skb);
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_tx_timestamp = 1;
	}

	tx_ring = &priv->tx_ring;
	skb_len = skb->len - skb->data_len;	/* linear part only */
	i = cur_desc_idx = tx_ring->nxt_use;
	cur_desc_addr = emac_rd(priv, DMA_TRANSMIT_BASE_ADDRESS);
	/* Map the linear part, packing two buffers per descriptor
	 * (BufferAddr1 then BufferAddr2).
	 * NOTE(review): first_desc is only assigned inside this loop —
	 * assumes the skb always has a non-empty linear area; confirm an
	 * all-frags skb cannot reach here.
	 */
	while (skb_len > 0) {
		u32Size = min(skb_len, max_tx_len);
		skb_len -= u32Size;

		tx_buf = &tx_ring->desc_buf[i];
		tx_buf->dma_len = u32Size;
		pvPtr = skb->data + u32Offset;
		tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pvPtr,
						   u32Size, DMA_TO_DEVICE);
		tx_buf->buff_addr = pvPtr;
		tx_buf->ulTimeStamp = jiffies;

		tx_desc = emac_get_tx_desc(priv, i);

		if (use_buf2) {
			/* Second buffer of the current descriptor; only now
			 * does the ring index advance.
			 */
			tx_desc->BufferAddr2 = tx_buf->dma_addr;
			tx_desc->BufferSize2 = tx_buf->dma_len;
			i++;
			use_buf2 = 0;
		} else {
			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
			tx_desc->BufferAddr1 = tx_buf->dma_addr;
			tx_desc->BufferSize1 = tx_buf->dma_len;
			use_buf2 = 1;
		}

		if (use_desc_cnt == 0) {
			/* Remember the head descriptor; its OWN bit is set
			 * last so the DMA engine sees a complete chain.
			 */
			first_desc = tx_desc;
			tx_desc->FirstSegment = 1;
			if (do_tx_timestamp)
				tx_desc->tx_timestamp = 1;
		}

		if (skb_len == 0 && frag_num == 0) {
			tx_desc->LastSegment = 1;
			tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
		}

		if (!use_buf2 && i == tx_ring->total_cnt) {
			tx_desc->EndRing = 1;	/* ring wrap */
			i = 0;
		}

		/* trigger first desc OWN bit later */
		use_desc_cnt++;
		if (use_desc_cnt > 2)
			tx_desc->OWN = 1;

		u32Offset += u32Size;
	}

	/* if the data is fragmented */
	for (f = 0; f < frag_num; f++) {
		skb_frag_t *frag;

		frag = &(skb_shinfo(skb)->frags[f]);
		skb_len = skb_frag_size(frag);
		u32Offset = skb_frag_off(frag);

		/* Same packing scheme as the linear part above. */
		while (skb_len) {
			u32Size = min(skb_len, max_tx_len);
			skb_len -= u32Size;

			tx_buf = &tx_ring->desc_buf[i];
			tx_buf->dma_len = u32Size;
			tx_buf->dma_addr =
				dma_map_page(&priv->pdev->dev,
					     skb_frag_page(frag),
					     u32Offset,
					     u32Size,
					     DMA_TO_DEVICE);
			tx_buf->ulTimeStamp = jiffies;

			tx_desc = emac_get_tx_desc(priv, i);
			if (use_buf2) {
				tx_desc->BufferAddr2 = tx_buf->dma_addr;
				tx_desc->BufferSize2 = tx_buf->dma_len;
				i++;
				use_buf2 = 0;
			} else {
				memset(tx_desc, 0, sizeof(struct emac_tx_desc));
				tx_desc->BufferAddr1 = tx_buf->dma_addr;
				tx_desc->BufferSize1 = tx_buf->dma_len;
				use_buf2 = 1;
			}

			if (skb_len == 0 && f == (frag_num - 1)) {
				tx_desc->LastSegment = 1;
				tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
			}

			if (!use_buf2 && i == tx_ring->total_cnt) {
				tx_desc->EndRing = 1;
				i = 0;
			}

			/* trigger first desc OWN bit later */
			use_desc_cnt++;
			if (use_desc_cnt > 2)
				tx_desc->OWN = 1;

			u32Offset += u32Size;
		}
	}

	/* If the last descriptor only used BufferAddr1, the index was not
	 * advanced yet; advance it now and handle a wrap.
	 */
	if (use_buf2 && ++i == tx_ring->total_cnt) {
		tx_desc->EndRing = 1;
		i = 0;
	}

	/* The head slot keeps the skb and the index of the packet's last
	 * descriptor (nxt_watch) for the cleanup path.
	 */
	tx_ring->desc_buf[cur_desc_idx].skb = skb;
	tx_ring->desc_buf[cur_desc_idx].nxt_watch =
		(i == 0 ? tx_ring->total_cnt : 0) + i - 1;

	/* Order all descriptor writes before granting DMA ownership. */
	wmb();

	first_desc->OWN = 1;

	emac_dma_start_transmit(priv);

	tx_ring->nxt_use = i;
	return use_desc_cnt;
}
2822
/*
 * Fill one TX descriptor for the TSO/COE path and advance the ring
 * index past all descriptor slots the hardware will consume for this
 * buffer (for TSO, one slot per MSS-sized segment). Returns the next
 * free ring index; *cnt is incremented per slot consumed.
 */
static int emac_prepare_tso_desc(struct emac_priv *priv, int idx,
				 bool tso, bool coe,
				 u32 addr, int payload, u8 hlen, int mss,
				 bool fst, bool last, bool ioc, bool ts,
				 u32 *cnt)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_tx_desc *pdesc;

	pdesc = emac_get_tx_desc(priv, idx);
	if (tso) {
		/* For the first TSO buffer, buffer1 carries the headers
		 * (length written later by emac_tx_update_fst_desc) and
		 * buffer2 the payload that follows them.
		 */
		if (fst && hlen) {
			emac_set_buf1_addr_len(pdesc, addr, 0);
			payload -= hlen;
			addr += hlen;
		}
		emac_set_buf2_addr_len(pdesc, addr, payload);
	} else {
		emac_set_buf1_addr_len(pdesc, addr, payload);
	}

	if (fst) {
		emac_tx_desc_set_fd(pdesc);
	} else {
		/* Non-first descriptors carry the offload flags directly. */
		if (tso)
			emac_tx_desc_set_offload(pdesc, 1, 1, 1);
		else if (coe)
			emac_tx_desc_set_offload(pdesc, 0, 1, 0);
		else
			emac_tx_desc_set_offload(pdesc, 1, 0, 0);
	}

	if (ts)
		emac_tx_desc_set_ts(pdesc);

	if (last) {
		/* last segment */
		emac_tx_desc_set_ld(pdesc);
		if (ioc)
			emac_tx_desc_set_ioc(pdesc);
	}

	print_desc((void *)pdesc, 16);
	if (payload <= 0)
		return idx;

	/* Advance the index over every slot the engine will use: one for
	 * plain COE, one per MSS chunk for TSO.
	 */
	do {
		(*cnt)++;

		if (++idx == tx_ring->total_cnt) {
			emac_tx_desc_set_ring_end(pdesc);
			idx = 0;
		}

		if (!tso)
			break;

		payload -= mss;
		if (payload <= 0)
			break;

		pdesc = emac_get_tx_desc(priv, idx);
		emac_tx_desc_set_offload(pdesc, 1, 1, 0);

		print_desc((void *)pdesc, 16);
	} while (1);

	return idx;
}
2892
/*
 * Transmit path when the TSO/COE engine is enabled: maps the linear
 * part and every page fragment, builds the descriptor chain via
 * emac_prepare_tso_desc, finalizes the first descriptor and kicks the
 * engine. Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int emac_tso_xmit(struct sk_buff *skb, struct net_device *ndev,
			 bool tso, bool coe)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc_buffer *tx_buf;
	struct emac_tx_desc *pdesc;
	skb_frag_t *frag;
	u32 desc_cnt, frag_num, f, mss, fst;	/* fst: index of first descriptor */
	u32 offset, i;
	u8 hlen;		/* L2+L3+L4 header length (TSO only) */
	int skb_len, payload;
	void *pbuf;
	int ioc;		/* interrupt-on-completion for this packet */
	u8 timestamp = 0;

	/* Estimate descriptor usage: one slot per MSS chunk for TSO,
	 * otherwise one per MAX_DATA_LEN_TX_DES-sized piece.
	 */
	frag_num = skb_shinfo(skb)->nr_frags;
	skb_len = skb->len - skb->data_len;
	if (tso) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		desc_cnt = (skb_len / mss) + 1;
		for (f = 0; f < frag_num; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			desc_cnt += (skb_frag_size(frag) / mss) + 1;
		}
	} else {
		hlen = 0;
		mss = 0;
		desc_cnt = EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
		for (i = 0; i < frag_num; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			desc_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
						   MAX_DATA_PWR_TX_DES);
		}
	}

	emac_print("%s: skb=0x%x, skb->len=%d skb_len=%d mss=%d frag_num=%d hlen=%d\n",
		   __func__, (unsigned)skb, skb->len, skb_len, mss, frag_num, hlen);

#ifdef EMAC_DEBUG
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, skb->data, skb_len, 0);
#endif
	/* disable hard interrupt on local CPUs */
	/* NOTE(review): the non-NAPI branches below reference ulFlags,
	 * which is not declared in this function — confirm the
	 * !CONFIG_ASR_EMAC_NAPI configuration actually builds.
	 */
#ifndef CONFIG_ASR_EMAC_NAPI
	local_irq_save(ulFlags);
#endif
	if (!spin_trylock(&priv->spTxLock)) {
		pr_err("Collision detected\n");
#ifndef CONFIG_ASR_EMAC_NAPI
		local_irq_restore(ulFlags);
#endif
		return NETDEV_TX_BUSY;
	}

	/* check whether sufficient free descriptors are there */
	if (EMAC_DESC_UNUSED(tx_ring) < (desc_cnt + 2)) {
		pr_err_ratelimited("TSO Descriptors are not free\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}

	/* Interrupt coalescing: only request a completion interrupt for
	 * timestamped packets or every EMAC_TX_FRAMES descriptors.
	 */
	priv->tx_count_frames += desc_cnt;
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en))
		ioc = 1;
	else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
		ioc = 1;
	else
		ioc = 0;

	if (ioc)
		priv->tx_count_frames = 0;

	skb_tx_timestamp(skb);
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		timestamp = 1;
	}

	/* Map and describe the linear part in TSO_MAX_SEG_SIZE pieces. */
	offset = 0;
	desc_cnt = 0;
	i = fst = tx_ring->nxt_use;
	do {
		payload = min(skb_len, TSO_MAX_SEG_SIZE);

		tx_buf = &tx_ring->desc_buf[i];
		tx_buf->dma_len = payload;
		pbuf = skb->data + offset;
		tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pbuf,
						   payload, DMA_TO_DEVICE);
		tx_buf->buff_addr = pbuf;
		tx_buf->ulTimeStamp = jiffies;

		skb_len -= payload;
		offset += payload;

		i = emac_prepare_tso_desc(priv, i, tso, coe,
					  tx_buf->dma_addr, payload, hlen, mss,
					  (i == fst), (skb_len == 0 && frag_num == 0),
					  ioc, timestamp, &desc_cnt);
	} while (skb_len > 0);

	/* if the data is fragmented */
	for (f = 0; f < frag_num; f++) {
		frag = &(skb_shinfo(skb)->frags[f]);
		skb_len = skb_frag_size(frag);
		offset = skb_frag_off(frag);

		emac_print("%s: frag %d len=%d\n", __func__, f, skb_len);
#ifdef EMAC_DEBUG
		{
			u8 *vaddr;

			vaddr = kmap_atomic(skb_frag_page(frag));
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
				       32, 1, vaddr + offset, skb_len, 0);
			kunmap_atomic(vaddr);
		}
#endif
		do {
			payload = min(skb_len, TSO_MAX_SEG_SIZE);

			tx_buf = &tx_ring->desc_buf[i];
			tx_buf->dma_len = payload;
			//pbuf = skb->data + offset;
			tx_buf->dma_addr = dma_map_page(&priv->pdev->dev,
							skb_frag_page(frag),
							offset, payload,
							DMA_TO_DEVICE);
			tx_buf->ulTimeStamp = jiffies;

			skb_len -= payload;
			offset += payload;

			i = emac_prepare_tso_desc(priv, i, tso, coe,
						  tx_buf->dma_addr, payload, 0, mss,
						  (i == fst),
						  (skb_len == 0 && f == (frag_num - 1)),
						  ioc, timestamp, &desc_cnt);
		} while (skb_len > 0);
	}

	/* Head slot remembers the skb and its last descriptor index for
	 * the cleanup path.
	 */
	tx_ring->desc_buf[fst].skb = skb;
	tx_ring->desc_buf[fst].nxt_watch =
		(i == 0 ? tx_ring->total_cnt : 0) + i - 1;

	/* Order descriptor writes before publishing the first descriptor. */
	wmb();

	/* set first descriptor for this packet */
	pdesc = emac_get_tx_desc(priv, fst);
	emac_tx_update_fst_desc(pdesc, hlen, mss, tso, coe);
	print_desc((void *)pdesc, 16);

	tx_ring->nxt_use = i;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	if (tso) {
		priv->hw_stats->tx_tso_pkts++;
		priv->hw_stats->tx_tso_bytes += skb->len;
	}

	emac_wr_tso(priv, TSO_TX_POLL_DEMAND, 0xFF);
	/* Make sure there is space in the ring for the next send. */
	if (EMAC_DESC_UNUSED(tx_ring) < (MAX_SKB_FRAGS + 2)) {
		pr_debug_ratelimited("TSO Descriptors not enough, stop\n");
		netif_stop_queue(ndev);
	}

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
	spin_unlock(&priv->spTxLock);
#endif
#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif

	/* Without hardware offload interrupts, arm the TX-clean timer. */
	if (!tso && !coe)
		emac_tx_timer_arm(priv);

	return NETDEV_TX_OK;
}
3084
3085/* Name emac_start_xmit
3086 * Arguments pstSkb : pointer to sk_buff structure passed by upper layer
3087 * pstNetdev : pointer to net_device structure
3088 * Return Status: 0 - Success; non-zero - Fail
3089 * Description This function is called by upper layer to
3090 * handover the Tx packet to the driver
3091 * for sending it to the device.
 *			Dispatches to the TSO/COE transmit path when the
 *			offload engine is enabled, otherwise maps the skb
 *			into the DMA descriptor ring for transmission.
3094 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ioc;		/* request an interrupt-on-completion */
	u32 frag_num;
	u32 skb_len;		/* length of the skb's linear part */
	u32 tx_des_cnt = 0;	/* descriptors needed for this packet */
	u32 i;
#ifndef CONFIG_ASR_EMAC_NAPI
	unsigned long ulFlags;
#endif
#ifdef WAN_LAN_AUTO_ADAPT
	int vlan = 0;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
	struct vlan_hdr *vhdr;

	/* Snoop outgoing DHCP DISCOVER/REQUEST (client sport 68 -> server
	 * dport 67) to track which VLAN the WAN negotiation happens on.
	 * Offset 242 is where the DHCP message-type option value sits in
	 * these frames.
	 */
	{ struct ethhdr *myeth = (struct ethhdr *)skb->data;
	if (myeth->h_proto == htons(ETH_P_8021Q)) {
		vhdr = (struct vlan_hdr *)((u8 *)myeth + sizeof(struct ethhdr));
		vlan = ntohs(vhdr->h_vlan_TCI);
		iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr) + VLAN_HLEN);
	}
	else if (myeth->h_proto == htons(ETH_P_IP))
		iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr));

	if (iph && iph->protocol == IPPROTO_UDP) {
		udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
		if ((htons(udph->dest) == 67 && htons(udph->source) == 68)) {
			u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
			u8 dhcp_type = *(udp_data + 242);
			if ((DHCP_DISCOVER == dhcp_type || DHCP_REQUEST == dhcp_type)
				&& (0 == priv->dhcp)) {
				priv->dhcp = DHCP_SEND_REQ;
				if (ndev->phydev->phy_id == IP175D_PHY_ID)
					priv->vlan_port = vlan;
				else
					priv->vlan_port = -1;
			}
		}
	}
	}
#endif

	/* pstSkb->len: is the full length of the data in the packet
	 * pstSkb->data_len: the number of bytes in skb fragments
	 * u16Len: length of the first fragment
	 */
	skb_len = skb->len - skb->data_len;

	if (skb->len <= 0) {
		pr_err("Packet length is zero\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Hand off to the TSO/COE engine when it is enabled. */
	if (priv->tso) {
		bool tso = false, coe = false;

		if (skb_is_gso(skb) &&
		    (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			tso = true;
			coe = true;
		} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			coe = true;
		}

		/* WR: COE need skb->data to be 2 bytes alinged */
		if (coe && !IS_ALIGNED((unsigned long)skb->data, 2))
			pskb_expand_head(skb, 1, 0, GFP_ATOMIC);

		return emac_tso_xmit(skb, ndev, tso, coe);
	}

	/* increment the count if len exceeds MAX_DATA_LEN_TX_DES */
	tx_des_cnt += EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);

	frag_num = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < frag_num; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx_des_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
					     MAX_DATA_PWR_TX_DES);
	}

	/* disable hard interrupt on local CPUs */
#ifndef CONFIG_ASR_EMAC_NAPI
	local_irq_save(ulFlags);
#endif
	if (!spin_trylock(&priv->spTxLock)) {
		pr_err("Collision detected\n");
#ifndef CONFIG_ASR_EMAC_NAPI
		local_irq_restore(ulFlags);
#endif
		return NETDEV_TX_BUSY;
	}

	/* check whether sufficient free descriptors are there */
	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (tx_des_cnt + 2)) {
		pr_err_ratelimited("Descriptors are not free\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}

	/* Interrupt coalescing: request a completion interrupt only for
	 * timestamped packets or every EMAC_TX_FRAMES frames.
	 */
	priv->tx_count_frames += frag_num + 1;
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en))
		ioc = 1;
	else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
		ioc = 1;
	else
		ioc = 0;

	if (ioc)
		priv->tx_count_frames = 0;

	tx_des_cnt = emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, frag_num, ioc);
	if (tx_des_cnt == 0) {
		pr_err("Could not acquire memory from pool\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* Make sure there is space in the ring for the next send. */
	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (MAX_SKB_FRAGS + 2))
		netif_stop_queue(ndev);

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
	spin_unlock(&priv->spTxLock);
#endif
#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif
	emac_tx_timer_arm(priv);
	return NETDEV_TX_OK;
}
3245
3246u32 ReadTxStatCounters(struct emac_priv *priv, u8 cnt)
3247{
3248 u32 val, tmp;
3249
3250 val = 0x8000 | cnt;
3251 emac_wr(priv, MAC_TX_STATCTR_CONTROL, val);
3252 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3253
3254 while (val & 0x8000)
3255 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3256
3257 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH);
3258 val = tmp << 16;
3259 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW);
3260 val |= tmp;
3261
3262 return val;
3263}
3264
3265u32 ReadRxStatCounters(struct emac_priv *priv, u8 cnt)
3266{
3267 u32 val, tmp;
3268
3269 val = 0x8000 | cnt;
3270 emac_wr(priv, MAC_RX_STATCTR_CONTROL, val);
3271 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3272
3273 while (val & 0x8000)
3274 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3275
3276 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH);
3277 val = tmp << 16;
3278 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW);
3279 val |= tmp;
3280 return val;
3281}
3282
3283/* Name emac_set_mac_address
3284 * Arguments pstNetdev : pointer to net_device structure
3285 * addr : pointer to addr
3286 * Return Status: 0 - Success; non-zero - Fail
3287 * Description It is called by upper layer to set the mac address.
3288 */
static int emac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct sockaddr *sa = addr;
	struct emac_priv *priv = netdev_priv(ndev);

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);

//#LYNQ_MODFIY modify for task-1620 2025/6/10 start
	/* NOTE(review): the vendor change below discards the address the
	 * caller just supplied and forces the fixed locally-administered
	 * MAC 02:00:00:00:10:01 — user MAC changes are silently ignored.
	 * Confirm this is still the intended behavior for task-1620.
	 */
	(ndev->dev_addr)[0] = 0x2;
	(ndev->dev_addr)[1] = 0x0;
	(ndev->dev_addr)[2] = 0x0;
	(ndev->dev_addr)[3] = 0x0;
	(ndev->dev_addr)[4] = 0x10;
	(ndev->dev_addr)[5] = 0x1;
//#LYNQ_MODFIY modify for task-1620 2025/6/10 end

	/* Program the (forced) address into the MAC filter registers. */
	emac_set_mac_addr(priv, ndev->dev_addr);

	/* Use the same address as the source of pause frames. */
	emac_set_fc_source_addr(priv, ndev->dev_addr);

	return 0;
}
3314
3315/* Name emac_change_mtu
3316 * Arguments pstNetdev : pointer to net_device structure
3317 * u32MTU : maximum transmit unit value
3318 * Return Status: 0 - Success; non-zero - Fail
3319 * Description It is called by upper layer to set the MTU value.
3320 */
3321static int emac_change_mtu(struct net_device *ndev, int mtu)
3322{
3323 struct emac_priv *priv = netdev_priv(ndev);
3324 u32 frame_len;
3325
3326 if (netif_running(ndev)) {
3327 pr_err("must be stopped to change its MTU\n");
3328 return -EBUSY;
3329 }
3330
3331 frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3332
3333 if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE ||
3334 frame_len > EMAC_SKBRB_MAX_PAYLOAD) {
3335 pr_err("Invalid MTU setting\n");
3336 return -EINVAL;
3337 }
3338
3339 if (frame_len <= EMAC_RX_BUFFER_1024)
3340 priv->u32RxBufferLen = EMAC_RX_BUFFER_1024;
3341 else
3342 priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
3343
3344 ndev->mtu = mtu;
3345
3346 return 0;
3347}
3348
/*
 * emac_reset - recover the controller after a requested reset.
 *
 * Acts only when EMAC_RESET_REQUESTED is pending and the interface is
 * not already down. Serializes concurrent resets with the
 * EMAC_RESETING bit, then bounces the device via dev_close/dev_open
 * under rtnl_lock.
 */
static void emac_reset(struct emac_priv *priv)
{
	if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(EMAC_DOWN, &priv->state))
		return;

	netdev_dbg(priv->ndev, "Reset controller.\n");

	rtnl_lock();
	//netif_trans_update(priv->ndev);
	/* wait for any in-flight reset to finish before starting ours */
	while (test_and_set_bit(EMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	dev_close(priv->ndev);
	dev_open(priv->ndev, NULL);
	clear_bit(EMAC_RESETING, &priv->state);
	rtnl_unlock();
}
3368
/*
 * emac_tx_timeout_task - deferred TX-timeout recovery.
 *
 * Scheduled by emac_tx_timeout(); performs the actual controller reset
 * and then clears EMAC_TASK_SCHED so a later timeout may schedule the
 * work again.
 */
static void emac_tx_timeout_task(struct work_struct *work)
{
	struct emac_priv *priv = container_of(work,
				struct emac_priv, tx_timeout_task);
	emac_reset(priv);
	clear_bit(EMAC_TASK_SCHED, &priv->state);
}
3376
/* Name		emac_tx_timeout
 * Arguments	ndev : pointer to net_device structure
 * Return	none
 * Description	Called by the upper layer on packet transmit timeout.
 *		Dumps registers, marks the carrier off and requests a
 *		controller reset; the reset itself runs from
 *		emac_tx_timeout_task() on the system workqueue.
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	netdev_info(ndev, "TX timeout\n");
	register_dump(priv);

	netif_carrier_off(priv->ndev);
	set_bit(EMAC_RESET_REQUESTED, &priv->state);

	/* only one recovery work may be pending at a time */
	if (!test_bit(EMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(EMAC_TASK_SCHED, &priv->state))
		schedule_work(&priv->tx_timeout_task);
}
3397
3398static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable)
3399{
3400 const struct emac_regdata *regdata = priv->regdata;
3401 void __iomem* apmu;
3402 u32 val;
3403
3404 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3405 if (apmu == NULL) {
3406 pr_err("error to ioremap APMU base\n");
3407 return -ENOMEM;
3408 }
3409
3410 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3411 if (enable) {
3412 val |= 0x1;
3413 } else {
3414 val &= ~0x1;
3415 }
3416 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3417 iounmap(apmu);
3418 return 0;
3419}
3420
3421static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx)
3422{
3423 const struct emac_regdata *regdata = priv->regdata;
3424 void __iomem* apmu;
3425 u32 val, dline;
3426 u8 phase, tmp;
3427
3428 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3429 if (apmu == NULL) {
3430 pr_err("error to ioremap APMU base\n");
3431 return -ENOMEM;
3432 }
3433
3434 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3435 if (is_tx) {
3436 if (regdata->rgmii_tx_clk_src_sel_shift > 0) {
3437 phase = (priv->tx_clk_config >> 16) & 0x1;
3438 val &= ~(0x1 << regdata->rgmii_tx_clk_src_sel_shift);
3439 val |= phase << regdata->rgmii_tx_clk_src_sel_shift;
3440 }
3441
3442 if (regdata->rgmii_tx_dline_reg_offset > 0) {
3443 /* Set RGMIII TX DLINE */
3444 dline = readl(apmu + regdata->rgmii_tx_dline_reg_offset);
3445
3446 /* delay code */
3447 tmp = (priv->tx_clk_config >> 8) &
3448 regdata->rgmii_tx_delay_code_mask;
3449 dline &= ~(regdata->rgmii_tx_delay_code_mask <<
3450 regdata->rgmii_tx_delay_code_shift);
3451 dline |= tmp << regdata->rgmii_tx_delay_code_shift;
3452
3453 /* delay step */
3454 tmp = priv->tx_clk_config &
3455 regdata->rgmii_tx_delay_step_mask;
3456 dline &= ~(regdata->rgmii_tx_delay_step_mask <<
3457 regdata->rgmii_tx_delay_step_shift);
3458 dline |= tmp << regdata->rgmii_tx_delay_step_shift;
3459
3460 /* delay line enable */
3461 dline |= 1 << regdata->rgmii_tx_delay_enable_shift;
3462 writel(dline, apmu + regdata->rgmii_tx_dline_reg_offset);
3463 pr_info("===> emac set tx dline 0x%x 0x%x", dline,
3464 readl(apmu + regdata->rgmii_tx_dline_reg_offset));
3465 }
3466 } else {
3467 if (regdata->rgmii_rx_clk_src_sel_shift > 0) {
3468 phase = (priv->rx_clk_config >> 16) & 0x1;
3469 val &= ~(0x1 << regdata->rgmii_rx_clk_src_sel_shift);
3470 val |= phase << regdata->rgmii_rx_clk_src_sel_shift;
3471 }
3472
3473 /* Set RGMIII RX DLINE */
3474 if (regdata->rgmii_rx_dline_reg_offset > 0) {
3475 dline = readl(apmu + regdata->rgmii_rx_dline_reg_offset);
3476
3477 /* delay code */
3478 tmp = (priv->rx_clk_config >> 8) &
3479 regdata->rgmii_rx_delay_code_mask;
3480 dline &= ~(regdata->rgmii_rx_delay_code_mask <<
3481 regdata->rgmii_rx_delay_code_shift);
3482 dline |= tmp << regdata->rgmii_rx_delay_code_shift;
3483
3484 /* delay step */
3485 tmp = priv->rx_clk_config &
3486 regdata->rgmii_rx_delay_step_mask;
3487 dline &= ~(regdata->rgmii_rx_delay_step_mask <<
3488 regdata->rgmii_rx_delay_step_shift);
3489 dline |= tmp << regdata->rgmii_rx_delay_step_shift;
3490
3491 /* delay line enable */
3492 dline |= 1 << regdata->rgmii_rx_delay_enable_shift;
3493 writel(dline, apmu + regdata->rgmii_rx_dline_reg_offset);
3494 pr_info("===> emac set rx dline 0x%x 0x%x", dline,
3495 readl(apmu + regdata->rgmii_rx_dline_reg_offset));
3496 }
3497 }
3498 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3499 pr_info("%s phase:%d direction:%s 0x%x 0x%x\n", __func__, phase,
3500 is_tx ? "tx": "rx", val,
3501 readl(apmu + regdata->clk_rst_ctrl_reg_offset));
3502
3503 iounmap(apmu);
3504 return 0;
3505}
3506
3507static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx)
3508{
3509 const struct emac_regdata *regdata = priv->regdata;
3510 void __iomem* apmu;
3511 u32 val;
3512 u8 phase, tmp;
3513
3514 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3515 if (apmu == NULL) {
3516 pr_err("error to ioremap APMU base\n");
3517 return -ENOMEM;
3518 }
3519
3520 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3521 if (is_tx) {
3522 /* rmii tx clock select */
3523 if (regdata->rmii_tx_clk_sel_shift > 0) {
3524 tmp = (priv->tx_clk_config >> 16) & 0x1;
3525 val &= ~(0x1 << regdata->rmii_tx_clk_sel_shift);
3526 val |= tmp << regdata->rmii_tx_clk_sel_shift;
3527 }
3528
3529 /* rmii ref clock selct, 1 - from soc, 0 - from phy */
3530 if (regdata->rmii_rx_clk_sel_shift) {
3531 tmp = (priv->tx_clk_config >> 24) & 0x1;
3532 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3533 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3534 }
3535 } else {
3536 /* rmii rx clock select */
3537 if (regdata->rmii_rx_clk_sel_shift > 0) {
3538 tmp = (priv->rx_clk_config >> 16) & 0x1;
3539 val &= ~(0x1 << regdata->rmii_rx_clk_sel_shift);
3540 val |= tmp << regdata->rmii_rx_clk_sel_shift;
3541 }
3542
3543 /* rmii ref clock selct, 1 - from soc, 0 - from phy */
3544 if (regdata->rmii_rx_clk_sel_shift) {
3545 tmp = (priv->tx_clk_config >> 24) & 0x1;
3546 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3547 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3548 }
3549 }
3550
3551 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3552 pr_debug("%s phase:%d direction:%s\n", __func__, phase,
3553 is_tx ? "tx": "rx");
3554
3555 iounmap(apmu);
3556 return 0;
3557}
3558
3559static int clk_phase_set(struct emac_priv *priv, bool is_tx)
3560{
3561 if (emac_is_rmii_interface(priv)) {
3562 clk_phase_rmii_set(priv, is_tx);
3563 } else {
3564 clk_phase_rgmii_set(priv, is_tx);
3565 }
3566
3567 return 0;
3568}
3569
3570#ifdef CONFIG_DEBUG_FS
3571static int clk_phase_show(struct seq_file *s, void *data)
3572{
3573 struct emac_priv *priv = s->private;
3574 bool rmii_intf;
3575 rmii_intf = emac_is_rmii_interface(priv);
3576
3577 seq_printf(s, "Emac MII Interface : %s\n", rmii_intf ? "RMII" : "RGMII");
3578 seq_printf(s, "Current rx clk config : %d\n", priv->rx_clk_config);
3579 seq_printf(s, "Current tx clk config : %d\n", priv->tx_clk_config);
3580 return 0;
3581}
3582
3583static ssize_t clk_tuning_write(struct file *file,
3584 const char __user *user_buf,
3585 size_t count, loff_t *ppos)
3586{
3587 struct emac_priv *priv =
3588 ((struct seq_file *)(file->private_data))->private;
3589 int err;
3590 int clk_phase;
3591 char buff[TUNING_CMD_LEN] = { 0 };
3592 char mode_str[20];
3593
3594 if (count > TUNING_CMD_LEN) {
3595 pr_err("count must be less than 50.\n");
3596 return count;
3597 }
3598 err = copy_from_user(buff, user_buf, count);
3599 if (err)
3600 return err;
3601
3602 err = sscanf(buff, "%s %d", (char *)&mode_str, &clk_phase);
3603 if (err != 2) {
3604 pr_err("debugfs para count error\n");
3605 return count;
3606 }
3607 pr_info("input:%s %d\n", mode_str, clk_phase);
3608
3609 if (strcmp(mode_str, "tx") == 0) {
3610 priv->tx_clk_config = clk_phase;
3611 clk_phase_set(priv, TX_PHASE);
3612 } else if (strcmp(mode_str, "rx") == 0) {
3613 priv->rx_clk_config = clk_phase;
3614 clk_phase_set(priv, RX_PHASE);
3615 } else {
3616 pr_err("command error\n");
3617 pr_err("eg: echo rx 1 > clk_tuning\n");
3618 return count;
3619 }
3620
3621 return count;
3622}
3623
/* debugfs open hook: route reads through clk_phase_show(). */
static int clk_tuning_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_phase_show, inode->i_private);
}
3628
/*
 * debugfs "clk_tuning" file operations: read shows the current clock
 * phase configuration, write ("tx <val>" / "rx <val>") applies one.
 */
const struct file_operations clk_tuning_fops = {
	.open = clk_tuning_open,
	.write = clk_tuning_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3636
3637#endif
3638
3639static int emac_power_down(struct emac_priv *priv)
3640{
3641 if (priv->rst_gpio >= 0)
3642 gpio_direction_output(priv->rst_gpio,
3643 priv->low_active_rst ? 0 : 1);
3644
3645 if (priv->ldo_gpio >= 0)
3646 gpio_direction_output(priv->ldo_gpio,
3647 priv->low_active_ldo ? 0 : 1);
3648
3649 return 0;
3650}
3651
/*
 * emac_power_up - power-cycle the external PHY via the LDO and reset GPIOs.
 *
 * Runs a three-phase toggle sequence on the LDO GPIO, then the reset
 * GPIO, with the phase durations taken from delays_ldo[] / delays_rst[]
 * (microsecond values from the "ldo-delays-us" / "reset-delays-us" DT
 * properties, rounded up to milliseconds). GPIOs that were not found
 * (negative) are skipped. The exact level per phase depends on the
 * configured active-low polarity flags.
 */
static int emac_power_up(struct emac_priv *priv)
{
	u32 *delays_ldo = priv->delays_ldo;
	u32 *delays_rst = priv->delays_rst;
	int rst_gpio = priv->rst_gpio;
	int low_active_rst = priv->low_active_rst;
	int ldo_gpio = priv->ldo_gpio;
	int low_active_ldo = priv->low_active_ldo;

	if (rst_gpio >= 0) {
		gpio_direction_output(rst_gpio, low_active_rst ? 0 : 1);
	}

	if (ldo_gpio >= 0) {
		gpio_direction_output(ldo_gpio, low_active_ldo ? 0 : 1);
		/* phase 1 (optional): opposite level for delays_ldo[0] us */
		if (delays_ldo[0]) {
			gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
			msleep(DIV_ROUND_UP(delays_ldo[0], 1000));
		}

		/* phase 2: back to the initial level for delays_ldo[1] us */
		gpio_set_value(ldo_gpio, low_active_ldo ? 0 : 1);
		if (delays_ldo[1])
			msleep(DIV_ROUND_UP(delays_ldo[1], 1000));

		/* phase 3: final level, settle for delays_ldo[2] us */
		gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
		if (delays_ldo[2])
			msleep(DIV_ROUND_UP(delays_ldo[2], 1000));
	}

	if (rst_gpio >= 0) {
		/* same three-phase sequence for the reset line */
		if (delays_rst[0]) {
			gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
			msleep(DIV_ROUND_UP(delays_rst[0], 1000));
		}

		gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
		if (delays_rst[1])
			msleep(DIV_ROUND_UP(delays_rst[1], 1000));

		gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
		if (delays_rst[2])
			msleep(DIV_ROUND_UP(delays_rst[2], 1000));
	}

	return 0;
}
3698
/*
 * emac_mii_reset - MII bus reset hook: parse GPIOs from DT and
 * power-cycle the PHY.
 *
 * Reads the optional "reset-gpio"/"ldo-gpio" (with their active-low
 * flags and delay arrays) from the device node, claims them, then runs
 * the PHY power-up sequence with the AXI clock gated, and finally
 * resets the MAC. Always returns 0: a missing or unclaimable GPIO is
 * treated as "no GPIO control available", not an error.
 */
static int emac_mii_reset(struct mii_bus *bus)
{
	struct emac_priv *priv = bus->priv;
	struct device *dev = &priv->pdev->dev;
	struct device_node *np = dev->of_node;
	int rst_gpio, ldo_gpio;
	int low_active_ldo, low_active_rst;
	u32 *delays_ldo = priv->delays_ldo;
	u32 *delays_rst = priv->delays_rst;

	/* negative means "not present / not claimed" */
	priv->rst_gpio = -1;
	priv->ldo_gpio = -1;

	if (!np)
		return 0;

	rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	if (rst_gpio >= 0) {
		low_active_rst = of_property_read_bool(np, "reset-active-low");
		of_property_read_u32_array(np, "reset-delays-us", delays_rst, 3);

		if (gpio_request(rst_gpio, "mdio-reset")) {
			printk("emac: reset-gpio=%d request failed\n",
					rst_gpio);
			return 0;
		}
		priv->rst_gpio = rst_gpio;
		priv->low_active_rst = low_active_rst;
	}

	ldo_gpio = of_get_named_gpio(np, "ldo-gpio", 0);
	if (ldo_gpio >= 0) {
		low_active_ldo = of_property_read_bool(np, "ldo-active-low");
		of_property_read_u32_array(np, "ldo-delays-us", delays_ldo, 3);

		if (gpio_request(ldo_gpio, "mdio-ldo"))
			return 0;

		priv->ldo_gpio = ldo_gpio;
		priv->low_active_ldo = low_active_ldo;
	}

	/*
	 * Some device not allow MDC/MDIO operation during power on/reset,
	 * disable AXI clock to shutdown mdio clock.
	 */
	clk_disable_unprepare(priv->clk);

	emac_power_up(priv);

	clk_prepare_enable(priv->clk);

	emac_reset_hw(priv);

	return 0;
}
3755
3756static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
3757{
3758 struct emac_priv *priv = bus->priv;
3759 u32 cmd = 0;
3760 u32 val;
3761
3762 if (!__clk_is_enabled(priv->clk))
3763 return -EBUSY;
3764
3765 mutex_lock(&priv->mii_mutex);
3766 cmd |= phy_addr & 0x1F;
3767 cmd |= (regnum & 0x1F) << 5;
3768 cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
3769
3770 /*
3771 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
3772 * change during MDIO read/write
3773 */
3774#ifdef CONFIG_DDR_DEVFREQ
3775 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3776#endif
3777 emac_wr(priv, MAC_MDIO_DATA, 0x0);
3778 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3779
3780 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3781 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
3782 return -EBUSY;
3783
3784 val = emac_rd(priv, MAC_MDIO_DATA);
3785
3786#ifdef CONFIG_DDR_DEVFREQ
3787 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3788#endif
3789 mutex_unlock(&priv->mii_mutex);
3790 return val;
3791}
3792
3793static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
3794 u16 value)
3795{
3796 struct emac_priv *priv = bus->priv;
3797 u32 cmd = 0;
3798 u32 val;
3799
3800 if (!__clk_is_enabled(priv->clk))
3801 return -EBUSY;
3802
3803 mutex_lock(&priv->mii_mutex);
3804 emac_wr(priv, MAC_MDIO_DATA, value);
3805
3806 cmd |= phy_addr & 0x1F;
3807 cmd |= (regnum & 0x1F) << 5;
3808 cmd |= MREGBIT_START_MDIO_TRANS;
3809
3810 /*
3811 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
3812 * change during MDIO read/write
3813 */
3814#ifdef CONFIG_DDR_DEVFREQ
3815 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3816#endif
3817 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3818
3819 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3820 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
3821 return -EBUSY;
3822
3823#ifdef CONFIG_DDR_DEVFREQ
3824 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3825#endif
3826
3827 mutex_unlock(&priv->mii_mutex);
3828 return 0;
3829}
3830
/*
 * emac_adjust_link - phylib callback on link state change.
 *
 * Mirrors the PHY's negotiated duplex and speed into the MAC's global
 * control register when the link is up. With WAN_LAN_AUTO_ADAPT, also
 * generates hotplug uevents per port (IP175D switch PHY reports a
 * 16-bit per-port link bitmap) and schedules the delayed DHCP work.
 * Does nothing for fixed-link setups or when no PHY is attached.
 */
static void emac_adjust_link(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;
	struct emac_priv *priv = netdev_priv(dev);
	u32 ctrl;
#ifdef WAN_LAN_AUTO_ADAPT
	int status_change = 0;
	int addr = 0;
	int i = 0;
#endif
	if (!phydev || priv->fix_link)
		return;

	if (phydev->link) {
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

		/* Now we make sure that we can be in full duplex mode
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->duplex) {
			if (!phydev->duplex)
				ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
			else
				ctrl |= MREGBIT_FULL_DUPLEX_MODE;
			priv->duplex = phydev->duplex;
		}

		if (phydev->speed != priv->speed) {
			ctrl &= ~MREGBIT_SPEED;

			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= MREGBIT_SPEED_1000M;
				break;
			case SPEED_100:
				ctrl |= MREGBIT_SPEED_100M;
				break;
			case SPEED_10:
				ctrl |= MREGBIT_SPEED_10M;
				break;
			default:
				pr_err("broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			/* don't cache an unknown speed; retry next change */
			if (phydev->speed != SPEED_UNKNOWN) {
				priv->speed = phydev->speed;
			}
		}
		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
		pr_info("%s link:%d speed:%dM duplex:%s\n", __func__,
			phydev->link, phydev->speed,
			phydev->duplex ? "Full": "Half");
	}

#ifdef WAN_LAN_AUTO_ADAPT
	if(phydev->phy_id == IP175D_PHY_ID) {
		/* IP175D: phydev->link is a per-port bitmap; signal each
		 * port whose state flipped since the last callback */
		if (phydev->link != priv->link) {
			for (i=0; i<16; i++) {
				if((priv->link & (1<<i)) != (phydev->link & (1<<i))) {
					addr = i;
					if (phydev->link & (1<<i)) {
						/* link up */
						printk("eth0 port%d link up\n", addr);
						priv->dhcp = 0;
						emac_sig_workq(CARRIER_UP_IP175D, addr);
						if(priv->dhcp_delaywork)
							cancel_delayed_work(&priv->dhcp_work);
						priv->dhcp_delaywork = 1;
						schedule_delayed_work(&priv->dhcp_work, 25*HZ);
					} else {
						/* link down */
						printk("eth0 port%d link down\n", addr);
						priv->dhcp = 0;
						if(priv->dhcp_delaywork)
							cancel_delayed_work(&priv->dhcp_work);
						priv->dhcp_delaywork = 0;
						emac_sig_workq(CARRIER_DOWN_IP175D, addr);
					}
				}
			}
			priv->link = phydev->link;
		}
	} else {
		if (phydev->link != priv->link) {
			priv->link = phydev->link;
			status_change = 1;
		}

		if (status_change) {
			if (phydev->link) {
				/* link up */
				priv->dhcp = 0;
				emac_sig_workq(CARRIER_UP, 0);
				if(priv->dhcp_delaywork)
					cancel_delayed_work(&priv->dhcp_work);
				priv->dhcp_delaywork = 1;
				schedule_delayed_work(&priv->dhcp_work, 25*HZ);

			} else {
				/* link down */
				priv->dhcp = 0;
				if(priv->dhcp_delaywork)
					cancel_delayed_work(&priv->dhcp_work);
				priv->dhcp_delaywork = 0;
				emac_sig_workq(CARRIER_DOWN, 0);
			}
		}
	}
#endif
}
3942
3943static int emac_phy_connect(struct net_device *dev)
3944{
3945 struct phy_device *phydev;
3946 int phy_interface;
3947 struct device_node *np;
3948 struct emac_priv *priv = netdev_priv(dev);
3949
3950 np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0);
3951 if (!np) {
3952 if (priv->fix_link) {
3953 emac_phy_interface_config(priv, priv->interface);
3954 if (priv->interface == PHY_INTERFACE_MODE_RGMII)
3955 pinctrl_select_state(priv->pinctrl,
3956 priv->rgmii_pins);
3957 emac_config_phy_interrupt(priv, 0);
3958 return 0;
3959 }
3960 return -ENODEV;
3961 }
3962
3963 printk("%s: %s\n",__func__, np->full_name);
3964 phy_interface = of_get_phy_mode(np);
3965 emac_phy_interface_config(priv, phy_interface);
3966 if (phy_interface != PHY_INTERFACE_MODE_RMII)
3967 pinctrl_select_state(priv->pinctrl, priv->rgmii_pins);
b.liub17525e2025-05-14 17:22:29 +08003968 phydev = phy_find_first(priv->mii);
3969 if (!phydev) {
3970 printk("%s: no PHY found\n", dev->name);
3971 return -ENODEV;
3972 }
3973 phy_connect_direct(dev, phydev, emac_adjust_link, phy_interface); /* phy_start_machine */
3974 //phydev = of_phy_connect(dev, np,&emac_adjust_link, 0, phy_interface);
b.liue9582032025-04-17 19:18:16 +08003975 if (IS_ERR_OR_NULL(phydev)) {
3976 pr_err("Could not attach to PHY\n");
3977 emac_power_down(priv);
3978 if (!phydev)
3979 return -ENODEV;
3980 return PTR_ERR(phydev);
3981 }
3982
3983 if (!phydev->phy_id || phydev->phy_id == 0xffffffff) {
3984 pr_err("Not valid phy_id=0x%x\n", phydev->phy_id);
3985 emac_power_down(priv);
3986 return -ENODEV;
3987 }
3988
3989 if(phy_interrupt_is_valid(phydev))
3990 emac_config_phy_interrupt(priv, 1);
3991 else
3992 emac_config_phy_interrupt(priv, 0);
3993
3994 //phydev->supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
3995 pr_info("%s: %s: attached to PHY (UID 0x%x)"
3996 " Link = %d irq=%d\n", __func__,
3997 dev->name, phydev->phy_id, phydev->link, phydev->irq);
3998 dev->phydev = phydev;
3999
4000#ifdef WAN_LAN_AUTO_ADAPT
4001 if(phydev->phy_id == IP175D_PHY_ID)
4002 emac_sig_workq(PHY_IP175D_CONNECT, 0);
4003#endif
4004
4005 return 0;
4006}
4007
4008static int emac_mdio_init(struct emac_priv *priv)
4009{
4010 struct device_node *mii_np;
4011 struct device *dev = &priv->pdev->dev;
4012 int ret;
4013
4014 mii_np = of_get_child_by_name(dev->of_node, "mdio-bus");
4015 if (!mii_np) {
4016 dev_err(dev, "no %s child node found", "mdio-bus");
4017 return -ENODEV;
4018 }
4019
4020 if (!of_device_is_available(mii_np)) {
4021 ret = -ENODEV;
4022 goto err_put_node;
4023 }
4024
4025 priv->mii = mdiobus_alloc();//devm_mdiobus_alloc(dev);
4026 if (!priv->mii) {
4027 ret = -ENOMEM;
4028 goto err_put_node;
4029 }
4030 priv->mii->priv = priv;
4031 //priv->mii->irq = priv->mdio_irqs;
4032 priv->mii->name = "emac mii";
4033 priv->mii->reset = emac_mii_reset;
4034 priv->mii->read = emac_mii_read;
4035 priv->mii->write = emac_mii_write;
4036 snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%pOFn",
4037 mii_np);
4038 priv->mii->parent = dev;
4039 priv->mii->phy_mask = 0xffffffff;
4040 ret = of_mdiobus_register(priv->mii, mii_np);
4041
4042err_put_node:
4043 of_node_put(mii_np);
4044 return ret;
4045}
4046
4047static int emac_mdio_deinit(struct emac_priv *priv)
4048{
4049 if (!priv->mii)
4050 return 0;
4051
4052 mdiobus_unregister(priv->mii);
4053 return 0;
4054}
4055
4056static int emac_get_ts_info(struct net_device *dev,
4057 struct ethtool_ts_info *info)
4058{
4059 struct emac_priv *priv = netdev_priv(dev);
4060
4061 if (priv->ptp_support) {
4062
4063 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4064 SOF_TIMESTAMPING_TX_HARDWARE |
4065 SOF_TIMESTAMPING_RX_SOFTWARE |
4066 SOF_TIMESTAMPING_RX_HARDWARE |
4067 SOF_TIMESTAMPING_SOFTWARE |
4068 SOF_TIMESTAMPING_RAW_HARDWARE;
4069
4070 if (priv->ptp_clock)
4071 info->phc_index = ptp_clock_index(priv->ptp_clock);
4072
4073 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
4074 info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
4075 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
4076 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
4077 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
4078 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
4079 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
4080 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
4081 (1 << HWTSTAMP_FILTER_ALL));
4082 if (priv->regdata->ptp_rx_ts_all_events) {
4083 info->rx_filters |=
4084 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
4085 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
4086 }
4087
4088 return 0;
4089 } else
4090 return ethtool_op_get_ts_info(dev, info);
4091}
4092
4093static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4094{
4095 int i;
4096
4097 switch (stringset) {
4098 case ETH_SS_STATS:
4099 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4100 memcpy(data, emac_ethtool_stats[i].str, ETH_GSTRING_LEN);
4101 data += ETH_GSTRING_LEN;
4102 }
4103 break;
4104 }
4105}
4106
4107static int emac_get_sset_count(struct net_device *dev, int sset)
4108{
4109 switch (sset) {
4110 case ETH_SS_STATS:
4111 return ARRAY_SIZE(emac_ethtool_stats);
4112 default:
4113 return -EOPNOTSUPP;
4114 }
4115}
4116
/*
 * emac_stats_update - refresh the cached hardware statistics.
 *
 * Treats struct emac_hw_stats as a flat u32 array: MAX_TX_STATS_NUM TX
 * counters first, then MAX_RX_STATS_NUM RX counters, the DMA
 * missed-frame counter, and finally the two software TSO counters.
 * NOTE(review): this depends on the field order of struct
 * emac_hw_stats matching exactly — verify against emac_eth.h before
 * reordering that struct. Called with hwstats->stats_lock held (see
 * emac_get_ethtool_stats()).
 */
static void emac_stats_update(struct emac_priv *priv)
{
	struct emac_hw_stats *hwstats = priv->hw_stats;
	int i;
	u32 *p;

	p = (u32 *)(hwstats);

	for (i = 0; i < MAX_TX_STATS_NUM; i++)
		*(p + i) = ReadTxStatCounters(priv, i);

	/* RX counters start right after the TX block */
	p = (u32 *)hwstats + MAX_TX_STATS_NUM;

	for (i = 0; i < MAX_RX_STATS_NUM; i++)
		*(p + i) = ReadRxStatCounters(priv, i);

	*(p + i++) = emac_rd(priv, DMA_MISSED_FRAME_COUNTER);

	*(p + i++) = hwstats->tx_tso_pkts;
	*(p + i++) = hwstats->tx_tso_bytes;
}
4138
4139static void emac_get_ethtool_stats(struct net_device *dev,
4140 struct ethtool_stats *stats, u64 *data)
4141{
4142 struct emac_priv *priv = netdev_priv(dev);
4143 struct emac_hw_stats *hwstats = priv->hw_stats;
4144 u32 *data_src;
4145 u64 *data_dst;
4146 int i;
4147
4148 if (netif_running(dev) && netif_device_present(dev)) {
4149 if (spin_trylock_bh(&hwstats->stats_lock)) {
4150 emac_stats_update(priv);
4151 spin_unlock_bh(&hwstats->stats_lock);
4152 }
4153 }
4154
4155 data_dst = data;
4156
4157 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4158 data_src = (u32 *)hwstats + emac_ethtool_stats[i].offset;
4159 *data_dst++ = (u64)(*data_src);
4160 }
4161}
4162
/* Size in bytes of the dump produced by emac_ethtool_get_regs(). */
static int emac_ethtool_get_regs_len(struct net_device *dev)
{
	return EMAC_REG_SPACE_SIZE;
}
4167
4168static void emac_ethtool_get_regs(struct net_device *dev,
4169 struct ethtool_regs *regs, void *space)
4170{
4171 struct emac_priv *priv = netdev_priv(dev);
4172 u32 *reg_space = (u32 *) space;
4173 void __iomem *base = priv->iobase;
4174 int i;
4175
4176 regs->version = 1;
4177
4178 memset(reg_space, 0x0, EMAC_REG_SPACE_SIZE);
4179
4180 for (i = 0; i < EMAC_DMA_REG_CNT; i++)
4181 reg_space[i] = readl(base + DMA_CONFIGURATION + i * 4);
4182
4183 for (i = 0; i < EMAC_MAC_REG_CNT; i++)
4184 reg_space[i + MAC_GLOBAL_CONTROL / 4] = readl(base + MAC_GLOBAL_CONTROL + i * 4);
4185}
4186
4187static int emac_get_link_ksettings(struct net_device *ndev,
4188 struct ethtool_link_ksettings *cmd)
4189{
4190 if (!ndev->phydev)
4191 return -ENODEV;
4192
4193 phy_ethtool_ksettings_get(ndev->phydev, cmd);
4194 return 0;
4195}
4196
4197static int emac_set_link_ksettings(struct net_device *ndev,
4198 const struct ethtool_link_ksettings *cmd)
4199{
4200 if (!ndev->phydev)
4201 return -ENODEV;
4202
4203 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
4204}
4205
4206static void emac_get_drvinfo(struct net_device *dev,
4207 struct ethtool_drvinfo *info)
4208{
4209 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
4210 info->n_stats = ARRAY_SIZE(emac_ethtool_stats);
4211}
4212
4213static void emac_get_pauseparam(struct net_device *ndev,
4214 struct ethtool_pauseparam *param)
4215{
4216 struct emac_priv *priv = netdev_priv(ndev);
4217 int val = emac_mii_read(priv->mii, 0, 0);
4218
4219 param->autoneg = (val & BIT(12)) ? 1 : 0;
4220 param->rx_pause = priv->pause.rx_pause;
4221 param->tx_pause = priv->pause.tx_pause;
4222
4223 return;
4224}
4225
/*
 * emac_set_pauseparam - ethtool pause/flow-control configuration.
 *
 * Enables/disables pause frame decode (RX) and generation (TX) in the
 * MAC, and mirrors the autoneg bit into BMCR bit 12 of the PHY at
 * address 0. On the first enable it also programs the flow-control
 * watermarks from the "flow-control-threshold" DT property (percent
 * low/high, defaulting to 60/90) — ring-count based on ASR18xx /
 * ASR1903-Z1, automatic 1024-slot thresholds elsewhere.
 *
 * NOTE(review): 'init_flag' is a function-local static with no
 * locking, so the one-shot init is not protected against concurrent
 * ethtool calls — presumably acceptable under rtnl; confirm.
 */
static int emac_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *param)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	struct device_node *np = dev->of_node;
	int val;
	int phyval;
	u32 threshold[2];
	static int init_flag = 1;

	val = readl(priv->iobase + MAC_FC_CONTROL);
	phyval = emac_mii_read(priv->mii, 0, 0);

	if (param->rx_pause)
		val |= MREGBIT_FC_DECODE_ENABLE;
	else
		val &= ~MREGBIT_FC_DECODE_ENABLE;

	if (param->tx_pause)
		val |= MREGBIT_FC_GENERATION_ENABLE;
	else
		val &= ~MREGBIT_FC_GENERATION_ENABLE;

	/* one-time watermark setup, the first time pause is enabled */
	if (init_flag && (param->rx_pause | param->tx_pause)) {
		val |= MREGBIT_MULTICAST_MODE;
		priv->pause.pause_time_max = 0;
		if (0 != of_property_read_u32_array(np, "flow-control-threshold", threshold, 2)) {
			threshold[0] = 60;
			threshold[1] = 90;
		}
		threshold[0] = clamp(threshold[0], 0U, 99U);
		threshold[1] = clamp(threshold[1], 1U, 100U);

		if (cpu_is_asr18xx() || cpu_is_asr1903_z1()) {
			priv->pause.low_water = priv->rx_ring.total_cnt * threshold[0] / 100;
			priv->pause.high_water = priv->rx_ring.total_cnt * threshold[1] / 100 - 1;
			priv->pause.fc_auto = 0;
		} else {
			priv->pause.low_water = 0;
			priv->pause.high_water = 0;
			priv->pause.fc_auto = 1;
			val |= MREGBIT_AUTO_FC_GENERATION_ENABLE;
			threshold[0] = 1024 * threshold[0] / 100;
			threshold[1] = 1024 * threshold[1] / 100;
			emac_wr(priv, MAC_FC_AUTO_HIGH_THRESHOLD, threshold[1]);
			emac_wr(priv, MAC_FC_AUTO_LOW_THRESHOLD, threshold[0]);
			emac_wr(priv, MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE, 0xffff);
			emac_wr(priv, MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE, 0);
		}
		init_flag = 0;
	}
	emac_wr(priv, MAC_FC_CONTROL, val);

	if (param->autoneg)
		phyval |= BIT(12);
	else
		phyval &= ~BIT(12);

	(void)emac_mii_write(priv->mii, 0, 0, (u16)phyval);

	priv->pause.rx_pause = param->rx_pause;
	priv->pause.tx_pause = param->tx_pause;
	return 0;
}
4291
4292static void emac_get_wol(struct net_device *dev,
4293 struct ethtool_wolinfo *wol)
4294{
4295 struct emac_priv *priv = netdev_priv(dev);
4296 struct device *device = &priv->pdev->dev;
4297
4298 if (device_can_wakeup(device)) {
4299 wol->supported = WAKE_MAGIC | WAKE_UCAST;
4300 wol->wolopts = priv->wolopts;
4301 }
4302}
4303
4304static int emac_set_wol(struct net_device *dev,
4305 struct ethtool_wolinfo *wol)
4306{
4307 struct emac_priv *priv = netdev_priv(dev);
4308 struct device *device = &priv->pdev->dev;
4309 u32 support = WAKE_MAGIC | WAKE_UCAST;
4310
4311 if (!device_can_wakeup(device) || !priv->en_suspend)
4312 return -ENOTSUPP;
4313
4314 if (wol->wolopts & ~support)
4315 return -EINVAL;
4316
4317 priv->wolopts = wol->wolopts;
4318
4319 if (wol->wolopts) {
4320 device_set_wakeup_enable(device, 1);
4321 enable_irq_wake(priv->irq_wakeup);
4322 } else {
4323 device_set_wakeup_enable(device, 0);
4324 disable_irq_wake(priv->irq_wakeup);
4325 }
4326
4327 return 0;
4328}
4329
/* ethtool operations for the EMAC netdev. */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings = emac_get_link_ksettings,
	.set_link_ksettings = emac_set_link_ksettings,
	.get_drvinfo = emac_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = emac_get_pauseparam,
	.set_pauseparam = emac_set_pauseparam,
	.get_strings = emac_get_strings,
	.get_sset_count = emac_get_sset_count,
	.get_ethtool_stats = emac_get_ethtool_stats,
	.get_regs = emac_ethtool_get_regs,
	.get_regs_len = emac_ethtool_get_regs_len,
	.get_ts_info = emac_get_ts_info,
	.get_wol = emac_get_wol,
	.set_wol = emac_set_wol,
};
4347
/* net_device operations for the EMAC netdev. */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_open,
	.ndo_stop = emac_close,
	.ndo_start_xmit = emac_start_xmit,
	.ndo_set_mac_address = emac_set_mac_address,
	.ndo_do_ioctl = emac_ioctl,
	.ndo_change_mtu = emac_change_mtu,
	.ndo_tx_timeout = emac_tx_timeout,
};
4357
4358#ifdef WAN_LAN_AUTO_ADAPT
4359#define EMAC_SKB_SIZE 2048
4360static int emac_event_add_var(struct emac_event *event, int argv,
4361 const char *format, ...)
4362{
4363 static char buf[128];
4364 char *s;
4365 va_list args;
4366 int len;
4367
4368 if (argv)
4369 return 0;
4370
4371 va_start(args, format);
4372 len = vsnprintf(buf, sizeof(buf), format, args);
4373 va_end(args);
4374
4375 if (len >= sizeof(buf)) {
4376 printk("buffer size too small\n");
4377 WARN_ON(1);
4378 return -ENOMEM;
4379 }
4380
4381 s = skb_put(event->skb, len + 1);
4382 strcpy(s, buf);
4383
4384 return 0;
4385}
4386
/*
 * Fill the uevent skb with the standard hotplug environment variables
 * (HOME, PATH, SUBSYSTEM, ACTION, ETH, PORT, SEQNUM).
 * Returns 0 on success or the first emac_event_add_var() error.
 */
static int emac_hotplug_fill_event(struct emac_event *event)
{
	int ret;

	ret = emac_event_add_var(event, 0, "HOME=%s", "/");
	if (ret)
		return ret;

	ret = emac_event_add_var(event, 0, "PATH=%s",
			"/sbin:/bin:/usr/sbin:/usr/bin");
	if (ret)
		return ret;

	ret = emac_event_add_var(event, 0, "SUBSYSTEM=%s", "ethernet");
	if (ret)
		return ret;

	ret = emac_event_add_var(event, 0, "ACTION=%s", event->action);
	if (ret)
		return ret;

	ret = emac_event_add_var(event, 0, "ETH=%s", event->name);
	if (ret)
		return ret;

	ret = emac_event_add_var(event, 0, "PORT=%d", event->port);
	if (ret)
		return ret;

	/* unique sequence number so userspace can order events */
	ret = emac_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());

	return ret;
}
4420
/*
 * Workqueue handler: build a "<ACTION>@" uevent for the event and
 * broadcast it to the kobject-uevent netlink group.
 *
 * Ownership: on successful broadcast the skb is consumed by
 * broadcast_uevent() (presumably - TODO confirm); on any error the skb
 * is freed here. The emac_event itself is always freed before returning.
 */
static void emac_hotplug_work(struct work_struct *work)
{
	struct emac_event *event = container_of(work, struct emac_event, work);
	int ret = 0;

	event->skb = alloc_skb(EMAC_SKB_SIZE, GFP_KERNEL);
	if (!event->skb)
		goto out_free_event;

	/* uevent header: "ACTION@" */
	ret = emac_event_add_var(event, 0, "%s@", event->action);
	if (ret)
		goto out_free_skb;

	ret = emac_hotplug_fill_event(event);
	if (ret)
		goto out_free_skb;

	NETLINK_CB(event->skb).dst_group = 1;
	broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);

 out_free_skb:
	if (ret) {
		printk("work error %d\n", ret);
		kfree_skb(event->skb);
	}
 out_free_event:
	kfree(event);
}
4449
4450static int emac_sig_workq(int event, int port)
4451{
4452 struct emac_event *u_event = NULL;
4453
4454 u_event = kzalloc(sizeof(*u_event), GFP_KERNEL);
4455 if (!u_event)
4456 return -ENOMEM;
4457
4458 u_event->name = DRIVER_NAME;
4459 if(event == CARRIER_UP)
4460 u_event->action = "LINKUP";
4461 else if(event == CARRIER_DOWN)
4462 u_event->action = "LINKDW";
4463 else if(event == CARRIER_DOWN_IP175D)
4464 u_event->action = "IP175D_LINKDW";
4465 else if(event == CARRIER_UP_IP175D)
4466 u_event->action = "IP175D_LINKUP";
4467 else if(event == DHCP_EVENT_CLIENT)
4468 u_event->action = "DHCPCLIENT";
4469 else if(event == DHCP_EVENT_SERVER)
4470 u_event->action = "DHCPSERVER";
4471 else if(event == PHY_IP175D_CONNECT)
4472 u_event->action = "PHY_CONNECT";
4473
4474 u_event->port = port;
4475 INIT_WORK(&u_event->work, (void *)emac_hotplug_work);
4476 schedule_work(&u_event->work);
4477
4478 return 0;
4479}
4480
4481static inline void __emac_dhcp_work_func(struct emac_priv *priv)
4482{
4483 if (priv->dhcp == DHCP_REC_RESP) {
4484 emac_sig_workq(DHCP_EVENT_CLIENT, priv->vlan_port);
4485 } else if (priv->dhcp == DHCP_SEND_REQ || priv->dhcp == 0) {
4486 emac_sig_workq(DHCP_EVENT_SERVER, priv->vlan_port);
4487 }
4488
4489 priv->dhcp = 0;
4490 if(priv->dhcp_delaywork){
4491 cancel_delayed_work(&priv->dhcp_work);
4492 priv->dhcp_delaywork = 0;
4493 }
4494}
4495
/* Delayed-work trampoline: recover priv and run the DHCP event handler. */
static void emac_dhcp_work_func_t(struct work_struct *work)
{
	struct emac_priv *priv = container_of(work, struct emac_priv, dhcp_work.work);

	__emac_dhcp_work_func(priv);
}
4502#endif
4503
b.liub17525e2025-05-14 17:22:29 +08004504
4505
4506
4507long g_PhyVersionNumber = 0;
4508
4509
4510static ssize_t phy_version_show(struct device *dev,
4511 struct device_attribute *attr, char *buf)
4512{
4513 int len = 0;
4514
4515 len = sprintf(buf, "phy_version = 0x%x\n", g_PhyVersionNumber);
4516
4517 return (ssize_t)len;
4518}
4519
4520static ssize_t phy_version_store(struct device *dev,
4521 struct device_attribute *attr, const char *buf, size_t size)
4522{
4523 int reg, val, devad = 0;
4524
4525 struct emac_priv *priv = dev_get_drvdata(dev);
4526
4527 sscanf(buf, "%d", &val);
4528 if(val == 1)
4529 {
4530 devad = 0x1f;
4531 reg = 0x113;
4532 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4533 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4534 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4535 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4536
4537 }
4538 g_PhyVersionNumber = val;
4539
4540 return size;
4541}
4542
4543
4544static ssize_t lpsd_sleep_show(struct device *dev,
4545 struct device_attribute *attr, char *buf)
4546{
4547 int len = 0;
4548 int reg, val, devad = 0;
4549 struct emac_priv *priv = dev_get_drvdata(dev);
4550
4551 devad = 0x3;
4552 reg = 0x8700;
4553 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4554 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4555 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4556 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4557
4558 len = sprintf(buf, "phy_version = 0x%x\n", val);
4559
4560 return (ssize_t)len;
4561}
4562
4563static ssize_t lpsd_sleep_store(struct device *dev,
4564 struct device_attribute *attr, const char *buf, size_t size)
4565{
4566 int reg, val, devad = 0;
4567
4568 struct emac_priv *priv = dev_get_drvdata(dev);
4569
4570 sscanf(buf, "%d", &val);
4571 if(val == 1) //enter lpsd sleep mode
4572 {
4573 devad = 0x3;
4574 reg = 0x8700;
4575 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4576 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4577 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4578 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4579
4580 printk("lpsd sleep mode : reg3.8700 = 0x%x", val);
4581 msleep(200);
4582
4583 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4584 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4585 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4586 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, (val | BIT(0)));
4587
4588 }else
4589 {
4590
4591 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4592 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4593 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4594 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4595
4596 printk("lpsd sleep mode : reg3.8700 = 0x%x", val);
4597 msleep(200);
4598
4599 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4600 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4601 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4602 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, (val | ~BIT(0)));
4603 }
4604
4605 return size;
4606}
4607
4608
/* Last mode selected via gmac_master_or_slave_store (0-3, -1 = unset). */
static int mode_type = -1;
/* Best-effort re-entrancy guard for gmac_master_or_slave_show
 * (plain int, not atomic - a concurrent reader could still race). */
static int enter_only_one = 0;
4611
4612
4613static ssize_t gmac_master_or_slave_store(struct device *dev,
4614 struct device_attribute *attr, const char *buf, size_t size)
4615{
4616 int val = 0;
4617 int reg = 0;
4618 int devad = 0;
4619 int ret = 0;
4620
4621 struct emac_priv *priv = dev_get_drvdata(dev);
4622
4623 //read mode_type
4624 ret = sscanf(buf, "%d", &mode_type);
4625 if(ret < 1)
4626 {
4627 printk(KERN_ERR "Please enter the number 0-3 to enable the corresponding mode \n"
4628 "Enter values in the non-0-3 range to get pattern description \n");
4629 return size;
4630 }
4631
4632 //Judgment model
4633 if (mode_type < 0 || mode_type > 3) {
4634 printk(KERN_DEBUG "Please enter the number range 0-3\n"
4635 "0: Set the slave mode \n"
4636 "1: Set the main mode \n"
4637 "2: indicates setting SQI value view mode \n"
4638 "3: Set the VCT value view mode \n"
4639 "After the mode is set, the corresponding value can be obtained\n");
4640 return ret ? ret : size;
4641 }
4642
4643 //Set the Ethernet slave mode
4644 if (mode_type == 0)
4645 {
4646 devad = 0x1;
4647 reg = 0x834;
4648 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4649 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4650 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4651 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4652 msleep(200);
4653
4654 val &= ~BIT(14);
4655 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4656 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4657 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4658 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, val );
4659
4660 }
4661 //Set the Ethernet master mode
4662 else if (mode_type == 1)
4663 {
4664 devad = 0x1;
4665 reg = 0x834;
4666 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4667 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4668 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4669 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4670 msleep(200);
4671
4672 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4673 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4674 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4675 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, val | BIT(14));
4676
4677
4678 }
4679
4680 return size;
4681}
4682
4683
4684static ssize_t gmac_master_or_slave_show(struct device *dev,
4685 struct device_attribute *attr, char *buf)
4686{
4687 int len = 0;
4688 int val = 0;
4689 int reg = 0;
4690 int devad = 0;
4691 int ret = 0;
4692 struct emac_priv *priv = dev_get_drvdata(dev);
4693
4694 if(enter_only_one == 1)
4695 {
4696 return 0;
4697 }
4698 enter_only_one = 1;
4699
4700 //Read the network master/slave
4701 if (mode_type == 0 || mode_type == 1)
4702 {
4703 devad = 0x1;
4704 reg = 0x834;
4705 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4706 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4707 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4708 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e) & BIT(14);
4709 if(val)
4710 memcpy(buf, "Master\n",7);
4711 else
4712 memcpy(buf, "Slave\n", 6);
4713
4714 printk(KERN_DEBUG "mode_type %d - gmac_master_or_slave is %s\n", mode_type, buf);
4715
4716 }
4717
4718 //Obtain the cable quality SQI value
4719 else if(mode_type == 2)
4720 {
4721 devad = 0x1;
4722 reg = 0x8B10;
4723 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0d, devad);
4724 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, reg);
4725 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0d, 0x4000 | devad);
4726 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4727 sprintf(buf, "0x%x\n", val);
4728 sprintf(buf, "SQI : 0x%x\n", val);
4729 printk(KERN_DEBUG "mode_type %d - SQI is 0x%x", mode_type, val);
4730
4731 }
4732
4733 //Obtain short circuit, open circuit and normal connection of VCT
4734 else if(mode_type == 3)
4735 {
4736 //--TDR Enable
4737 devad = 0x1;
4738 reg = 0x8B00;
4739 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4740 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4741 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4742 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, BIT(14));
4743
4744 msleep(200);
4745
4746 //--TDR Start
4747 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4748 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4749 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4750 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, BIT(12) | BIT(14));
4751
4752 msleep(20);
4753 //--Read VCT
4754 devad = 0x1;
4755 reg = 0x8B02;
4756 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4757 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4758 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4759 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0xe);
4760
4761 printk(KERN_DEBUG "Open status: %s - Short status: %s\n",
4762 (val & BIT(1)) ? "Open" : "Normal", (val & BIT(0)) ? "Short" : "Normal");
4763 sprintf(buf, "Open status: %s\nShort status: %s\n",
4764 (val & BIT(1)) ? "Open" : "Normal", (val & BIT(0)) ? "Short" : "Normal");
4765
4766 reg = 0x8B01;
4767 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4768 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4769 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4770 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0xe);
4771
4772 sprintf(buf, "%sDistance status: 0x%x\n", buf, val);
4773 printk(KERN_DEBUG "mode_type %d - Distance status is 0x%x\n", mode_type, val);
4774
4775 //--TDR Disable
4776 devad = 0x1;
4777 reg = 0x8B00;
4778 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4779 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4780 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4781 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, 0x0);
4782
4783
4784 }
4785 else{
4786 sprintf(buf, "Please enter the number range 0-3\n"
4787 "0: Set the slave mode \n"
4788 "1: Set the main mode \n"
4789 "2: indicates setting SQI value view mode \n"
4790 "3: Set the VCT value view mode \n"
4791 "After the mode is set, the corresponding value can be obtained\n");
4792 printk(KERN_DEBUG "Please enter the number range 0-3\n"
4793 "0: Set the slave mode \n"
4794 "1: Set the main mode \n"
4795 "2: indicates setting SQI value view mode \n"
4796 "3: Set the VCT value view mode \n"
4797 "After the mode is set, the corresponding value can be obtained\n");
4798 }
4799 enter_only_one = 0;
4800
4801 return strlen(buf);
4802}
4803
4804
4805
/* sysfs attributes exposing the PHY debug controls implemented above. */
static DEVICE_ATTR(lpsd_sleep, S_IRUGO | S_IWUSR, lpsd_sleep_show, lpsd_sleep_store);
static DEVICE_ATTR(phy_version, S_IRUGO | S_IWUSR, phy_version_show, phy_version_store);
static DEVICE_ATTR(gmac_master_or_slave, S_IRUGO | S_IWUSR, gmac_master_or_slave_show, gmac_master_or_slave_store);


/* Attribute table registered as one sysfs group in emac_probe(). */
static struct attribute *ethrnet_opera_attrs[] = {
	&dev_attr_lpsd_sleep.attr,
	&dev_attr_phy_version.attr,
	&dev_attr_gmac_master_or_slave.attr,
	NULL,
};

static const struct attribute_group demo_attr_grp = {

	.attrs = ethrnet_opera_attrs,

};
4823
b.liue9582032025-04-17 19:18:16 +08004824static int emac_probe(struct platform_device *pdev)
4825{
4826 struct emac_priv *priv;
4827 struct net_device *ndev = NULL;
4828 struct resource *res;
4829 struct device_node *np = pdev->dev.of_node;
4830 struct device *dev = &pdev->dev;
4831 const unsigned char *mac_addr = NULL;
4832 const struct of_device_id *match;
4833#ifdef CONFIG_DEBUG_FS
4834 struct dentry *emac_fs_dir = NULL;
4835 struct dentry *emac_clk_tuning;
4836#endif
4837 int ret;
b.liub17525e2025-05-14 17:22:29 +08004838 struct regulator *vcc3v3_gmac;
b.liue9582032025-04-17 19:18:16 +08004839
4840 ndev = alloc_etherdev(sizeof(struct emac_priv));
4841 if (!ndev) {
4842 ret = -ENOMEM;
4843 return ret;
4844 }
4845 priv = netdev_priv(ndev);
4846 priv->ndev = ndev;
4847 priv->pdev = pdev;
4848#ifdef WAN_LAN_AUTO_ADAPT
4849 priv->dhcp = -1;
4850 priv->vlan_port = -1;
4851 priv->dhcp_delaywork = 0;
4852#endif
4853 platform_set_drvdata(pdev, priv);
4854
4855 match = of_match_device(of_match_ptr(emac_of_match), &pdev->dev);
4856 if (match) {
4857 priv->regdata = match->data;
4858 } else {
4859 pr_info("===> not match valid device\n");
4860 }
4861
4862 emac_command_options(priv);
4863 emac_skbrb_init(EMAC_SKBRB_SLOT_SIZE, priv->rx_ring.total_cnt * 2);
4864
4865 priv->hw_stats = devm_kzalloc(&pdev->dev,
4866 sizeof(*priv->hw_stats),
4867 GFP_KERNEL);
4868 if (!priv->hw_stats) {
4869 dev_err(&pdev->dev, "failed to allocate counter memory\n");
4870 ret = -ENOMEM;
4871 goto err_netdev;
4872 }
4873
4874 spin_lock_init(&priv->hw_stats->stats_lock);
4875
4876 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4877 priv->iobase = devm_ioremap_resource(&pdev->dev, res);
4878 if (IS_ERR(priv->iobase)) {
4879 ret = -ENOMEM;
4880 goto err_netdev;
4881 }
4882
4883 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4884 priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
4885 if (!IS_ERR(priv->tso_base)) {
4886 dev_info(&pdev->dev, "tso base=0x%x\n", (unsigned)priv->tso_base);
4887 }
4888
4889 priv->irq = irq_of_parse_and_map(np, 0);
4890 if (!priv->irq) {
4891 ret = -ENXIO;
4892 goto err_netdev;
4893 }
4894 priv->irq_wakeup = irq_of_parse_and_map(np, 1);
4895 if (!priv->irq_wakeup)
4896 dev_err(&pdev->dev, "wake_up irq not found\n");
4897
4898 priv->tso = of_property_read_bool(np, "tso-support");
4899 if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
4900 priv->tso = false;
4901 if (priv->tso) {
4902 priv->irq_tso = irq_of_parse_and_map(np, 3);
4903 if (!priv->irq_tso) {
4904 dev_err(&pdev->dev, "tso irq not found\n");
4905 priv->tso = false;
4906 }
4907 }
4908
4909 priv->sram_pool = of_gen_pool_get(dev->of_node, "eth,sram", 0);
4910 if (priv->sram_pool) {
4911 dev_notice(&pdev->dev, "use sram as tx desc\n");
4912 }
4913
4914 ret = of_property_read_u32(np, "lpm-qos", &priv->pm_qos);
4915 if (ret)
4916 return ret;
4917
4918 ret = of_property_read_u32(np, "3v3-enable", &priv->power_domain);
4919 if (ret)
4920 priv->power_domain = 0;
4921
4922 ret = of_property_read_u32(np, "mdio-clk-div", &priv->mdio_clk_div);
4923 if (ret)
4924 priv->mdio_clk_div = 0xfe;
4925
4926 if (of_property_read_bool(np, "enable-suspend"))
4927 priv->en_suspend = 1;
4928 else
4929 priv->en_suspend = 0;
4930
4931 priv->wolopts = 0;
4932 if (of_property_read_bool(np, "magic-packet-wakeup"))
4933 priv->wolopts |= WAKE_MAGIC;
4934
4935 if (of_property_read_bool(np, "unicast-packet-wakeup"))
4936 priv->wolopts |= WAKE_UCAST;
4937
4938 priv->dev_flags = 0;
4939 if (of_property_read_bool(np, "suspend-not-keep-power")) {
4940 priv->dev_flags |= EMAC_SUSPEND_POWER_DOWN_PHY;
4941 priv->wolopts = 0;
4942 }
4943
b.liub17525e2025-05-14 17:22:29 +08004944 vcc3v3_gmac = devm_regulator_get(dev, "vmmc");
4945 if (!IS_ERR(vcc3v3_gmac))
4946 {
4947 if( regulator_set_voltage(vcc3v3_gmac, 1800000,1800000))
4948 pr_err("fail to set regulator vcc3v3_gmac to 1.8v\n");
4949
4950 if (!regulator_is_enabled(vcc3v3_gmac) && regulator_enable(vcc3v3_gmac))
4951 pr_err("fail to enable regulator vcc3v3_gmac\n");
4952 }
4953
4954 g_vcc3v3_gmac = vcc3v3_gmac;
4955
b.liue9582032025-04-17 19:18:16 +08004956 priv->pinctrl = devm_pinctrl_get(dev);
4957 if (IS_ERR(priv->pinctrl))
4958 dev_err(dev, "could not get pinctrl handle\n");
4959
4960 priv->rgmii_pins = pinctrl_lookup_state(priv->pinctrl, "rgmii-pins");
4961 if (IS_ERR(priv->rgmii_pins))
4962 dev_err(dev, "could not get rgmii-pins pinstate\n");
4963
4964 emac_set_aib_power_domain(priv);
4965
4966 device_init_wakeup(&pdev->dev, 1);
4967
4968 priv->pm_qos_req.name = pdev->name;
4969 pm_qos_add_request(&priv->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
4970 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
4971#ifdef CONFIG_DDR_DEVFREQ
4972 pm_qos_add_request(&priv->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
4973 PM_QOS_DEFAULT_VALUE);
4974
4975 priv->clk_scaling.polling_delay_ms = 1000; /* 1s window */
4976 priv->clk_scaling.tx_up_threshold = 120; /* 120Mbps */
4977 priv->clk_scaling.tx_down_threshold = 60;
4978 priv->clk_scaling.rx_up_threshold = 60; /* 60Mbps */
4979 priv->clk_scaling.rx_down_threshold = 20;
4980 priv->clk_scaling.window_time = jiffies;
4981 pm_qos_add_request(&priv->clk_scaling.ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
4982 PM_QOS_DEFAULT_VALUE);
4983 INIT_WORK(&priv->qos_work, emac_ddr_qos_work);
4984#endif
4985 skb_queue_head_init(&priv->rx_skb);
4986 ndev->watchdog_timeo = 5 * HZ;
4987 ndev->base_addr = (unsigned long)priv->iobase;
4988 ndev->irq = priv->irq;
4989 /* set hw features */
4990 ndev->features = NETIF_F_SG | NETIF_F_SOFT_FEATURES;
4991 if (priv->tso) {
4992 ndev->features |= NETIF_F_RXCSUM;
4993 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4994 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
4995 dev_info(&pdev->dev, "TSO feature enabled\n");
4996 }
4997 ndev->hw_features = ndev->features;
4998 ndev->vlan_features = ndev->features;
4999
5000 ndev->ethtool_ops = &emac_ethtool_ops;
5001 ndev->netdev_ops = &emac_netdev_ops;
5002 if (pdev->dev.of_node)
5003 mac_addr = of_get_mac_address(np);
5004
5005 if (!IS_ERR_OR_NULL(mac_addr)) {
5006 //ether_addr_copy(ndev->dev_addr, mac_addr);
5007 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
5008 if (!is_valid_ether_addr(ndev->dev_addr)) {
5009 dev_info(&pdev->dev, "Using random mac address\n");
5010 eth_hw_addr_random(ndev);
5011 }
5012 } else {
5013 dev_info(&pdev->dev, "Using random mac address\n");
5014 eth_hw_addr_random(ndev);
5015 }
5016
5017 priv->hw_adj = of_property_read_bool(np, "hw-increment");
5018 priv->ptp_support = of_property_read_bool(np, "ptp-support");
5019 if (priv->ptp_support) {
5020 pr_info("EMAC support IEEE1588 PTP Protocol\n");
5021 if (of_property_read_u32(np, "ptp-clk-rate",
5022 &priv->ptp_clk_rate)) {
5023 priv->ptp_clk_rate = 20000000;
5024 pr_info("%s ptp_clk rate using default value:%d may inaccurate!!1\n",
5025 __func__, priv->ptp_clk_rate);
5026 }
5027
5028 priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk");
5029 if (IS_ERR(priv->ptp_clk)) {
5030 dev_err(&pdev->dev, "ptp clock not found.\n");
5031 ret = PTR_ERR(priv->ptp_clk);
5032 goto err_netdev;
5033 }
5034
5035 clk_set_rate(priv->ptp_clk, priv->ptp_clk_rate);
5036 }
5037
5038 priv->pps_info.enable_pps = 0;
5039#ifdef CONFIG_PPS
5040 ret = of_property_read_u32(np, "pps_source", &priv->pps_info.pps_source);
5041 if (!ret) {
5042 priv->irq_pps = irq_of_parse_and_map(np, 2);
5043
5044 if (priv->pps_info.pps_source < EMAC_PPS_MAX)
5045 priv->pps_info.enable_pps = 1;
5046 else
5047 dev_err(&pdev->dev, "wrong PPS source!\n");
5048 }
5049#endif
5050 priv->clk = devm_clk_get(&pdev->dev, "emac-clk");
5051 if (IS_ERR(priv->clk)) {
5052 dev_err(&pdev->dev, "emac clock not found.\n");
5053 ret = PTR_ERR(priv->clk);
5054 goto err_netdev;
5055 }
5056
5057 ret = clk_prepare_enable(priv->clk);
5058 if (ret < 0) {
5059 dev_err(&pdev->dev, "failed to enable emac clock: %d\n",
5060 ret);
5061 goto clk_disable;
5062 }
5063
5064 emac_sw_init(priv);
5065 ret = emac_mdio_init(priv);
5066 if (ret)
5067 goto clk_disable;
5068
5069 INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
5070#ifdef WAN_LAN_AUTO_ADAPT
5071 INIT_DELAYED_WORK(&priv->dhcp_work, emac_dhcp_work_func_t);
5072#endif
5073 if (of_phy_is_fixed_link(np)) {
5074 if ((emac_set_fixed_link(np, priv) < 0)) {
5075 ret = -ENODEV;
5076 goto clk_disable;
5077 }
5078 dev_info(&pdev->dev, "find fixed link\n");
5079 priv->fix_link = 1;
5080 }
5081
5082 INIT_DELAYED_WORK(&priv->emac_pause_work, emac_pause_generate_work_fuc);
5083 SET_NETDEV_DEV(ndev, &pdev->dev);
5084 strcpy(ndev->name, "eth%d");
5085
5086 ret = register_netdev(ndev);
5087 if (ret) {
5088 pr_err("register_netdev failed\n");
5089 goto err_mdio_deinit;
5090 }
5091 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
5092#ifdef CONFIG_ASR_EMAC_NAPI
5093 netif_napi_add(ndev, &priv->rx_napi, emac_rx_poll, 32);
5094 netif_tx_napi_add(ndev, &priv->tx_napi, emac_tx_poll, 32);
5095#endif
5096 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
5097 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
5098 priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable");
5099
5100 if (priv->clk_tuning_enable) {
5101 ret = of_property_read_u32(np, "tx-clk-config",
5102 &priv->tx_clk_config);
5103 if (ret)
5104 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
5105
5106 ret = of_property_read_u32(np, "rx-clk-config",
5107 &priv->rx_clk_config);
5108 if (ret)
5109 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
5110#ifdef CONFIG_DEBUG_FS
5111 if (!emac_fs_dir) {
5112 emac_fs_dir = debugfs_create_dir(DRIVER_NAME, NULL);
5113
5114 if (!emac_fs_dir || IS_ERR(emac_fs_dir)) {
5115 pr_err("emac debugfs create directory failed\n");
5116 }else {
5117 emac_clk_tuning = debugfs_create_file("clk_tuning", 0664,
5118 emac_fs_dir, priv, &clk_tuning_fops);
5119 if (!emac_clk_tuning) {
5120 pr_err("emac debugfs create file failed\n");
5121 }
5122 }
5123 }
5124#endif
5125 }
b.liub17525e2025-05-14 17:22:29 +08005126
5127 sysfs_create_group(&pdev->dev.kobj,&demo_attr_grp);
5128
5129
5130 //device_create_file(&pdev->dev, &dev_attr_cable_sqi_value);
b.liue9582032025-04-17 19:18:16 +08005131 return 0;
5132
5133err_mdio_deinit:
5134 emac_mdio_deinit(priv);
5135clk_disable:
5136 clk_disable_unprepare(priv->clk);
5137err_netdev:
5138 free_netdev(ndev);
5139 emac_skbrb_release();
5140 return ret;
5141}
5142
5143static int emac_remove(struct platform_device *pdev)
5144{
5145 struct emac_priv *priv = platform_get_drvdata(pdev);
5146
5147 device_init_wakeup(&pdev->dev, 0);
5148 unregister_netdev(priv->ndev);
5149 emac_reset_hw(priv);
5150 free_netdev(priv->ndev);
5151 emac_mdio_deinit(priv);
5152 clk_disable_unprepare(priv->clk);
5153 pm_qos_remove_request(&priv->pm_qos_req);
5154 cancel_delayed_work_sync(&priv->emac_pause_work);
5155#ifdef CONFIG_DDR_DEVFREQ
5156 pm_qos_remove_request(&priv->pm_ddr_qos);
5157 pm_qos_remove_request(&priv->clk_scaling.ddr_qos);
5158#endif
5159 emac_skbrb_release();
5160 return 0;
5161}
5162
/* Shutdown hook: intentionally empty, no hardware quiesce on power-off. */
static void emac_shutdown(struct platform_device *pdev)
{
}
5166
5167#ifdef CONFIG_PM_SLEEP
5168static int emac_resume(struct device *dev)
5169{
5170 struct emac_priv *priv = dev_get_drvdata(dev);
5171 struct net_device *ndev = priv->ndev;
5172 u32 ctrl, wake_mode = 0;
5173
5174 if (!priv->en_suspend)
5175 return 0;
5176
5177 if (priv->wolopts) {
5178 if (netif_running(ndev)) {
5179 netif_device_attach(ndev);
5180#ifdef CONFIG_ASR_EMAC_NAPI
5181 napi_enable(&priv->rx_napi);
5182 napi_enable(&priv->tx_napi);
5183#endif
5184 }
5185
5186 if (priv->wolopts & WAKE_MAGIC)
5187 wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
5188 if (priv->wolopts & WAKE_UCAST)
5189 wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
5190
5191 disable_irq_wake(priv->irq_wakeup);
5192 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
5193 ctrl &= ~wake_mode;
5194 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
5195 } else {
5196 clk_prepare_enable(priv->clk);
5197
5198 if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
5199 emac_power_up(priv);
5200
5201 rtnl_lock();
5202 dev_open(ndev, NULL);
5203 rtnl_unlock();
5204 }
5205
5206 return 0;
5207}
5208
5209static int emac_suspend(struct device *dev)
5210{
5211 struct emac_priv *priv = dev_get_drvdata(dev);
5212 struct net_device *ndev = priv->ndev;
5213 u32 ctrl, wake_mode = 0;
5214
5215 if (!priv->en_suspend)
5216 return 0;
5217
5218 if (priv->wolopts) {
5219 if (netif_running(ndev)) {
5220 netif_device_detach(ndev);
5221#ifdef CONFIG_ASR_EMAC_NAPI
5222 napi_disable(&priv->rx_napi);
5223 napi_disable(&priv->tx_napi);
5224#endif
5225 }
5226
5227 if (priv->wolopts & WAKE_MAGIC)
5228 wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
5229 if (priv->wolopts & WAKE_UCAST)
5230 wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
5231
5232 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
5233 ctrl |= wake_mode;
5234 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
5235 enable_irq_wake(priv->irq_wakeup);
5236 } else {
5237 rtnl_lock();
5238 dev_close(ndev);
5239 rtnl_unlock();
5240
5241 if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
5242 emac_power_down(priv);
5243
5244 clk_disable_unprepare(priv->clk);
5245 }
5246
5247 return 0;
5248}
5249
/*
 * Late (noirq) suspend: drop the cpuidle QoS vote back to the default
 * so the SoC may enter deep idle states while suspended. Skipped when
 * no PHY is attached and no fixed link is configured.
 */
static int emac_suspend_noirq(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev->phydev && !priv->fix_link)
		return 0;

	pr_pm_debug("==> enter emac_suspend_noirq\n");
	pm_qos_update_request(&priv->pm_qos_req,
			      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
	return 0;
}
5263
/*
 * Early (noirq) resume: restore the device-tree configured cpuidle QoS
 * constraint (priv->pm_qos from "lpm-qos") that emac_suspend_noirq()
 * relaxed. Skipped when no PHY is attached and no fixed link is used.
 */
static int emac_resume_noirq(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!ndev->phydev && !priv->fix_link)
		return 0;

	pr_pm_debug("==> enter emac_resume_noirq\n");
	pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
	return 0;
}
5276
/* System sleep callbacks; the *_noirq hooks manage the cpuidle QoS vote. */
static const struct dev_pm_ops emac_pm_ops = {
	.suspend = emac_suspend,
	.resume = emac_resume,
	.suspend_noirq = emac_suspend_noirq,
	.resume_noirq = emac_resume_noirq,
};

#define ASR_EMAC_PM_OPS (&emac_pm_ops)
#else
#define ASR_EMAC_PM_OPS NULL
#endif
5288
/* Platform driver glue: matched via DT (emac_of_match). */
static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.shutdown = emac_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = ASR_EMAC_PM_OPS,
	},
};
5299
/* Register the driver and standard module metadata. */
module_platform_driver(emac_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for ASR Emac");
MODULE_ALIAS("platform:asr_eth");