// SPDX-License-Identifier: GPL-2.0
/*
 * asr emac driver
 *
 * Copyright (C) 2019 ASR Micro Limited
 *
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
#include <linux/phy_fixed.h>
#include <linux/pm_qos.h>
#include <asm/cacheflush.h>
#include <linux/cputype.h>
#include <linux/iopoll.h>
#include <linux/genalloc.h>
#include <linux/regulator/consumer.h>

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <asm/atomic.h>
#include "emac_eth.h"
#include <linux/skbrb.h>

#ifdef WAN_LAN_AUTO_ADAPT
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/kobject.h>
#endif
#define DRIVER_NAME "asr_emac"

#define CLOSE_AIB_POWER_DOMAIN 1
#define AXI_PHYS_BASE 0xd4200000

#define AIB_GMAC_IO_REG 0xD401E804
#define APBC_ASFAR 0xD4015050
#define AKEY_ASFAR 0xbaba
#define AKEY_ASSAR 0xeb10

#define EMAC_DIRECT_MAP
#define TUNING_CMD_LEN 50
#define CLK_PHASE_CNT 8
#define TXCLK_PHASE_DEFAULT 0
#define RXCLK_PHASE_DEFAULT 0
#define TX_PHASE 1
#define RX_PHASE 0

#define EMAC_DMA_REG_CNT 16
#define EMAC_MAC_REG_CNT 61
#define EMAC_EMPTY_FROM_DMA_TO_MAC 48
#define EMAC_REG_SPACE_SIZE ((EMAC_DMA_REG_CNT + \
		EMAC_MAC_REG_CNT + EMAC_EMPTY_FROM_DMA_TO_MAC) * 4)
#define EMAC_ETHTOOL_STAT(x) { #x, \
		offsetof(struct emac_hw_stats, x) / sizeof(u32) }

#define EMAC_SKBRB_SLOT_SIZE 1600
#define EMAC_EXTRA_ROOM 72
#define EMAC_SKBRB_MAX_PAYLOAD (EMAC_SKBRB_SLOT_SIZE - EMAC_EXTRA_ROOM - NET_IP_ALIGN)
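/*
 * Worked example (assuming the typical NET_IP_ALIGN of 2):
 * 1600 - 72 - 2 = 1526 bytes of payload per ring slot, enough for a
 * standard 1518-byte frame plus a 4-byte VLAN tag.
 */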

#define EMAC_RX_FILL_TIMER_US 0
#define EMAC_TX_COAL_TIMER_US (1000)
#define EMAC_TX_FRAMES (64)
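/*
 * TX completion coalescing (sketch of the intent): cleanup is expected to
 * run either after EMAC_TX_FRAMES transmitted packets or when the
 * EMAC_TX_COAL_TIMER_US one-shot tx_timer fires, whichever happens first;
 * see emac_tx_timer_arm() below. EMAC_TX_FRAMES itself is consumed in the
 * transmit path.
 */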

#ifdef WAN_LAN_AUTO_ADAPT
#define DHCP_DISCOVER 1
#define DHCP_OFFER 2
#define DHCP_REQUEST 3
#define DHCP_ACK 5
#define IP175D_PHY_ID 0x02430d80

enum emac_SIG {
	CARRIER_DOWN = 0,
	CARRIER_UP,
	DHCP_EVENT_CLIENT,
	DHCP_EVENT_SERVER,
	PHY_IP175D_CONNECT,
	CARRIER_DOWN_IP175D,
	CARRIER_UP_IP175D,
};

enum emac_DHCP {
	DHCP_SEND_REQ = 1,
	DHCP_REC_RESP = 2,
};

struct emac_event {
	const char *name;
	char *action;
	int port;
	struct sk_buff *skb;
	struct work_struct work;
};

extern u64 uevent_next_seqnum(void);
static int emac_sig_workq(int event, int port);
#endif

static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable);
static int clk_phase_set(struct emac_priv *priv, bool is_tx);
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_rx_clean_desc(struct emac_priv *priv, int budget);
#else
static int emac_rx_clean_desc(struct emac_priv *priv);
#endif
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv);
static int emac_phy_connect(struct net_device *dev);

struct regulator *g_vcc3v3_gmac = NULL;
/* for falcon */
struct emac_regdata asr_emac_regdata_v1 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 17,
	.phy_intr_enable_shift = 16,
	.int_clk_src_sel_shift = -1,
	.rgmii_tx_clk_src_sel_shift = 5,
	.rgmii_rx_clk_src_sel_shift = 4,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = -1,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = -1,
	.rgmii_tx_delay_code_shift = -1,
	.rgmii_tx_delay_code_mask = -1,
	.rgmii_tx_delay_step_shift = -1,
	.rgmii_tx_delay_step_mask = -1,
	.rgmii_tx_delay_enable_shift = -1,
	.rgmii_rx_dline_reg_offset = -1,
	.rgmii_rx_delay_code_shift = -1,
	.rgmii_rx_delay_code_mask = -1,
	.rgmii_rx_delay_step_shift = -1,
	.rgmii_rx_delay_step_mask = -1,
	.rgmii_rx_delay_enable_shift = -1,
};
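/*
 * Note: a value of -1 in any *_shift/*_offset/*_mask field above marks a
 * control bit that does not exist on that SoC revision; consumers of
 * emac_regdata are expected to check for it before use.
 */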

/* for kagu */
struct emac_regdata asr_emac_regdata_v2 = {
	.support_dual_vol_power = 0,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x178,
	.rgmii_tx_delay_code_shift = 24,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 20,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 16,
	.rgmii_rx_dline_reg_offset = 0x178,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 4,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 0,
};

/* for lapwing */
struct emac_regdata asr_emac_regdata_v3 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 1,
	.clk_rst_ctrl_reg_offset = 0x164,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x16c,
	.rgmii_tx_delay_code_shift = 8,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 0,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 31,
	.rgmii_rx_dline_reg_offset = 0x168,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 0,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 31,
};

static const struct of_device_id emac_of_match[] = {
	{
		.compatible = "asr,asr-eth",
		.data = (void *)&asr_emac_regdata_v1,
	},
	{
		.compatible = "asr,asr-eth-v2",
		.data = (void *)&asr_emac_regdata_v2,
	},
	{
		.compatible = "asr,asr-eth-v3",
		.data = (void *)&asr_emac_regdata_v3,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);

#ifdef EMAC_DIRECT_MAP
inline dma_addr_t emac_map_direct(unsigned int buf, unsigned int len)
{
	unsigned int ret;

	ret = mv_cp_virtual_to_physical(buf);
	BUG_ON(ret == buf);
	/* flush the whole 32-byte cache lines covering [buf, buf + len) */
	__cpuc_flush_dcache_area((void *)(buf & ~31),
				 ((len + (buf & 31) + 31) & ~31));
	return (dma_addr_t)ret;
}
#endif

static inline void emac_unmap_single(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir)
{
#ifdef EMAC_DIRECT_MAP
	if (dir == DMA_TO_DEVICE)
		return;
#endif
	dma_unmap_single(dev, handle, size, dir);
}

static inline dma_addr_t emac_map_single(struct device *dev, void *ptr,
					 size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		return dma_map_single(dev, ptr, size, dir);
#ifndef EMAC_DIRECT_MAP
	return dma_map_single(dev, ptr, size, dir);
#else
	return emac_map_direct((unsigned int)ptr, (unsigned int)size);
#endif
}
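/*
 * Usage note: with EMAC_DIRECT_MAP, TX (DMA_TO_DEVICE) buffers are
 * translated and cache-flushed by emac_map_direct() rather than going
 * through the DMA API, which is why emac_unmap_single() above
 * deliberately skips dma_unmap_single() for the DMA_TO_DEVICE direction.
 */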

#ifdef CONFIG_DDR_DEVFREQ
static void emac_ddr_qos_work(struct work_struct *work)
{
	struct emac_priv *priv;
	int val;

	priv = container_of(work, struct emac_priv, qos_work);
	val = priv->clk_scaling.qos_val;

	if (val == PM_QOS_DEFAULT_VALUE)
		pm_qos_update_request(&priv->clk_scaling.ddr_qos, val);
	else
		pm_qos_update_request_timeout(
			&priv->clk_scaling.ddr_qos, val, (2 * USEC_PER_SEC));
}

static void emac_ddr_clk_scaling(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	unsigned long rx_bytes, tx_bytes;
	unsigned long last_rx_bytes, last_tx_bytes;
	unsigned long total_time_ms = 0;
	unsigned int cur_rx_threshold, cur_tx_threshold;
	unsigned long polling_jiffies;
	int qos_val;

	polling_jiffies = msecs_to_jiffies(priv->clk_scaling.polling_delay_ms);
	if (time_is_after_jiffies(priv->clk_scaling.window_time +
				  polling_jiffies))
		return;

	total_time_ms = jiffies_to_msecs((long)jiffies -
					 (long)priv->clk_scaling.window_time);

	if (!ndev) {
		pr_err("%s: dev or net is not ready\n", __func__);
		return;
	}

	qos_val = priv->clk_scaling.qos_val;
	last_rx_bytes = priv->clk_scaling.rx_bytes;
	last_tx_bytes = priv->clk_scaling.tx_bytes;
	if (!last_rx_bytes && !last_tx_bytes)
		goto out;

	if (likely(ndev->stats.rx_bytes > last_rx_bytes))
		rx_bytes = ndev->stats.rx_bytes - last_rx_bytes;
	else
		rx_bytes = ULONG_MAX - last_rx_bytes + ndev->stats.rx_bytes + 1;

	if (likely(ndev->stats.tx_bytes > last_tx_bytes))
		tx_bytes = ndev->stats.tx_bytes - last_tx_bytes;
	else
		tx_bytes = ULONG_MAX - last_tx_bytes + ndev->stats.tx_bytes + 1;

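	/*
	 * bytes * 8 / (elapsed_ms * 1000) yields Mbps, e.g. 125,000,000
	 * bytes over 1000 ms -> 1e9 bits / 1e6 = 1000 Mbps.
	 */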
	cur_tx_threshold = tx_bytes * 8 / (total_time_ms * 1000);
	pr_debug("%s: tx_rate=%dMbps, up_threshold=%dMbps\n",
		 __func__, cur_tx_threshold, priv->clk_scaling.tx_up_threshold);
	if (cur_tx_threshold >= priv->clk_scaling.tx_up_threshold) {
		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
		goto out;
	}

	cur_rx_threshold = rx_bytes * 8 / (total_time_ms * 1000);
	pr_debug("%s: rx_rate=%dMbps, up_threshold=%dMbps\n",
		 __func__, cur_rx_threshold, priv->clk_scaling.rx_up_threshold);
	if (cur_rx_threshold >= priv->clk_scaling.rx_up_threshold) {
		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
		goto out;
	}

	if (cur_tx_threshold < priv->clk_scaling.tx_down_threshold &&
	    cur_rx_threshold < priv->clk_scaling.rx_down_threshold)
		qos_val = PM_QOS_DEFAULT_VALUE;

out:
	priv->clk_scaling.rx_bytes = ndev->stats.rx_bytes;
	priv->clk_scaling.tx_bytes = ndev->stats.tx_bytes;
	priv->clk_scaling.window_time = jiffies;

	if (qos_val != priv->clk_scaling.qos_val) {
		priv->clk_scaling.qos_val = qos_val;
		schedule_work(&priv->qos_work);
	}

	return;
}
#endif

/* strings used by ethtool */
static const struct emac_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} emac_ethtool_stats[] = {
	EMAC_ETHTOOL_STAT(tx_ok_pkts),
	EMAC_ETHTOOL_STAT(tx_total_pkts),
	EMAC_ETHTOOL_STAT(tx_ok_bytes),
	EMAC_ETHTOOL_STAT(tx_err_pkts),
	EMAC_ETHTOOL_STAT(tx_singleclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_multiclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_lateclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_excessclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_unicast_pkts),
	EMAC_ETHTOOL_STAT(tx_multicast_pkts),
	EMAC_ETHTOOL_STAT(tx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(tx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_pkts),
	EMAC_ETHTOOL_STAT(rx_total_pkts),
	EMAC_ETHTOOL_STAT(rx_crc_err_pkts),
	EMAC_ETHTOOL_STAT(rx_align_err_pkts),
	EMAC_ETHTOOL_STAT(rx_err_total_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_bytes),
	EMAC_ETHTOOL_STAT(rx_total_bytes),
	EMAC_ETHTOOL_STAT(rx_unicast_pkts),
	EMAC_ETHTOOL_STAT(rx_multicast_pkts),
	EMAC_ETHTOOL_STAT(rx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(rx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_len_err_pkts),
	EMAC_ETHTOOL_STAT(rx_len_undersize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_oversize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_fragment_pkts),
	EMAC_ETHTOOL_STAT(rx_len_jabber_pkts),
	EMAC_ETHTOOL_STAT(rx_64_pkts),
	EMAC_ETHTOOL_STAT(rx_65_127_pkts),
	EMAC_ETHTOOL_STAT(rx_128_255_pkts),
	EMAC_ETHTOOL_STAT(rx_256_511_pkts),
	EMAC_ETHTOOL_STAT(rx_512_1023_pkts),
	EMAC_ETHTOOL_STAT(rx_1024_1518_pkts),
	EMAC_ETHTOOL_STAT(rx_1519_plus_pkts),
	EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_dma_missed_frame_cnt),
	EMAC_ETHTOOL_STAT(tx_tso_pkts),
	EMAC_ETHTOOL_STAT(tx_tso_bytes),
};

static int emac_set_speed_duplex(struct emac_priv *priv)
{
	u32 ctrl;

	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
	if (priv->duplex)
		ctrl |= MREGBIT_FULL_DUPLEX_MODE;
	else
		ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;

	switch (priv->speed) {
	case SPEED_1000:
		ctrl |= MREGBIT_SPEED_1000M;
		break;
	case SPEED_100:
		ctrl |= MREGBIT_SPEED_100M;
		break;
	case SPEED_10:
		ctrl |= MREGBIT_SPEED_10M;
		break;
	default:
		pr_err("broken speed: %d\n", priv->speed);
		return 0;
	}
	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	pr_info("emac: force link speed:%dM duplex:%s\n",
		priv->speed, priv->duplex ? "Full" : "Half");

	return 0;
}

static int emac_set_fixed_link(struct device_node *np, struct emac_priv *priv)
{
	struct fixed_phy_status status = {};
	struct device_node *fixed_link_node;
	u32 fixed_link_prop[5];
	const char *managed;
	int interface;

	if (of_property_read_string(np, "managed", &managed) == 0 &&
	    strcmp(managed, "in-band-status") == 0) {
		/* status is zeroed, namely its .link member */
		goto fix_link;
	}

	/* New binding */
	fixed_link_node = of_get_child_by_name(np, "fixed-link");
	if (fixed_link_node) {
		status.link = 1;
		status.duplex = of_property_read_bool(fixed_link_node,
						      "full-duplex");
		if (of_property_read_u32(fixed_link_node, "speed",
					 &status.speed)) {
			of_node_put(fixed_link_node);
			return -EINVAL;
		}
		status.pause = of_property_read_bool(fixed_link_node, "pause");
		status.asym_pause = of_property_read_bool(fixed_link_node,
							  "asym-pause");
		interface = of_get_phy_mode(fixed_link_node);
		if (interface < 0) {
			priv->interface = PHY_INTERFACE_MODE_RGMII;
			pr_info("no interface for fixed-link, use RGMII\n");
		} else {
			priv->interface = interface;
		}

		of_node_put(fixed_link_node);
		goto fix_link;
	}

	/* Old binding */
	if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
				       ARRAY_SIZE(fixed_link_prop)) == 0) {
		status.link = 1;
		status.duplex = fixed_link_prop[1];
		status.speed = fixed_link_prop[2];
		status.pause = fixed_link_prop[3];
		status.asym_pause = fixed_link_prop[4];
		goto fix_link;
	}

	return -ENODEV;

fix_link:
	priv->speed = status.speed;
	priv->duplex = status.duplex;

	return emac_set_speed_duplex(priv);
}
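/*
 * Illustrative device-tree snippet for the "new binding" path above
 * (node and property names follow the generic fixed-link binding; the
 * parent node is hypothetical):
 *
 *	ethernet {
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */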

void register_dump(struct emac_priv *priv)
{
	int i;
	void __iomem *base = priv->iobase;

	for (i = 0; i < 16; i++) {
		pr_info("DMA:0x%x:0x%x\n",
			DMA_CONFIGURATION + i * 4,
			readl(base + DMA_CONFIGURATION + i * 4));
	}
	for (i = 0; i < 60; i++) {
		pr_info("MAC:0x%x:0x%x\n",
			MAC_GLOBAL_CONTROL + i * 4,
			readl(base + MAC_GLOBAL_CONTROL + i * 4));
	}

	for (i = 0; i < 4; i++) {
		pr_info("1588:0x%x:0x%x\n",
			PTP_1588_CTRL + i * 4,
			readl(base + PTP_1588_CTRL + i * 4));
	}

	for (i = 0; i < 6; i++) {
		pr_info("1588:0x%x:0x%x\n",
			SYS_TIME_GET_LOW + i * 4,
			readl(base + SYS_TIME_GET_LOW + i * 4));
	}
	for (i = 0; i < 5; i++) {
		pr_info("1588:0x%x:0x%x\n",
			RX_TIMESTAMP_LOW + i * 4,
			readl(base + RX_TIMESTAMP_LOW + i * 4));
	}
	for (i = 0; i < 2; i++) {
		pr_info("1588:0x%x:0x%x\n",
			PTP_1588_IRQ_STS + i * 4,
			readl(base + PTP_1588_IRQ_STS + i * 4));
	}

	if (priv->tso) {
		for (i = 0; i < 18; i++) {
			pr_info("TSO:0x%x:0x%x\n", i * 4,
				emac_rd_tso(priv, i * 4));
		}
	}
}

void print_pkt(unsigned char *buf, int len)
{
	int i = 0;

	pr_debug("data len = %d byte, buf addr: 0x%x\n",
		 len, (unsigned int)buf);
	for (i = 0; i < len; i = i + 8) {
		pr_debug("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
			 *(buf + i),
			 *(buf + i + 1),
			 *(buf + i + 2),
			 *(buf + i + 3),
			 *(buf + i + 4),
			 *(buf + i + 5),
			 *(buf + i + 6),
			 *(buf + i + 7)
			 );
	}
}

#ifdef EMAC_DEBUG
void print_desc(unsigned char *buf, int len)
{
	int i;

	pr_info("descriptor len = %d byte, buf addr: 0x%x\n",
		len, (unsigned int)buf);
	for (i = 0; i < len; i = i + 4) {
		pr_info("0x%02x%02x%02x%02x\n",
			*(buf + i + 3),
			*(buf + i + 2),
			*(buf + i + 1),
			*(buf + i));
	}
}
#else
void print_desc(unsigned char *buf, int len)
{

}
#endif

/* Name		emac_reset_hw
 * Arguments	priv : pointer to hardware data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Resets the MAC: masks all interrupts, disables the TX/RX
 *		units and DMA, and restores the MDIO clock divider.
 */
int emac_reset_hw(struct emac_priv *priv)
{
	mutex_lock(&priv->mii_mutex);
	/* disable all the interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);

	/* disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* stop the DMA */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	/* reset mac, statistic counters */
	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018);

	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000);

	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
	mutex_unlock(&priv->mii_mutex);
	return 0;
}

/* Name		emac_init_hw
 * Arguments	priv : pointer to hardware data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Assumes that the controller has previously been reset
 *		and is in a post-reset uninitialized state.
 *		Initializes the receive address registers,
 *		multicast table, and VLAN filter table.
 *		Calls routines to set up link
 *		configuration and flow control settings.
 *		Clears all on-chip counters. Leaves
 *		the transmit and receive units disabled and uninitialized.
 */
int emac_init_hw(struct emac_priv *priv)
{
	u32 val = 0, threshold;

	mutex_lock(&priv->mii_mutex);
	/* MAC init:
	 * disable transmit and receive units
	 */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* enable mac address 1 filtering */
	//emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0001);
	emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0100);

	/* zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0000);

	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, EMAC_TX_FIFO_DWORDS - 8);

	if (priv->speed == SPEED_1000)
		threshold = 1024;
	else if (priv->speed == SPEED_100)
		threshold = 256;
	else
		threshold = TX_STORE_FORWARD_MODE;
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, threshold);

	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, 0xc);

	/* reset dma */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	emac_wr(priv, DMA_CONFIGURATION, 0x01);
	mdelay(10);
	emac_wr(priv, DMA_CONFIGURATION, 0x00);
	mdelay(10);

	val |= MREGBIT_WAIT_FOR_DONE;
	val |= MREGBIT_STRICT_BURST;
	val |= MREGBIT_DMA_64BIT_MODE;
	val |= MREGBIT_BURST_16WORD; //MREGBIT_BURST_1WORD;

	emac_wr(priv, DMA_CONFIGURATION, val);

	/* MDC clock division: AXI-312M/96 = 3.25M */
	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);

	mutex_unlock(&priv->mii_mutex);

	printk("MDIO clock div: 0x%x\n", emac_rd(priv, MAC_MDIO_CLK_DIV));
	return 0;
}

int emac_set_mac_addr(struct emac_priv *priv, unsigned char *addr)
{
	emac_wr(priv, MAC_ADDRESS1_HIGH, (addr[1] << 8 | addr[0]));
	emac_wr(priv, MAC_ADDRESS1_MED, (addr[3] << 8 | addr[2]));
	emac_wr(priv, MAC_ADDRESS1_LOW, (addr[5] << 8 | addr[4]));

	return 0;
}
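/*
 * Byte-packing example: for MAC address 00:11:22:33:44:55 the writes
 * above are HIGH = 0x1100, MED = 0x3322 and LOW = 0x5544 (each register
 * holds a little-endian byte pair).
 */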

void emac_set_fc_source_addr(struct emac_priv *priv, unsigned char *addr)
{
	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_HIGH, (addr[1] << 8 | addr[0]));
	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_MED, (addr[3] << 8 | addr[2]));
	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_LOW, (addr[5] << 8 | addr[4]));

	return;
}

static inline void emac_dma_start_transmit(struct emac_priv *priv)
{
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF);
}

static inline void emac_dma_start_receive(struct emac_priv *priv)
{
	emac_wr(priv, DMA_RECEIVE_POLL_DEMAND, 0xFF);
}

#ifdef CONFIG_ASR_EMAC_NAPI
void emac_enable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
		       MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
				    TSO_AP_RX_INTR_ENA_CSUM_DONE |
				    TSO_AP_RX_INTR_ENA_CSUM_ERR);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}

void emac_disable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val &= ~(MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			 MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA, 0x0);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}
#endif

bool emac_is_rmii_interface(struct emac_priv *priv)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("error to ioremap APMU base\n");
		/* keep the old error behaviour: report RMII on failure */
		return true;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	iounmap(apmu);

	return !(val & (0x1 << regdata->mac_intf_sel_shift));
}

void emac_config_phy_interrupt(struct emac_priv *priv, int enable)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("error to ioremap APMU base\n");
		return;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	if (enable)
		val |= 0x1 << regdata->phy_intr_enable_shift;
	else
		val &= ~(0x1 << regdata->phy_intr_enable_shift);
	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
	iounmap(apmu);
	return;
}

void emac_phy_interface_config(struct emac_priv *priv, int phy_interface)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("error to ioremap APMU base\n");
		return;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	if (PHY_INTERFACE_MODE_RMII == phy_interface) {
		val &= ~(0x1 << regdata->mac_intf_sel_shift);
		printk("===> set emac interface: rmii\n");
	} else {
		val |= 0x1 << regdata->mac_intf_sel_shift;
		printk("===> set emac interface: rgmii\n");
	}
	val |= 0x1 << regdata->axi_mst_single_id_shift;
	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);

	iounmap(apmu);
	priv->interface = phy_interface;
	return;
}

static void emac_set_aib_power_domain(struct emac_priv *priv)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *aib_emac_io;
	void __iomem *apbc_asfar;
	u32 tmp;

	if (!regdata->support_dual_vol_power)
		return;

	aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
	apbc_asfar = ioremap(APBC_ASFAR, 8);

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	priv->power_domain = 0;
	/* 0 = power down; only set power down when the voltage is 0 */
	if (priv->power_domain) {
		tmp &= ~(0x1 << 2); /* 3.3v */
		printk("===> emac set io to 3.3v\n");
	} else {
		tmp |= 0x1 << 2; /* 1.8v */
		printk("===> emac set io to 1.8v\n");
	}

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	writel(tmp, aib_emac_io);

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	printk("===> emac AIB read back: 0x%x\n", tmp);

	iounmap(apbc_asfar);
	iounmap(aib_emac_io);
}

static void emac_pause_generate_work_fuc(struct work_struct *work)
{
	struct emac_priv *priv = container_of(work, struct emac_priv,
					      emac_pause_work.work);
	int time_nxt = 0;

	/*
	 * A pause time value of 0xFFFF stops the link partner's transmitter
	 * for about 336 ms at 100M and 34 ms at 1000M (65535 * 512 bit
	 * times). Repeated testing showed that re-arming every 20 ms (1000M)
	 * or 300 ms (100M) keeps the neighbor paused continuously.
	 */
	time_nxt = (priv->speed == SPEED_1000) ? 20 : 300;
	if (!priv->pause.pause_time_max) {
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0xffff);
		priv->pause.pause_time_max = 1;
	}

	emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
	schedule_delayed_work(&priv->emac_pause_work, msecs_to_jiffies(time_nxt));
	return;
}

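/*
 * RX flow-control watermark scheme used below: if the descriptor
 * high_water slots ahead of nxt_clean is already software-owned
 * (OWN == 0), the ring is nearly full, so kick off the periodic
 * pause-frame work; once the descriptor low_water slots ahead is
 * hardware-owned again, cancel the work and transmit a pause frame with
 * time 0 so the link partner resumes immediately.
 */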
static inline void emac_check_ring_and_send_pause(struct emac_priv *priv)
{
	int pos;
	int high_water;
	int low_water;
	struct emac_rx_desc *rx_desc;
	struct emac_desc_ring *rx_ring;

	rx_ring = &priv->rx_ring;
	pos = rx_ring->nxt_clean;
	high_water = (pos + priv->pause.high_water) % priv->rx_ring.total_cnt;
	low_water = (pos + priv->pause.low_water) % priv->rx_ring.total_cnt;

	rx_desc = emac_get_rx_desc(priv, high_water);
	if (priv->pause.pause_sending == 0 && rx_desc->OWN == 0) {
		schedule_delayed_work(&priv->emac_pause_work, 0);
		priv->pause.pause_sending = 1;
	}

	rx_desc = emac_get_rx_desc(priv, low_water);
	if (rx_desc->OWN && priv->pause.pause_sending) {
		cancel_delayed_work_sync(&priv->emac_pause_work);
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0);
		emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
		priv->pause.pause_time_max = 0;
		priv->pause.pause_sending = 0;
	}
}

/* Name		emac_sw_init
 * Arguments	priv : pointer to driver private data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Initializes the driver's software state (buffer length,
 *		mutexes and spinlocks) with default values.
 */
static int emac_sw_init(struct emac_priv *priv)
{
	priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;

	mutex_init(&priv->mii_mutex);
	spin_lock_init(&priv->spStatsLock);
	spin_lock_init(&priv->spTxLock);
	spin_lock_init(&priv->intr_lock);

	return 0;
}

static int emac_check_ptp_packet(struct emac_priv *priv,
				 struct sk_buff *skb, int txrx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct ptp_header *ptph = NULL;
	struct iphdr *iph;
	struct udphdr *udph;
	int msg_type, msg_id;
	int ts;

	if (eth->h_proto == htons(ETH_P_1588)) {
		netdev_dbg(priv->ndev, "get PTP packet over ETH\n");
		ptph = (struct ptp_header *)((u8 *)eth + sizeof(struct ethhdr));
	} else if (eth->h_proto == htons(ETH_P_IP)) {
		iph = (struct iphdr *)((u8 *)eth + sizeof(struct ethhdr));
		if (iph->protocol != IPPROTO_UDP)
			return -1;

		udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
		if (htons(udph->dest) != PTP_EVENT_PORT ||
		    htons(udph->source) != PTP_EVENT_PORT)
			return -1;

		netdev_dbg(priv->ndev, "get PTP packet over UDP\n");
		ptph = (struct ptp_header *)((u8 *)udph + sizeof(struct udphdr));
	} else {
		return -1;
	}

	msg_id = -1;
	ts = ptph->tsmt & 0xF0;
	msg_type = (ptph->tsmt) & 0x0F;
	if (txrx) {
		if (msg_type == MSG_SYNC) {
			if (ts)
				msg_id = MSG_PDELAY_REQ;
			else
				msg_id = MSG_DELAY_REQ;
		} else if (msg_type == MSG_DELAY_REQ) {
			msg_id = MSG_SYNC;
		} else if (msg_type == MSG_PDELAY_REQ) {
			msg_id = MSG_PDELAY_RESP;
			memcpy(&priv->sourcePortIdentity,
			       &ptph->sourcePortIdentity,
			       sizeof(struct PortIdentity));
		} else if (msg_type == MSG_PDELAY_RESP) {
			msg_id = MSG_PDELAY_REQ;
		}
	} else {
		netdev_dbg(priv->ndev, "RX timestamp for message type %d\n",
			   ptph->tsmt);

		if (msg_type == MSG_PDELAY_RESP) {
			struct pdelay_resp_msg *presp = (struct pdelay_resp_msg *)ptph;

			/*
			 * Change to monitor SYNC packets if a pdelay response
			 * is received for the same clock identity.
			 */
			if (!memcmp(&presp->requestingPortIdentity.clockIdentity,
				    &priv->sourcePortIdentity.clockIdentity,
				    sizeof(struct ClockIdentity))) {
				msg_id = MSG_SYNC;
			}
		}
	}

	/*
	 * Some platforms cannot timestamp two or more message types at
	 * once, so adjust the event mask here.
	 */
	if (msg_id >= 0) {
		if (priv->regdata->ptp_rx_ts_all_events) {
			msg_id = ALL_EVENTS;
			msg_id |= ts | ts << 8 | ts << 16 | ts << 24;
		} else {
			msg_id |= ts;
		}

		priv->hwptp->config_hw_tstamping(priv, 1, PTP_V2_L2_L4, msg_id);
	}

	return ptph->tsmt;
}

/* emac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the register and passes it to
 * the stack, and also performs some sanity checks.
 */
static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	emac_check_ptp_packet(priv, skb, 1);

	/* get the valid tstamp */
	ns = priv->hwptp->get_tx_timestamp(priv);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);

	wmb();
	netdev_dbg(priv->ndev, "get valid TX hw timestamp %llu\n", ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);

	return;
}

/* emac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p,
				 struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (p->ptp_pkt && p->rx_timestamp) {
		emac_check_ptp_packet(priv, skb, 0);
		ns = priv->hwptp->get_rx_timestamp(priv);
		netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n");
	}
}

/**
 * emac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int emac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 ns_ptp;
	u32 ptp_event_msg_id = 0;
	u32 rx_ptp_type = 0;

	if (!priv->ptp_support) {
		netdev_alert(priv->ndev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* time stamp no incoming packet at all */
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* PTP v1, UDP, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V1_L4_ONLY;
		break;

	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* PTP v1, UDP, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V1_L4_ONLY;
		break;

	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		/* PTP v2, UDP, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		/* PTP v2, UDP, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		/* PTP v2/802.AS1 any layer, any kind of event packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		/*
		 * If ALL_EVENTS is not supported, timestamp SYNC packets
		 * by default; this is changed to MSG_DELAY_REQ
		 * automatically when needed.
		 */
		if (priv->regdata->ptp_rx_ts_all_events)
			ptp_event_msg_id = ALL_EVENTS;
		else
			ptp_event_msg_id = MSG_SYNC;

		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		/* PTP v2/802.AS1, any layer, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* PTP v2/802.AS1, any layer, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V2_L2_L4;
		break;
	default:
		return -ERANGE;
	}

	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hwptp->config_hw_tstamping(priv, 0, 0, 0);
	else {
		priv->hwptp->config_hw_tstamping(priv, 1,
						 rx_ptp_type, ptp_event_msg_id);

		/* initialize system time */
		ktime_get_real_ts64(&now);
		priv->hwptp->init_systime(priv, timespec64_to_ns(&now));

		/* program Increment reg */
		priv->hwptp->config_systime_increment(priv);

		ns_ptp = priv->hwptp->get_phc_time(priv);
		ktime_get_real_ts64(&now);
		/* check the diff between ptp timer and system time */
		if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000)
			priv->hwptp->init_systime(priv,
						  timespec64_to_ns(&now));
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
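/*
 * Userspace usage sketch (standard SIOCSHWTSTAMP flow; the socket fd and
 * interface name are illustrative):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */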

/**
 * emac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function returns the current hardware timestamping settings
 * as requested.
 */
static int emac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!priv->ptp_support)
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/* Name		emac_ioctl
 * Arguments	ndev : pointer to net_device structure
 *		rq : pointer to the interface request structure used
 *		cmd : IOCTL command number
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Called by the upper layer to handle the various IOCTL
 *		commands.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!ndev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(ndev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = emac_hwtstamp_set(ndev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = emac_hwtstamp_get(ndev, rq);
		break;
	default:
		break;
	}

	return ret;
}

static irqreturn_t emac_wakeup_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	emac_set_axi_bus_clock(priv, 1);
	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
	if (!(ctrl & (MREGBIT_UNICAST_WAKEUP_MODE |
		      MREGBIT_MAGIC_PACKET_WAKEUP_MODE)))
		return IRQ_NONE;

	ctrl &= ~(MREGBIT_UNICAST_WAKEUP_MODE |
		  MREGBIT_MAGIC_PACKET_WAKEUP_MODE);
	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	return IRQ_HANDLED;
}

static irqreturn_t emac_irq_tso(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;

	/* handle rx */
	status = emac_rd_tso(priv, TSO_AP_RX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_RX_INTR_STS=0x%x", status);

		if (status & TSO_AP_RX_INTR_ENA_CSUM_DONE) {
#ifdef CONFIG_ASR_EMAC_NAPI
			if (likely(napi_schedule_prep(&priv->rx_napi))) {
				unsigned long flags;

				spin_lock_irqsave(&priv->intr_lock, flags);
				emac_disable_interrupt(priv, 0);
				spin_unlock_irqrestore(&priv->intr_lock, flags);
				__napi_schedule(&priv->rx_napi);
			}
#else
			emac_rx_clean_desc(priv);
#endif
		}

#ifdef EMAC_DEBUG
		if (status & TSO_AP_RX_INTR_ENA_CSUM_ERR)
			pr_err("rx checksum err irq\n");
#endif
		/* clear rx status */
		emac_wr_tso(priv, TSO_AP_RX_INTR_STS, status);
	}

	/* handle tx */
	status = emac_rd_tso(priv, TSO_AP_TX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_TX_INTR_STS=0x%x\n", status);
		if (status & TSO_AP_TX_INTR_TSO_DONE) {
			emac_print("TX TSO done\n");
			emac_dma_start_transmit(priv);
		}

		if (status & TSO_AP_TX_INTR_CSUM_DONE) {
			emac_print("TX checksum done\n");
			emac_dma_start_transmit(priv);
		}

		/* clear tx status */
		emac_wr_tso(priv, TSO_AP_TX_INTR_STS, status);
	}

	/* handle err */
	status = emac_rd_tso(priv, TSO_ERR_INTR_STS);
	if (status) {
		pr_err("TSO: TX/RX ERR, status=0x%x\n", status);
		emac_wr_tso(priv, TSO_ERR_INTR_STS, status);
	}

	return IRQ_HANDLED;
}

/* Name		emac_interrupt_handler
 * Arguments	irq : irq number for which the interrupt fired
 *		dev_id : pointer that was passed to request_irq and is
 *		passed back to the handler
 * Return	irqreturn_t : integer value
 * Description	Interrupt handler for DMA interrupts; dispatches TX/RX
 *		completion handling.
 */
static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;
	u32 clr = 0;

	/* read the status register for IRQ received */
	status = emac_rd(priv, DMA_STATUS_IRQ);

	/* Check if emac is up */
	if (test_bit(EMAC_DOWN, &priv->state)) {
		emac_wr(priv, DMA_STATUS_IRQ, status & 0x1F7);
		return IRQ_HANDLED;
	}

	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
#ifdef CONFIG_ASR_EMAC_NAPI
		if (likely(napi_schedule_prep(&priv->tx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 1);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->tx_napi);
		}
#else
		emac_tx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;

	if (status & (MREGBIT_RECEIVE_TRANSFER_DONE_IRQ |
		      MREGBIT_RECEIVE_MISSED_FRAME_IRQ)) {
		if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ)
			clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;

		if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
			clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;

		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);

#ifdef CONFIG_ASR_EMAC_NAPI
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 0);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->rx_napi);
		}
#else
		emac_rx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;

	emac_wr(priv, DMA_STATUS_IRQ, clr);

	return IRQ_HANDLED;
}

/* Name		emac_command_options
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Handles the command line parameters passed when the
 *		driver is loaded at the command prompt. It parses the
 *		parameters and validates them for valid values.
 */
void emac_command_options(struct emac_priv *priv)
{
	int pages = totalram_pages();

	if (pages <= (EMAC_SMALL_RING_MEM_LIMIT >> PAGE_SHIFT))
		priv->rx_ring.total_cnt = EMAC_SMALL_RX_RING_SIZE;
	else
		priv->rx_ring.total_cnt = EMAC_RX_RING_SIZE;
	priv->tx_ring.total_cnt = EMAC_TX_RING_SIZE;

	pr_info("emac: rx_ring=%d, tx_ring=%d, pages=%d\n",
		priv->rx_ring.total_cnt, priv->tx_ring.total_cnt, pages);
}

/* Name		emac_configure_tx
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Configures the transmit unit of the device
 */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/* set the transmit base address */
	val = (u32)(priv->tx_ring.desc_dma_addr);

	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Tx Inter Packet Gap value and enable the transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= (~MREGBIT_IFG_LEN);
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00);

	/* start tx dma */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}

/* Name		emac_configure_rx
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Configures the receive unit of the device
 */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* set the receive base address */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* enable the receive */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	val |= MREGBIT_ACOOUNT_VLAN;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* start rx dma */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}

/* Name		emac_clean_tx_desc_ring
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Frees the TX resources allocated earlier.
 */
static void emac_clean_tx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc_buffer *tx_buf;
	u32 i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->total_cnt; i++) {
		tx_buf = &tx_ring->desc_buf[i];

		if (tx_buf->dma_addr) {
			dma_unmap_page(&priv->pdev->dev,
				       tx_buf->dma_addr,
				       tx_buf->dma_len,
				       DMA_TO_DEVICE);
			tx_buf->dma_addr = 0;
		}

		if (tx_buf->skb) {
			dev_kfree_skb_any(tx_buf->skb);
			tx_buf->skb = NULL;
		}
	}

	tx_ring->nxt_use = 0;
	tx_ring->nxt_clean = 0;
}

/* Name		emac_clean_rx_desc_ring
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Frees the RX resources allocated earlier.
 */
static void emac_clean_rx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring;
	struct emac_desc_buffer *rx_buf;
	u32 i;

	rx_ring = &priv->rx_ring;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->total_cnt; i++) {
		rx_buf = &rx_ring->desc_buf[i];
		if (rx_buf->skb) {
			emac_unmap_single(&priv->pdev->dev,
					  rx_buf->dma_addr,
					  rx_buf->dma_len,
					  DMA_FROM_DEVICE);
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (rx_buf->buff_addr) {
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
			kfree(rx_buf->buff_addr);
#endif
			rx_buf->buff_addr = NULL;
		}
	}

	rx_ring->nxt_clean = 0;
	rx_ring->nxt_use = 0;
}

void emac_ptp_init(struct emac_priv *priv)
{
	int ret;

	if (priv->ptp_support) {
		ret = clk_prepare_enable(priv->ptp_clk);
		if (ret < 0) {
			pr_warning("failed to enable ptp clock\n");
			priv->ptp_clk = NULL;
		}

		emac_ptp_register(priv);

		if (IS_ERR_OR_NULL(priv->ptp_clock)) {
			priv->ptp_support = 0;
			pr_warning("disable PTP due to clock not enabled\n");
		}
	}
}

void emac_ptp_deinit(struct emac_priv *priv)
{
	if (priv->ptp_support) {
		if (priv->ptp_clk)
			clk_disable_unprepare(priv->ptp_clk);

		emac_ptp_unregister(priv);
	}
}

static void emac_rx_timer_arm(struct emac_priv *priv)
{
	u32 rx_fill_timer = EMAC_RX_FILL_TIMER_US;

	if (!rx_fill_timer)
		return;

	if (hrtimer_is_queued(&priv->rx_timer))
		return;

	hrtimer_start(&priv->rx_timer,
		      ns_to_ktime(rx_fill_timer) * NSEC_PER_USEC,
		      HRTIMER_MODE_REL);
}

static enum hrtimer_restart emac_rx_timer(struct hrtimer *t)
{
	struct emac_priv *priv = container_of(t, struct emac_priv, rx_timer);
	struct napi_struct *napi = &priv->rx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&priv->intr_lock, flags);
		emac_disable_interrupt(priv, 0);
		spin_unlock_irqrestore(&priv->intr_lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

static void emac_tx_timer_arm(struct emac_priv *priv)
{
	u32 tx_coal_timer = EMAC_TX_COAL_TIMER_US;

	if (!tx_coal_timer)
		return;

	if (hrtimer_is_queued(&priv->tx_timer))
		return;

	hrtimer_start(&priv->tx_timer,
		      ns_to_ktime(tx_coal_timer) * NSEC_PER_USEC,
		      HRTIMER_MODE_REL);
}

static enum hrtimer_restart emac_tx_timer(struct hrtimer *t)
{
	struct emac_priv *priv = container_of(t, struct emac_priv, tx_timer);
	struct napi_struct *napi = &priv->tx_napi;

	if (priv->tso) {
		emac_dma_start_transmit(priv);
		return HRTIMER_NORESTART;
	}

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&priv->intr_lock, flags);
		emac_disable_interrupt(priv, 1);
		spin_unlock_irqrestore(&priv->intr_lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

static int emac_tso_config(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	u32 val = 0;

	/* reset */
	emac_wr_tso(priv, TSO_CONFIG, TSO_CONFIG_RST);
	mdelay(1);
	emac_wr_tso(priv, TSO_CONFIG, 0x0);

	emac_wr_tso(priv, TSO_DMA_CONFIG, 0x2 << 8);

	/* rx: set the receive descriptor base address */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_RX_DESC_BA, val >> 1);
	emac_wr_tso(priv, TSO_RX_AUTO_POLL_CNT, 0x0);

	/* tx */
	val = (u32)(priv->tx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_TX_DESC_BA, val >> 1);

	priv->tso_hdr = dma_alloc_coherent(&priv->pdev->dev,
					   tx_ring->total_cnt * 0x80,
					   &priv->tso_hdr_addr,
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->tso_hdr) {
		pr_err("Memory allocation failed for tso_hdr\n");
		return -ENOMEM;
	}

	val = (u32)(priv->tso_hdr_addr);
	emac_wr_tso(priv, TSO_TX_HDR_BA, val >> 1);
	emac_wr_tso(priv, TSO_TX_HDR_CTR, tx_ring->total_cnt);
	emac_wr_tso(priv, TSO_TX_AUTO_POLL_CNT, 0x0);

	/* enable tx/rx tso/coe */
	emac_wr_tso(priv, TSO_CONFIG,
		    TSO_CONFIG_RX_EN | TSO_CONFIG_TX_EN | TSO_CONFIG_RX_CSUM_EN);

	/* enable tx/rx/err interrupt */
	emac_wr_tso(priv, TSO_ERR_INTR_ENA, 0xF0007);
	emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
		    TSO_AP_RX_INTR_ENA_CSUM_DONE | TSO_AP_RX_INTR_ENA_CSUM_ERR);
#if 1
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA,
		    TSO_AP_TX_INTR_ENA_TSO_DONE | TSO_AP_TX_INTR_ENA_CSUM_DONE);
#else
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA, 0x0);
#endif
	return 0;
}
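/*
 * Note on the allocation above: 0x80 (128) bytes of header scratch are
 * reserved per TX descriptor, giving a pool of total_cnt * 128 bytes for
 * headers rebuilt by the TSO engine; the per-descriptor size is inferred
 * from this allocation, not from a documented hardware constant.
 */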

/* Name		emac_up
 * Arguments	priv : pointer to driver private data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	This function is called from emac_open and
 *		performs the work needed when the net interface is about
 *		to come up. It configures the Tx and Rx units of the
 *		device and registers the interrupt handlers.
 */
int emac_up(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int ret, val;
#if CLOSE_AIB_POWER_DOMAIN
	void __iomem *aib_emac_io;
	void __iomem *apbc_asfar;
	u32 tmp;
#endif
#ifdef WAN_LAN_AUTO_ADAPT
	u32 phy_id;
#endif

	priv->hw_stats->tx_tso_pkts = 0;
	priv->hw_stats->tx_tso_bytes = 0;

	ret = emac_phy_connect(ndev);
	if (ret) {
		pr_err("%s phy_connect failed\n", __func__);
#if CLOSE_AIB_POWER_DOMAIN
		printk("===> enter emac_close_aib_power_domain\n");
		aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
		apbc_asfar = ioremap(APBC_ASFAR, 8);
		writel(AKEY_ASFAR, apbc_asfar);
		writel(AKEY_ASSAR, apbc_asfar + 4);
		writel(0x81, aib_emac_io);
		writel(AKEY_ASFAR, apbc_asfar);
		writel(AKEY_ASSAR, apbc_asfar + 4);
		tmp = readl(aib_emac_io);
		iounmap(apbc_asfar);
		iounmap(aib_emac_io);
		printk("===> exit emac_close_aib_power_domain = 0x%x\n", tmp);
#endif
		return ret;
	}

	if (!priv->en_suspend)
		pm_stay_awake(&priv->pdev->dev);
	pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);

	clk_phase_set(priv, TX_PHASE);
	clk_phase_set(priv, RX_PHASE);

	/* init hardware */
	emac_init_hw(priv);

	emac_ptp_init(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);

	emac_set_fc_source_addr(priv, ndev->dev_addr);

	/* configure transmit unit */
	emac_configure_tx(priv);
	/* configure rx unit */
	emac_configure_rx(priv);

	/* allocate buffers for receive descriptors */
	emac_alloc_rx_desc_buffers(priv);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	/* allocate interrupt resources and
	 * enable the interrupt line and IRQ handling
	 */
	ret = request_irq(priv->irq, emac_interrupt_handler,
			  IRQF_SHARED, ndev->name, ndev);
	if (ret) {
		pr_err("request_irq failed, ret=%d\n", ret);
		goto request_irq_failed;
	}

	if (priv->irq_wakeup) {
		ret = request_irq(priv->irq_wakeup, emac_wakeup_handler,
				  IRQF_SHARED, ndev->name, ndev);
		if (ret) {
			pr_err("request wakeup_irq failed, ret=%d\n", ret);
			goto request_wakeup_irq_failed;
		}
	}

	if (priv->irq_tso) {
		ret = request_irq(priv->irq_tso, emac_irq_tso,
				  IRQF_SHARED, "emac_tso", ndev);
		if (ret) {
			pr_err("request tso irq failed, ret=%d\n", ret);
			goto request_tso_irq_failed;
		}
	}

	if (priv->fix_link)
		emac_set_speed_duplex(priv);

	clear_bit(EMAC_DOWN, &priv->state);

	/* enable mac interrupt */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);

	/* both rx tx */
	val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
	      MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
	      MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
#if 0
	val |= MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
	       MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
	       MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE;
#endif
	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);

#ifdef CONFIG_ASR_EMAC_NAPI
	napi_enable(&priv->rx_napi);
	napi_enable(&priv->tx_napi);
#endif

	if (priv->fix_link && !netif_carrier_ok(ndev))
		netif_carrier_on(ndev);

#ifdef WAN_LAN_AUTO_ADAPT
	phy_id = ndev->phydev->phy_id;
	if (phy_id == IP175D_PHY_ID)
		emac_sig_workq(CARRIER_UP_IP175D, 0);
	else
		emac_sig_workq(CARRIER_UP, 0);
#endif

	hrtimer_init(&priv->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->tx_timer.function = emac_tx_timer;
	hrtimer_init(&priv->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->rx_timer.function = emac_rx_timer;

	if (priv->tso)
		emac_tso_config(priv);

	netif_tx_start_all_queues(ndev);
	return 0;

request_tso_irq_failed:
	if (priv->irq_wakeup)
		free_irq(priv->irq_wakeup, ndev);

request_wakeup_irq_failed:
	free_irq(priv->irq, ndev);

request_irq_failed:
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	return ret;
}
1907
1908/* Name emac_down
1909 * Arguments priv : pointer to driver private data structure
1910 * Return Status: 0 - Success; non-zero - Fail
1911 * Description	This function is called from emac_close and performs
1912 *		the shutdown work when the net interface goes down:
1913 *		it stops the PHY, cancels the timers, disables and
1914 *		frees the interrupts, resets the hardware and turns
1915 *		the carrier off.
1917 */
1918int emac_down(struct emac_priv *priv)
1919{
1920 struct net_device *ndev = priv->ndev;
hj.shao213a35e2025-06-24 04:25:54 -07001921 //#LYNQ_MODFIY modify for task-1618 2025/6/24 start
1922 struct pinctrl_state *sleep_pins = pinctrl_lookup_state(priv->pinctrl, "sleep");
1923 //#LYNQ_MODFIY modify for task-1618 2025/6/24 end
b.liue9582032025-04-17 19:18:16 +08001924#ifdef WAN_LAN_AUTO_ADAPT
1925 u32 phy_id;
1926
1927 priv->dhcp = 0;
1928 priv->vlan_port = -1;
1929 priv->link = 0;
1930	phy_id = ndev->phydev ? ndev->phydev->phy_id : 0;
1931	if (priv->dhcp_delaywork) {
1932 cancel_delayed_work(&priv->dhcp_work);
1933 priv->dhcp_delaywork = 0;
1934 }
1935#endif
1936 set_bit(EMAC_DOWN, &priv->state);
1937
1938 netif_tx_disable(ndev);
1939
1940 hrtimer_cancel(&priv->tx_timer);
1941 hrtimer_cancel(&priv->rx_timer);
1942 /* Stop and disconnect the PHY */
1943 if (ndev->phydev) {
1944 phy_stop(ndev->phydev);
1945 phy_disconnect(ndev->phydev);
hj.shao213a35e2025-06-24 04:25:54 -07001946 //#LYNQ_MODFIY modify for task-1618 2025/6/24 start
1947		if (IS_ERR(sleep_pins))
1948			printk("could not get sleep pinstate\n");
1949		else
			pinctrl_select_state(priv->pinctrl, sleep_pins);
1950 //#LYNQ_MODFIY modify for task-1618 2025/6/24 end
b.liue9582032025-04-17 19:18:16 +08001951 }
1952
1953 if (!priv->fix_link) {
1954 priv->duplex = DUPLEX_UNKNOWN;
1955 priv->speed = SPEED_UNKNOWN;
1956 }
1957
1958#ifdef CONFIG_ASR_EMAC_NAPI
1959 napi_disable(&priv->rx_napi);
1960 napi_disable(&priv->tx_napi);
1961#endif
1962 emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
1963 emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);
1964
1965 free_irq(priv->irq, ndev);
1966 if (priv->irq_wakeup)
1967 free_irq(priv->irq_wakeup, ndev);
1968
1969 emac_ptp_deinit(priv);
1970
1971 emac_reset_hw(priv);
1972 netif_carrier_off(ndev);
1973
1974#ifdef WAN_LAN_AUTO_ADAPT
1975 if(phy_id == IP175D_PHY_ID)
1976 emac_sig_workq(CARRIER_DOWN_IP175D, 0);
1977 else
1978 emac_sig_workq(CARRIER_DOWN, 0);
1979#endif
1980
1981#ifdef CONFIG_ASR_EMAC_DDR_QOS
1982 flush_work(&priv->qos_work);
1983 pm_qos_update_request(&priv->clk_scaling.ddr_qos, PM_QOS_DEFAULT_VALUE);
1984#endif
1985 pm_qos_update_request(&priv->pm_qos_req,
1986 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
1987
1988 if (!priv->en_suspend)
1989 pm_relax(&priv->pdev->dev);
1990
1991 if (priv->tso) {
1992 dma_free_coherent(&priv->pdev->dev,
1993 priv->tx_ring.total_cnt * 0x80,
1994 priv->tso_hdr,
1995 priv->tso_hdr_addr);
1996 }
1997
1998 return 0;
1999}
2000
2001/* Name emac_alloc_tx_resources
2002 * Arguments priv : pointer to driver private data structure
2003 * Return Status: 0 - Success; non-zero - Fail
2004 * Description	Allocates TX resources and records their virtual & physical addresses.
2005 */
2006int emac_alloc_tx_resources(struct emac_priv *priv)
2007{
2008 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2009 struct platform_device *pdev = priv->pdev;
2010 u32 size;
2011
2012 size = sizeof(struct emac_desc_buffer) * tx_ring->total_cnt;
2013
2014 /* allocate memory */
2015 tx_ring->desc_buf = kzalloc(size, GFP_KERNEL);
2016 if (!tx_ring->desc_buf) {
2017 pr_err("Memory allocation failed for the Transmit descriptor buffer\n");
2018 return -ENOMEM;
2019 }
2020
2021 memset(tx_ring->desc_buf, 0, size);
2022
2023 tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc);
2024
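	/* Round the descriptor area up to a 1 KiB multiple before carving
	 * it out of the SRAM pool or coherent DMA memory below.
	 */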
2025 EMAC_ROUNDUP(tx_ring->total_size, 1024);
2026
2027 if (priv->sram_pool) {
2028 tx_ring->desc_addr =
2029 (void *)gen_pool_dma_alloc(
2030 priv->sram_pool, tx_ring->total_size,
2031 &tx_ring->desc_dma_addr);
2032 tx_ring->in_sram = true;
2033 }
2034
2035 if (!tx_ring->desc_addr) {
2036 tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2037 tx_ring->total_size,
2038 &tx_ring->desc_dma_addr,
2039 GFP_KERNEL | __GFP_ZERO);
2040 if (!tx_ring->desc_addr) {
2041 pr_err("Memory allocation failed for the Transmit descriptor ring\n");
2042 kfree(tx_ring->desc_buf);
2043 return -ENOMEM;
2044 }
2045
2046 if (priv->sram_pool) {
2047 pr_err("sram pool left size not enough, tx fallback\n");
2048 tx_ring->in_sram = false;
2049 }
2050 }
2051
2052 memset(tx_ring->desc_addr, 0, tx_ring->total_size);
2053
2054 tx_ring->nxt_use = 0;
2055 tx_ring->nxt_clean = 0;
2056
2057 return 0;
2058}
2059
2060/* Name emac_alloc_rx_resources
2061 * Arguments priv : pointer to driver private data structure
2062 * Return Status: 0 - Success; non-zero - Fail
2063 * Description	Allocates RX resources and records their virtual & physical addresses.
2064 */
2065int emac_alloc_rx_resources(struct emac_priv *priv)
2066{
2067 struct emac_desc_ring *rx_ring = &priv->rx_ring;
2068 struct platform_device *pdev = priv->pdev;
2069 u32 buf_len;
2070
2071 buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt;
2072
2073 rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL);
2074 if (!rx_ring->desc_buf) {
2075 pr_err("Memory allocation failed for the Receive descriptor buffer\n");
2076 return -ENOMEM;
2077 }
2078
2079 memset(rx_ring->desc_buf, 0, buf_len);
2080
2081	/* round up to nearest 1K */
2082 rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc);
2083
2084 EMAC_ROUNDUP(rx_ring->total_size, 1024);
2085
2086 if (priv->sram_pool) {
2087 rx_ring->desc_addr =
2088 (void *)gen_pool_dma_alloc(
2089 priv->sram_pool, rx_ring->total_size,
2090 &rx_ring->desc_dma_addr);
2091 rx_ring->in_sram = true;
2092 }
2093
2094 if (!rx_ring->desc_addr) {
2095 rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2096 rx_ring->total_size,
2097 &rx_ring->desc_dma_addr,
2098 GFP_KERNEL | __GFP_ZERO);
2099 if (!rx_ring->desc_addr) {
2100 pr_err("Memory allocation failed for the Receive descriptor ring\n");
2101 kfree(rx_ring->desc_buf);
2102 return -ENOMEM;
2103 }
2104
2105 if (priv->sram_pool) {
2106 pr_err("sram pool left size not enough, rx fallback\n");
2107 rx_ring->in_sram = false;
2108 }
2109 }
2110
2111 memset(rx_ring->desc_addr, 0, rx_ring->total_size);
2112
2113 rx_ring->nxt_use = 0;
2114 rx_ring->nxt_clean = 0;
2115
2116 return 0;
2117}
2118
2119/* Name emac_free_tx_resources
2120 * Arguments priv : pointer to driver private data structure
2121 * Return none
2122 * Description Frees the Tx resources allocated
2123 */
2124void emac_free_tx_resources(struct emac_priv *priv)
2125{
2126 emac_clean_tx_desc_ring(priv);
2127 kfree(priv->tx_ring.desc_buf);
2128 priv->tx_ring.desc_buf = NULL;
2129 if (priv->tx_ring.in_sram)
2130 gen_pool_free(priv->sram_pool,
2131 (unsigned long) priv->tx_ring.desc_addr,
2132 priv->tx_ring.total_size);
2133 else
2134 dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size,
2135 priv->tx_ring.desc_addr,
2136 priv->tx_ring.desc_dma_addr);
2137 priv->tx_ring.desc_addr = NULL;
2138}
2139
2140/* Name emac_free_rx_resources
2141 * Arguments priv : pointer to driver private data structure
2142 * Return none
2143 * Description Frees the Rx resources allocated
2144 */
2145void emac_free_rx_resources(struct emac_priv *priv)
2146{
2147 emac_clean_rx_desc_ring(priv);
2148 kfree(priv->rx_ring.desc_buf);
2149 priv->rx_ring.desc_buf = NULL;
2150 if (priv->rx_ring.in_sram)
2151 gen_pool_free(priv->sram_pool,
2152 (unsigned long) priv->rx_ring.desc_addr,
2153 priv->rx_ring.total_size);
2154 else
2155 dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size,
2156 priv->rx_ring.desc_addr,
2157 priv->rx_ring.desc_dma_addr);
2158 priv->rx_ring.desc_addr = NULL;
2159}
2160
2161/* Name emac_open
2162 * Arguments pstNetdev : pointer to net_device structure
2163 * Return Status: 0 - Success; non-zero - Fail
2164 * Description	This function is called when the net interface is
2165 *		brought up. It sets up the Tx and Rx resources and
2166 *		starts the interface.
2167 */
2168static int emac_open(struct net_device *ndev)
2169{
2170 struct emac_priv *priv = netdev_priv(ndev);
2171 int ret;
2172
2173 ret = emac_alloc_tx_resources(priv);
2174 if (ret) {
2175 pr_err("Error in setting up the Tx resources\n");
2176 goto emac_alloc_tx_resource_fail;
2177 }
2178
2179 ret = emac_alloc_rx_resources(priv);
2180 if (ret) {
2181 pr_err("Error in setting up the Rx resources\n");
2182 goto emac_alloc_rx_resource_fail;
2183 }
2184
2185 ret = emac_up(priv);
2186 if (ret) {
2187		pr_err("Error in making the net interface up\n");
2188 goto emac_up_fail;
2189 }
2190 return 0;
2191
2192emac_up_fail:
2193 emac_free_rx_resources(priv);
2194emac_alloc_rx_resource_fail:
2195 emac_free_tx_resources(priv);
2196emac_alloc_tx_resource_fail:
2197 emac_reset_hw(priv);
2198 return ret;
2199}
2200
2201/* Name emac_close
2202 * Arguments pstNetdev : pointer to net_device structure
2203 * Return Status: 0 - Success; non-zero - Fail
2204 * Description	This function is called when the net interface is taken down.
2205 * It calls the appropriate functions to
2206 * free Tx and Rx resources.
2207 */
2208static int emac_close(struct net_device *ndev)
2209{
2210 struct emac_priv *priv = netdev_priv(ndev);
2211
2212 emac_down(priv);
2213 emac_free_tx_resources(priv);
2214 emac_free_rx_resources(priv);
2215
2216 return 0;
2217}
2218
2219/* Name emac_tx_clean_desc
2220 * Arguments priv : pointer to driver private data structure
2221 * Return	number of descriptors reclaimed
2222 * Description	Reclaims TX descriptors already completed by the DMA,
 *		unmaps their buffers and frees the associated skbs.
2223 */
2224#ifdef CONFIG_ASR_EMAC_NAPI
2225static int emac_tx_clean_desc(struct emac_priv *priv, int budget)
2226#else
2227static int emac_tx_clean_desc(struct emac_priv *priv)
2228#endif
2229{
2230 struct emac_desc_ring *tx_ring;
2231 struct emac_tx_desc *tx_desc, *end_desc;
2232 struct emac_desc_buffer *tx_buf;
2233 struct net_device *ndev = priv->ndev;
2234 u32 i, u32LastIndex;
2235 u8 u8Cleaned;
2236 unsigned int count = 0;
2237
2238 tx_ring = &priv->tx_ring;
2239 i = tx_ring->nxt_clean;
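	/*
	 * desc_buf[first].nxt_watch holds the ring index of the packet's
	 * last descriptor; a packet is only reclaimed once the DMA engine
	 * has cleared OWN on that watched descriptor.
	 */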
2240 do {
2241 if (i == tx_ring->nxt_use)
2242 break;
2243
2244 u32LastIndex = tx_ring->desc_buf[i].nxt_watch;
2245 end_desc = emac_get_tx_desc(priv, u32LastIndex);
2246 if (end_desc->OWN == 1 ||
2247 (priv->tso && (end_desc->tso || end_desc->coe)))
2248 break;
2249
2250 u8Cleaned = false;
2251 for ( ; !u8Cleaned; count++) {
2252 tx_desc = emac_get_tx_desc(priv, i);
2253 tx_buf = &tx_ring->desc_buf[i];
2254
2255 emac_get_tx_hwtstamp(priv, tx_buf->skb);
2256
2257 /* own bit will be reset to 0 by dma
2258 * once packet is transmitted
2259 */
2260 if (tx_buf->dma_addr) {
2261 dma_unmap_page(&priv->pdev->dev,
2262 tx_buf->dma_addr,
2263 tx_buf->dma_len,
2264 DMA_TO_DEVICE);
2265 tx_buf->dma_addr = 0;
2266 }
2267 if (tx_buf->skb) {
2268 dev_kfree_skb_any(tx_buf->skb);
2269 tx_buf->skb = NULL;
2270 }
2271 if (tx_buf->buff_addr)
2272 tx_buf->buff_addr = NULL;
2273
2274 memset(tx_desc, 0, sizeof(struct emac_tx_desc));
2275 u8Cleaned = (i == u32LastIndex);
2276 if (++i == tx_ring->total_cnt)
2277 i = 0;
2278 }
2279
2280#ifdef CONFIG_ASR_EMAC_NAPI
2281 if (count >= budget) {
2282 count = budget;
2283 break;
2284 }
2285#endif
2286 } while (1);
2287 tx_ring->nxt_clean = i;
2288
2289#ifndef CONFIG_ASR_EMAC_NAPI
2290 spin_lock(&priv->spTxLock);
2291#endif
2292 if (unlikely(count && netif_queue_stopped(ndev) &&
2293 netif_carrier_ok(ndev) &&
2294 EMAC_DESC_UNUSED(tx_ring) >= EMAC_TX_WAKE_THRESHOLD))
2295 netif_wake_queue(ndev);
2296#ifndef CONFIG_ASR_EMAC_NAPI
2297 spin_unlock(&priv->spTxLock);
2298#endif
2299 return count;
2300}
2301
2302static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc)
2303{
2304	/* if the Last Descriptor bit isn't set, drop the frame */
2305 if (!dsc->LastDescriptor) {
2306 netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n");
2307 return frame_discard;
2308 }
2309
2310 /*
2311 * A Frame that is less than 64-bytes (from DA thru the FCS field)
2312 * is considered as Runt Frame.
2313 * Most of the Runt Frames happen because of collisions.
2314 */
2315 if (dsc->ApplicationStatus & EMAC_RX_FRAME_RUNT) {
2316 netdev_dbg(priv->ndev, "rx frame less than 64.\n");
2317 return frame_discard;
2318 }
2319
2320 /*
2321 * When the frame fails the CRC check,
2322 * the frame is assumed to have the CRC error
2323 */
2324 if (dsc->ApplicationStatus & EMAC_RX_FRAME_CRC_ERR) {
2325 netdev_dbg(priv->ndev, "rx frame crc error\n");
2326 return frame_discard;
2327 }
2328
2329 if (priv->tso && dsc->csum_res == EMAC_CSUM_FAIL) {
2330 netdev_dbg(priv->ndev, "COE: rx frame checksum error\n");
2331 return frame_discard;
2332 }
2333
2334 /*
2335 * When the length of the frame exceeds
2336 * the Programmed Max Frame Length
2337 */
2338 if (dsc->ApplicationStatus & EMAC_RX_FRAME_MAX_LEN_ERR) {
2339 netdev_dbg(priv->ndev, "rx frame too long\n");
2340 return frame_discard;
2341 }
2342
2343 /*
2344 * frame reception is truncated at that point and
2345 * frame is considered to have Jabber Error
2346 */
2347 if (dsc->ApplicationStatus & EMAC_RX_FRAME_JABBER_ERR) {
2348 netdev_dbg(priv->ndev, "rx frame has been truncated\n");
2349 return frame_discard;
2350 }
2351
2352 /* this bit is only for 802.3 Type Frames */
2353 if (dsc->ApplicationStatus & EMAC_RX_FRAME_LENGTH_ERR) {
2354 netdev_dbg(priv->ndev, "rx frame length err for 802.3\n");
2355 return frame_discard;
2356 }
2357
2358 if (dsc->FramePacketLength <= ETHERNET_FCS_SIZE ||
2359 dsc->FramePacketLength > EMAC_RX_BUFFER_2048) {
2360 netdev_dbg(priv->ndev, "rx frame len too small or too long\n");
2361 return frame_discard;
2362 }
2363 return frame_ok;
2364}
2365
2366/* Name emac_rx_clean_desc
2367 * Arguments priv : pointer to driver private data structure
2368 * Return	number of packets passed to the stack
2369 * Description	Processes completed RX descriptors, hands good frames
 *		to the network stack and refills the ring.
2370 */
2371#ifdef CONFIG_ASR_EMAC_NAPI
2372static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
2373#else
2374static int emac_rx_clean_desc(struct emac_priv *priv)
2375#endif
2376{
2377 struct emac_desc_ring *rx_ring;
2378 struct emac_desc_buffer *rx_buf;
2379 struct net_device *ndev = priv->ndev;
2380 struct emac_rx_desc *rx_desc;
2381 struct sk_buff *skb = NULL;
2382 int status;
2383#ifdef CONFIG_ASR_EMAC_NAPI
2384 u32 receive_packet = 0;
2385#endif
2386 u32 i;
2387 u32 u32Len;
2388 u32 u32Size;
2389 u8 *pu8Data;
2390#ifdef WAN_LAN_AUTO_ADAPT
2391 int port = -1, vlan = -1;
2392 struct vlan_hdr *vhdr;
2393 struct iphdr *iph = NULL;
2394 struct udphdr *udph = NULL;
2395#endif
2396
2397 rx_ring = &priv->rx_ring;
2398 i = rx_ring->nxt_clean;
2399 rx_desc = emac_get_rx_desc(priv, i);
2400 u32Size = 0;
2401
2402 if (priv->pause.tx_pause && !priv->pause.fc_auto)
2403 emac_check_ring_and_send_pause(priv);
2404
2405 while (rx_desc->OWN == 0) {
2406 if (priv->tso && !rx_desc->csum_done)
2407 break;
2408
2409 if (skb_queue_len(&priv->rx_skb) > priv->rx_ring.total_cnt)
2410 break;
2411
2412 rx_buf = &rx_ring->desc_buf[i];
2413 if (!rx_buf->skb)
2414 break;
2415
2416 emac_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
2417 rx_buf->dma_len, DMA_FROM_DEVICE);
2418 status = emac_rx_frame_status(priv, rx_desc);
2419 if (unlikely(status == frame_discard)) {
2420 ndev->stats.rx_dropped++;
2421 dev_kfree_skb_irq(rx_buf->skb);
2422 rx_buf->skb = NULL;
2423 } else {
2424 skb = rx_buf->skb;
2425 u32Len = rx_desc->FramePacketLength - ETHERNET_FCS_SIZE;
2426
2427 pu8Data = skb_put(skb, u32Len);
2428#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
2429 memcpy(pu8Data, (u8 *)rx_buf->buff_addr, u32Len);
2430#endif
2431 skb->dev = ndev;
2432 ndev->hard_header_len = ETH_HLEN;
2433
2434 emac_get_rx_hwtstamp(priv, rx_desc, skb);
2435
2436 skb->protocol = eth_type_trans(skb, ndev);
2437 if (priv->tso)
2438 skb->ip_summed = CHECKSUM_UNNECESSARY;
2439 else
2440 skb->ip_summed = CHECKSUM_NONE;
2441
2442#ifdef WAN_LAN_AUTO_ADAPT
2443 {/* Special tag format: DA-SA-0x81-xx-data.
2444 Bit 7-3 Packet Information
2445 - bit 4: Reserved
2446 - bit 3: Reserved
2447 - bit 2: Miss address table
2448 - bit 1: Security violation
2449 - bit 0: VLAN violation
2450 Bit 2-0 Ingress Port number
2451 - b000: Disabled
2452 - b001: Port 0
2453 - b010: Port 1
2454 - b011: Port 2
2455 - b100: Port 3
2456 - b101: Port 4
2457 - Other: Reserved */
2458 if(ntohs(skb->protocol)>>8 == 0x81) {
2459 port = ntohs(skb->protocol) & 0x7;
2460 if(port > 0 && port <= 0x5) {
2461 skb->protocol = htons(ETH_P_8021Q);
2462 port = port - 1;
2463 }
2464 }
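			/* e.g. a tag byte of 0x02 (b010) is Port 1 after the
			 * port - 1 adjustment above, matching the table in
			 * the comment.
			 */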
2465 if (skb->protocol == htons(ETH_P_8021Q)) {
2466 vhdr = (struct vlan_hdr *) skb->data;
2467 vlan = ntohs(vhdr->h_vlan_TCI);
2468 iph = (struct iphdr *)(skb->data + VLAN_HLEN);
2469 } else if (skb->protocol == htons(ETH_P_IP))
2470 iph = (struct iphdr *)skb->data;
2471
2472 if (iph && iph->protocol == IPPROTO_UDP) {
2473 udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
2474				if (ntohs(udph->dest) == 68 && ntohs(udph->source) == 67) {
2475 u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
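					/* The DHCP fixed header (236 B) plus
					 * the 4-byte magic cookie ends at
					 * offset 240, so offset 242 is the
					 * value byte of the first option,
					 * assuming option 53 (message type)
					 * is sent first, as is common.
					 */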
2476 u8 dhcp_type = *(udp_data + 242);
2477 if ((DHCP_ACK == dhcp_type || DHCP_OFFER == dhcp_type)
2478 && (DHCP_SEND_REQ == priv->dhcp)) {
2479 priv->dhcp = DHCP_REC_RESP;
2480 if (ndev->phydev->phy_id == IP175D_PHY_ID)
2481 priv->vlan_port = port;
2482 else
2483 priv->vlan_port = -1;
2484 }
2485 }
2486 }
2487 }
2488#endif
2489 skb_queue_tail(&priv->rx_skb, skb);
2490 rx_buf->skb = NULL;
2491 }
2492
2493 if (++i == rx_ring->total_cnt)
2494 i = 0;
2495
2496 rx_desc = emac_get_rx_desc(priv, i);
2497
2498 /* restart RX COE */
2499 if (priv->tso)
2500 emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
2501 }
2502
2503 rx_ring->nxt_clean = i;
2504
2505 emac_alloc_rx_desc_buffers(priv);
2506
2507 /*
2508 * Since netif_rx may consume too much time, put this after
2509 * emac_alloc_rx_desc_buffers so that RX DMA desc refill ASAP,
2510 * reduce packet loss probability.
2511 */
2512 while ((skb = skb_dequeue(&priv->rx_skb))) {
2513 ndev->stats.rx_packets++;
2514 ndev->stats.rx_bytes += skb->len;
2515#ifdef CONFIG_ASR_EMAC_NAPI
2516 napi_gro_receive(&priv->rx_napi, skb);
2517#else
2518 netif_rx(skb);
2519#endif
2520
2521#ifdef CONFIG_ASR_EMAC_NAPI
2522 receive_packet++;
2523 if (receive_packet >= budget)
2524 break;
2525#endif
2526 }
2527
2528#ifdef CONFIG_ASR_EMAC_DDR_QOS
2529 emac_ddr_clk_scaling(priv);
2530#endif
2531
2532#ifdef CONFIG_ASR_EMAC_NAPI
2533 return receive_packet;
2534#else
2535 return 0;
2536#endif
2537}
2538
2539/* Name emac_alloc_rx_desc_buffers
2540 * Arguments priv : pointer to driver private data structure
2541 * Return	none
2542 * Description	Allocates skbs for free RX descriptors and hands the
 *		descriptors back to the DMA engine.
2543 */
2544static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
2545{
2546 struct net_device *ndev = priv->ndev;
2547 struct emac_desc_ring *rx_ring = &priv->rx_ring;
2548 struct emac_desc_buffer *rx_buf;
2549 struct sk_buff *skb;
2550 struct emac_rx_desc *rx_desc;
2551 u32 i;
2552#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
2553 void *buff;
2554#endif
2555 u32 buff_len;
2556 int fail_cnt = 0;
2557
2558 i = rx_ring->nxt_use;
2559 rx_buf = &rx_ring->desc_buf[i];
2560
2561 buff_len = priv->u32RxBufferLen;
2562
2563 while (!rx_buf->skb) {
2564 skb = emac_skbrb_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
2565 if (!skb) {
2566 if (priv->rx_ring.total_cnt == EMAC_RX_RING_SIZE)
2567 skb = dev_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
2568 if (!skb) {
2569 fail_cnt++;
2570 pr_warn_ratelimited("emac sk_buff allocation failed\n");
2571 break;
2572 }
2573 }
2574
2575 /* make buffer alignment */
2576 skb_reserve(skb, NET_IP_ALIGN + EMAC_EXTRA_ROOM);
2577 skb->dev = ndev;
2578
2579#ifdef CONFIG_ASR_EMAC_RX_NO_COPY
2580 rx_buf->buff_addr = skb->data;
2581#else
2582 if (!rx_buf->buff_addr) {
2583 buff = kmalloc(buff_len, GFP_ATOMIC | GFP_DMA);
2584 if (!buff) {
2585 pr_err("kmalloc failed\n");
2586 dev_kfree_skb(skb);
2587 break;
2588 }
2589 rx_buf->buff_addr = buff;
2590 }
2591#endif
2592 rx_buf->skb = skb;
2593 rx_buf->dma_len = buff_len;
2594 rx_buf->dma_addr = emac_map_single(&priv->pdev->dev,
2595 rx_buf->buff_addr,
2596 buff_len,
2597 DMA_FROM_DEVICE);
2598
2599 rx_desc = emac_get_rx_desc(priv, i);
2600 rx_desc->BufferAddr1 = rx_buf->dma_addr;
2601 rx_desc->BufferSize1 = rx_buf->dma_len;
2602 rx_desc->rx_timestamp = 0;
2603 rx_desc->ptp_pkt = 0;
2604 rx_desc->FirstDescriptor = 0;
2605 rx_desc->LastDescriptor = 0;
2606 rx_desc->FramePacketLength = 0;
2607 rx_desc->ApplicationStatus = 0;
2608 if (++i == rx_ring->total_cnt) {
2609 rx_desc->EndRing = 1;
2610 i = 0;
2611 }
2612
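		/* Publish all descriptor fields before setting OWN; once
		 * OWN flips, the descriptor belongs to the DMA engine.
		 */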
2613 wmb();
2614 rx_desc->OWN = 1;
2615 if (priv->tso)
2616 rx_desc->csum_done = 0;
2617
2618 rx_buf = &rx_ring->desc_buf[i];
2619 }
2620 rx_ring->nxt_use = i;
2621
2622 if (fail_cnt)
2623 priv->refill = 1;
2624 else
2625 priv->refill = 0;
2626 emac_dma_start_receive(priv);
2627}
2628
2629#ifdef CONFIG_ASR_EMAC_NAPI
2630static int emac_rx_poll(struct napi_struct *napi, int budget)
2631{
2632 struct emac_priv *priv = container_of(napi, struct emac_priv, rx_napi);
2633 int work_done;
2634
2635 work_done = emac_rx_clean_desc(priv, budget);
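	/*
	 * NAPI contract: if less than the budget was consumed, polling is
	 * done, so re-enable RX interrupts; re-arm the refill timer if the
	 * last buffer refill ran short.
	 */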
2636 if (work_done < budget && napi_complete_done(napi, work_done)) {
2637 unsigned long flags;
2638
2639 spin_lock_irqsave(&priv->intr_lock, flags);
2640 emac_enable_interrupt(priv, 0);
2641 spin_unlock_irqrestore(&priv->intr_lock, flags);
2642
2643 if (priv->refill)
2644 emac_rx_timer_arm(priv);
2645 }
2646
2647 return work_done;
2648}
2649
2650static int emac_tx_poll(struct napi_struct *napi, int budget)
2651{
2652 struct emac_priv *priv = container_of(napi, struct emac_priv, tx_napi);
2653 int work_done;
2654
2655 work_done = emac_tx_clean_desc(priv, budget);
2656 if (work_done < budget && napi_complete_done(napi, work_done)) {
2657 unsigned long flags;
2658
2659 spin_lock_irqsave(&priv->intr_lock, flags);
2660 emac_enable_interrupt(priv, 1);
2661 spin_unlock_irqrestore(&priv->intr_lock, flags);
2662 }
2663
2664 return work_done;
2665}
2666#endif
2667
2668/* Name emac_tx_mem_map
2669 * Arguments priv : pointer to driver private data structure
2670 * pstSkb : pointer to sk_buff structure passed by upper layer
2671 * max_tx_len : max data len per descriptor
2672 * frag_num : number of fragments in the packet
2673 * Return number of descriptors needed for transmitting packet
2674 * Description
2675 */
2676static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb,
2677 u32 max_tx_len, u32 frag_num, int ioc)
2678{
2679 struct emac_desc_ring *tx_ring;
2680 struct emac_desc_buffer *tx_buf;
2681 struct emac_tx_desc *tx_desc, *first_desc;
2682 u32 skb_len;
2683 u32 u32Offset, u32Size, i;
2684 u32 use_desc_cnt;
2685 u32 f;
2686 void *pvPtr;
2687 u32 cur_desc_addr;
2688 u32 cur_desc_idx;
2689 u8 do_tx_timestamp = 0;
2690 bool use_buf2 = 0;
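	/*
	 * Each TX descriptor carries two buffer slots; use_buf2 alternates
	 * between opening a fresh descriptor (BufferAddr1) and filling the
	 * second slot (BufferAddr2), packing two chunks per descriptor.
	 */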
2691
2692 u32Offset = 0;
2693 use_desc_cnt = 0;
2694
2695 skb_tx_timestamp(skb);
2696 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2697 priv->hwts_tx_en)) {
2698 /* declare that device is doing timestamping */
2699 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2700 do_tx_timestamp = 1;
2701 }
2702
2703 tx_ring = &priv->tx_ring;
2704 skb_len = skb->len - skb->data_len;
2705 i = cur_desc_idx = tx_ring->nxt_use;
2706 cur_desc_addr = emac_rd(priv, DMA_TRANSMIT_BASE_ADDRESS);
2707 while (skb_len > 0) {
2708 u32Size = min(skb_len, max_tx_len);
2709 skb_len -= u32Size;
2710
2711 tx_buf = &tx_ring->desc_buf[i];
2712 tx_buf->dma_len = u32Size;
2713 pvPtr = skb->data + u32Offset;
2714 tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pvPtr,
2715 u32Size, DMA_TO_DEVICE);
2716 tx_buf->buff_addr = pvPtr;
2717 tx_buf->ulTimeStamp = jiffies;
2718
2719 tx_desc = emac_get_tx_desc(priv, i);
2720
2721 if (use_buf2) {
2722 tx_desc->BufferAddr2 = tx_buf->dma_addr;
2723 tx_desc->BufferSize2 = tx_buf->dma_len;
2724 i++;
2725 use_buf2 = 0;
2726 } else {
2727 memset(tx_desc, 0, sizeof(struct emac_tx_desc));
2728 tx_desc->BufferAddr1 = tx_buf->dma_addr;
2729 tx_desc->BufferSize1 = tx_buf->dma_len;
2730 use_buf2 = 1;
2731 }
2732
2733 if (use_desc_cnt == 0) {
2734 first_desc = tx_desc;
2735 tx_desc->FirstSegment = 1;
2736 if (do_tx_timestamp)
2737 tx_desc->tx_timestamp = 1;
2738 }
2739
2740 if (skb_len == 0 && frag_num == 0) {
2741 tx_desc->LastSegment = 1;
2742 tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
2743 }
2744
2745 if (!use_buf2 && i == tx_ring->total_cnt) {
2746 tx_desc->EndRing = 1;
2747 i = 0;
2748 }
2749
2750 /* trigger first desc OWN bit later */
2751 use_desc_cnt++;
2752 if (use_desc_cnt > 2)
2753 tx_desc->OWN = 1;
2754
2755 u32Offset += u32Size;
2756 }
2757
2758 /* if the data is fragmented */
2759 for (f = 0; f < frag_num; f++) {
2760 skb_frag_t *frag;
2761
2762 frag = &(skb_shinfo(skb)->frags[f]);
2763 skb_len = skb_frag_size(frag);
2764 u32Offset = skb_frag_off(frag);
2765
2766 while (skb_len) {
2767 u32Size = min(skb_len, max_tx_len);
2768 skb_len -= u32Size;
2769
2770 tx_buf = &tx_ring->desc_buf[i];
2771 tx_buf->dma_len = u32Size;
2772 tx_buf->dma_addr =
2773 dma_map_page(&priv->pdev->dev,
2774 skb_frag_page(frag),
2775 u32Offset,
2776 u32Size,
2777 DMA_TO_DEVICE);
2778 tx_buf->ulTimeStamp = jiffies;
2779
2780 tx_desc = emac_get_tx_desc(priv, i);
2781 if (use_buf2) {
2782 tx_desc->BufferAddr2 = tx_buf->dma_addr;
2783 tx_desc->BufferSize2 = tx_buf->dma_len;
2784 i++;
2785 use_buf2 = 0;
2786 } else {
2787 memset(tx_desc, 0, sizeof(struct emac_tx_desc));
2788 tx_desc->BufferAddr1 = tx_buf->dma_addr;
2789 tx_desc->BufferSize1 = tx_buf->dma_len;
2790 use_buf2 = 1;
2791 }
2792
2793 if (skb_len == 0 && f == (frag_num - 1)) {
2794 tx_desc->LastSegment = 1;
2795 tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
2796 }
2797
2798 if (!use_buf2 && i == tx_ring->total_cnt) {
2799 tx_desc->EndRing = 1;
2800 i = 0;
2801 }
2802
2803 /* trigger first desc OWN bit later */
2804 use_desc_cnt++;
2805 if (use_desc_cnt > 2)
2806 tx_desc->OWN = 1;
2807
2808 u32Offset += u32Size;
2809 }
2810 }
2811
2812 if (use_buf2 && ++i == tx_ring->total_cnt) {
2813 tx_desc->EndRing = 1;
2814 i = 0;
2815 }
2816
2817 tx_ring->desc_buf[cur_desc_idx].skb = skb;
2818 tx_ring->desc_buf[cur_desc_idx].nxt_watch =
2819 (i == 0 ? tx_ring->total_cnt : 0) + i - 1;
2820
2821 wmb();
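	/* Make every descriptor write visible before granting OWN on the
	 * first descriptor; the DMA may start fetching as soon as it flips.
	 */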
2822
2823 first_desc->OWN = 1;
2824
2825 emac_dma_start_transmit(priv);
2826
2827 tx_ring->nxt_use = i;
2828 return use_desc_cnt;
2829}
2830
2831static int emac_prepare_tso_desc(struct emac_priv *priv, int idx,
2832 bool tso, bool coe,
2833 u32 addr, int payload, u8 hlen, int mss,
2834 bool fst, bool last, bool ioc, bool ts,
2835 u32 *cnt)
2836{
2837 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2838 struct emac_tx_desc *pdesc;
2839
2840 pdesc = emac_get_tx_desc(priv, idx);
2841 if (tso) {
2842 if (fst && hlen) {
2843 emac_set_buf1_addr_len(pdesc, addr, 0);
2844 payload -= hlen;
2845 addr += hlen;
2846 }
2847 emac_set_buf2_addr_len(pdesc, addr, payload);
2848 } else {
2849 emac_set_buf1_addr_len(pdesc, addr, payload);
2850 }
2851
2852 if (fst) {
2853 emac_tx_desc_set_fd(pdesc);
2854 } else {
2855 if (tso)
2856 emac_tx_desc_set_offload(pdesc, 1, 1, 1);
2857 else if (coe)
2858 emac_tx_desc_set_offload(pdesc, 0, 1, 0);
2859 else
2860 emac_tx_desc_set_offload(pdesc, 1, 0, 0);
2861 }
2862
2863 if (ts)
2864 emac_tx_desc_set_ts(pdesc);
2865
2866 if (last) {
2867 /* last segment */
2868 emac_tx_desc_set_ld(pdesc);
2869 if (ioc)
2870 emac_tx_desc_set_ioc(pdesc);
2871 }
2872
2873 print_desc((void *)pdesc, 16);
2874 if (payload <= 0)
2875 return idx;
2876
2877 do {
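	/*
	 * With TSO the hardware cuts one segment per MSS, so each MSS-sized
	 * slice of this buffer occupies one ring slot; walk and count them
	 * for the caller's descriptor accounting.
	 */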
2878 (*cnt)++;
2879
2880 if (++idx == tx_ring->total_cnt) {
2881 emac_tx_desc_set_ring_end(pdesc);
2882 idx = 0;
2883 }
2884
2885 if (!tso)
2886 break;
2887
2888 payload -= mss;
2889 if (payload <= 0)
2890 break;
2891
2892 pdesc = emac_get_tx_desc(priv, idx);
2893 emac_tx_desc_set_offload(pdesc, 1, 1, 0);
2894
2895 print_desc((void *)pdesc, 16);
2896 } while (1);
2897
2898 return idx;
2899}
2900
2901static int emac_tso_xmit(struct sk_buff *skb, struct net_device *ndev,
2902 bool tso, bool coe)
2903{
2904 struct emac_priv *priv = netdev_priv(ndev);
2905 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2906 struct emac_desc_buffer *tx_buf;
2907 struct emac_tx_desc *pdesc;
2908 skb_frag_t *frag;
2909 u32 desc_cnt, frag_num, f, mss, fst;
2910 u32 offset, i;
2911 u8 hlen;
2912 int skb_len, payload;
2913 void *pbuf;
2914 int ioc;
2915	u8 timestamp = 0;
#ifndef CONFIG_ASR_EMAC_NAPI
	unsigned long ulFlags;
#endif
2916
2917 frag_num = skb_shinfo(skb)->nr_frags;
2918 skb_len = skb->len - skb->data_len;
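	/*
	 * Descriptor budget estimate: with TSO, every MSS-sized segment of
	 * the linear data and of each fragment needs a slot; without TSO,
	 * one buffer holds at most MAX_DATA_LEN_TX_DES bytes, so
	 * EMAC_TXD_COUNT() rounds each piece up.
	 */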
2919 if (tso) {
2920 hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
2921 mss = skb_shinfo(skb)->gso_size;
2922 desc_cnt = (skb_len / mss) + 1;
2923 for (f = 0; f < frag_num; f++) {
2924 frag = &skb_shinfo(skb)->frags[f];
2925 desc_cnt += (skb_frag_size(frag) / mss) + 1;
2926 }
2927 } else {
2928 hlen = 0;
2929 mss = 0;
2930 desc_cnt = EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
2931 for (i = 0; i < frag_num; i++) {
2932 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2933 desc_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
2934 MAX_DATA_PWR_TX_DES);
2935 }
2936 }
2937
2938	emac_print("%s: skb=%p, skb->len=%d skb_len=%d mss=%d frag_num=%d hlen=%d\n",
2939		   __func__, skb, skb->len, skb_len, mss, frag_num, hlen);
2940
2941#ifdef EMAC_DEBUG
2942 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, skb->data, skb_len, 0);
2943#endif
2944 /* disable hard interrupt on local CPUs */
2945#ifndef CONFIG_ASR_EMAC_NAPI
2946 local_irq_save(ulFlags);
2947#endif
2948 if (!spin_trylock(&priv->spTxLock)) {
2949 pr_err("Collision detected\n");
2950#ifndef CONFIG_ASR_EMAC_NAPI
2951 local_irq_restore(ulFlags);
2952#endif
2953 return NETDEV_TX_BUSY;
2954 }
2955
2956 /* check whether sufficient free descriptors are there */
2957 if (EMAC_DESC_UNUSED(tx_ring) < (desc_cnt + 2)) {
2958 pr_err_ratelimited("TSO Descriptors are not free\n");
2959 netif_stop_queue(ndev);
2960#ifndef CONFIG_ASR_EMAC_NAPI
2961 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
2962#else
2963 spin_unlock(&priv->spTxLock);
2964#endif
2965 return NETDEV_TX_BUSY;
2966 }
2967
2968 priv->tx_count_frames += desc_cnt;
2969 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2970 priv->hwts_tx_en))
2971 ioc = 1;
2972 else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
2973 ioc = 1;
2974 else
2975 ioc = 0;
2976
2977 if (ioc)
2978 priv->tx_count_frames = 0;
2979
2980 skb_tx_timestamp(skb);
2981 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2982 priv->hwts_tx_en)) {
2983 /* declare that device is doing timestamping */
2984 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2985 timestamp = 1;
2986 }
2987
2988 offset = 0;
2989 desc_cnt = 0;
2990 i = fst = tx_ring->nxt_use;
2991 do {
2992 payload = min(skb_len, TSO_MAX_SEG_SIZE);
2993
2994 tx_buf = &tx_ring->desc_buf[i];
2995 tx_buf->dma_len = payload;
2996 pbuf = skb->data + offset;
2997 tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pbuf,
2998 payload, DMA_TO_DEVICE);
2999 tx_buf->buff_addr = pbuf;
3000 tx_buf->ulTimeStamp = jiffies;
3001
3002 skb_len -= payload;
3003 offset += payload;
3004
3005 i = emac_prepare_tso_desc(priv, i, tso, coe,
3006 tx_buf->dma_addr, payload, hlen, mss,
3007 (i == fst), (skb_len == 0 && frag_num == 0),
3008 ioc, timestamp, &desc_cnt);
3009 } while (skb_len > 0);
3010
3011 /* if the data is fragmented */
3012 for (f = 0; f < frag_num; f++) {
3013 frag = &(skb_shinfo(skb)->frags[f]);
3014 skb_len = skb_frag_size(frag);
3015 offset = skb_frag_off(frag);
3016
3017 emac_print("%s: frag %d len=%d\n", __func__, f, skb_len);
3018#ifdef EMAC_DEBUG
3019 {
3020 u8 *vaddr;
3021
3022 vaddr = kmap_atomic(skb_frag_page(frag));
3023 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
3024 32, 1, vaddr + offset, skb_len, 0);
3025 kunmap_atomic(vaddr);
3026 }
3027#endif
3028 do {
3029 payload = min(skb_len, TSO_MAX_SEG_SIZE);
3030
3031 tx_buf = &tx_ring->desc_buf[i];
3032 tx_buf->dma_len = payload;
3034 tx_buf->dma_addr = dma_map_page(&priv->pdev->dev,
3035 skb_frag_page(frag),
3036 offset, payload,
3037 DMA_TO_DEVICE);
3038 tx_buf->ulTimeStamp = jiffies;
3039
3040 skb_len -= payload;
3041 offset += payload;
3042
3043 i = emac_prepare_tso_desc(priv, i, tso, coe,
3044 tx_buf->dma_addr, payload, 0, mss,
3045 (i == fst),
3046 (skb_len == 0 && f == (frag_num - 1)),
3047 ioc, timestamp, &desc_cnt);
3048 } while (skb_len > 0);
3049 }
3050
3051 tx_ring->desc_buf[fst].skb = skb;
3052 tx_ring->desc_buf[fst].nxt_watch =
3053 (i == 0 ? tx_ring->total_cnt : 0) + i - 1;
3054
3055 wmb();
3056
3057 /* set first descriptor for this packet */
3058 pdesc = emac_get_tx_desc(priv, fst);
3059 emac_tx_update_fst_desc(pdesc, hlen, mss, tso, coe);
3060 print_desc((void *)pdesc, 16);
3061
3062 tx_ring->nxt_use = i;
3063
3064 ndev->stats.tx_packets++;
3065 ndev->stats.tx_bytes += skb->len;
3066 if (tso) {
3067 priv->hw_stats->tx_tso_pkts++;
3068 priv->hw_stats->tx_tso_bytes += skb->len;
3069 }
3070
3071 emac_wr_tso(priv, TSO_TX_POLL_DEMAND, 0xFF);
3072 /* Make sure there is space in the ring for the next send. */
3073 if (EMAC_DESC_UNUSED(tx_ring) < (MAX_SKB_FRAGS + 2)) {
3074 pr_debug_ratelimited("TSO Descriptors not enough, stop\n");
3075 netif_stop_queue(ndev);
3076 }
3077
3078#ifndef CONFIG_ASR_EMAC_NAPI
3079 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3080#else
3081 spin_unlock(&priv->spTxLock);
3082#endif
3083#ifdef CONFIG_ASR_EMAC_DDR_QOS
3084 emac_ddr_clk_scaling(priv);
3085#endif
3086
3087 if (!tso && !coe)
3088 emac_tx_timer_arm(priv);
3089
3090 return NETDEV_TX_OK;
3091}
3092
3093/* Name emac_start_xmit
3094 * Arguments pstSkb : pointer to sk_buff structure passed by upper layer
3095 * pstNetdev : pointer to net_device structure
3096 * Return Status: 0 - Success; non-zero - Fail
3097 * Description This function is called by upper layer to
3098 * handover the Tx packet to the driver
3099 * for sending it to the device.
3100 * Currently this is doing nothing but
3101 *		It maps the packet for DMA, builds the TX
3102 *		descriptors and kicks off the transfer.
3103static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
3104{
3105 struct emac_priv *priv = netdev_priv(ndev);
3106 int ioc;
3107 u32 frag_num;
3108 u32 skb_len;
3109 u32 tx_des_cnt = 0;
3110 u32 i;
3111#ifndef CONFIG_ASR_EMAC_NAPI
3112 unsigned long ulFlags;
3113#endif
3114#ifdef WAN_LAN_AUTO_ADAPT
3115 int vlan = 0;
3116 struct iphdr *iph = NULL;
3117 struct udphdr *udph = NULL;
3118 struct vlan_hdr *vhdr;
3119
3120 { struct ethhdr *myeth = (struct ethhdr *)skb->data;
3121 if (myeth->h_proto == htons(ETH_P_8021Q)) {
3122 vhdr = (struct vlan_hdr *)((u8 *)myeth + sizeof(struct ethhdr));
3123 vlan = ntohs(vhdr->h_vlan_TCI);
3124 iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr) + VLAN_HLEN);
3125 }
3126 else if (myeth->h_proto == htons(ETH_P_IP))
3127 iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr));
3128
3129 if (iph && iph->protocol == IPPROTO_UDP) {
3130 udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
3131		if (ntohs(udph->dest) == 67 && ntohs(udph->source) == 68) {
3132 u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
3133 u8 dhcp_type = *(udp_data + 242);
3134 if ((DHCP_DISCOVER == dhcp_type || DHCP_REQUEST == dhcp_type)
3135 && (0 == priv->dhcp)) {
3136 priv->dhcp = DHCP_SEND_REQ;
3137 if (ndev->phydev->phy_id == IP175D_PHY_ID)
3138 priv->vlan_port = vlan;
3139 else
3140 priv->vlan_port = -1;
3141 }
3142 }
3143 }
3144 }
3145#endif
3146
3147 /* pstSkb->len: is the full length of the data in the packet
3148 * pstSkb->data_len: the number of bytes in skb fragments
3149 * u16Len: length of the first fragment
3150 */
3151 skb_len = skb->len - skb->data_len;
3152
3153 if (skb->len <= 0) {
3154 pr_err("Packet length is zero\n");
3155 dev_kfree_skb_any(skb);
3156 return NETDEV_TX_OK;
3157 }
3158
3159 if (priv->tso) {
3160 bool tso = false, coe = false;
3161
3162 if (skb_is_gso(skb) &&
3163 (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3164 tso = true;
3165 coe = true;
3166 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
3167 coe = true;
3168 }
3169
3170		/* Workaround: COE needs skb->data to be 2-byte aligned */
3171 if (coe && !IS_ALIGNED((unsigned long)skb->data, 2))
3172 pskb_expand_head(skb, 1, 0, GFP_ATOMIC);
3173
3174 return emac_tso_xmit(skb, ndev, tso, coe);
3175 }
3176
3177 /* increment the count if len exceeds MAX_DATA_LEN_TX_DES */
3178 tx_des_cnt += EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
3179
3180 frag_num = skb_shinfo(skb)->nr_frags;
3181
3182 for (i = 0; i < frag_num; i++) {
3183 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3184 tx_des_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
3185 MAX_DATA_PWR_TX_DES);
3186 }
3187
3188 /* disable hard interrupt on local CPUs */
3189#ifndef CONFIG_ASR_EMAC_NAPI
3190 local_irq_save(ulFlags);
3191#endif
3192 if (!spin_trylock(&priv->spTxLock)) {
3193 pr_err("Collision detected\n");
3194#ifndef CONFIG_ASR_EMAC_NAPI
3195 local_irq_restore(ulFlags);
3196#endif
3197 return NETDEV_TX_BUSY;
3198 }
3199
3200 /* check whether sufficient free descriptors are there */
3201 if (EMAC_DESC_UNUSED(&priv->tx_ring) < (tx_des_cnt + 2)) {
3202 pr_err_ratelimited("Descriptors are not free\n");
3203 netif_stop_queue(ndev);
3204#ifndef CONFIG_ASR_EMAC_NAPI
3205 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3206#else
3207 spin_unlock(&priv->spTxLock);
3208#endif
3209 return NETDEV_TX_BUSY;
3210 }
3211
3212 priv->tx_count_frames += frag_num + 1;
3213 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3214 priv->hwts_tx_en))
3215 ioc = 1;
3216 else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
3217 ioc = 1;
3218 else
3219 ioc = 0;
3220
3221 if (ioc)
3222 priv->tx_count_frames = 0;
3223
3224 tx_des_cnt = emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, frag_num, ioc);
3225 if (tx_des_cnt == 0) {
3226 pr_err("Could not acquire memory from pool\n");
3227 netif_stop_queue(ndev);
3228#ifndef CONFIG_ASR_EMAC_NAPI
3229 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3230#else
3231 spin_unlock(&priv->spTxLock);
3232#endif
3233 return NETDEV_TX_BUSY;
3234 }
3235 ndev->stats.tx_packets++;
3236 ndev->stats.tx_bytes += skb->len;
3237
3238 /* Make sure there is space in the ring for the next send. */
3239 if (EMAC_DESC_UNUSED(&priv->tx_ring) < (MAX_SKB_FRAGS + 2))
3240 netif_stop_queue(ndev);
3241
3242#ifndef CONFIG_ASR_EMAC_NAPI
3243 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3244#else
3245 spin_unlock(&priv->spTxLock);
3246#endif
3247#ifdef CONFIG_ASR_EMAC_DDR_QOS
3248 emac_ddr_clk_scaling(priv);
3249#endif
3250 emac_tx_timer_arm(priv);
3251 return NETDEV_TX_OK;
3252}
3253
3254u32 ReadTxStatCounters(struct emac_priv *priv, u8 cnt)
3255{
3256 u32 val, tmp;
3257
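	/* Counter read handshake: write the counter index with bit 15
	 * (start/busy) set, poll until the hardware clears bit 15, then
	 * combine the high and low 16-bit data registers.
	 */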
3258 val = 0x8000 | cnt;
3259 emac_wr(priv, MAC_TX_STATCTR_CONTROL, val);
3260 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3261
3262 while (val & 0x8000)
3263 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3264
3265 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH);
3266 val = tmp << 16;
3267 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW);
3268 val |= tmp;
3269
3270 return val;
3271}
3272
3273u32 ReadRxStatCounters(struct emac_priv *priv, u8 cnt)
3274{
3275 u32 val, tmp;
3276
3277 val = 0x8000 | cnt;
3278 emac_wr(priv, MAC_RX_STATCTR_CONTROL, val);
3279 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3280
3281 while (val & 0x8000)
3282 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3283
3284 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH);
3285 val = tmp << 16;
3286 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW);
3287 val |= tmp;
3288 return val;
3289}
3290
3291/* Name emac_set_mac_address
3292 * Arguments pstNetdev : pointer to net_device structure
3293 * addr : pointer to addr
3294 * Return Status: 0 - Success; non-zero - Fail
3295 * Description It is called by upper layer to set the mac address.
3296 */
3297static int emac_set_mac_address(struct net_device *ndev, void *addr)
3298{
3299 struct sockaddr *sa = addr;
3300 struct emac_priv *priv = netdev_priv(ndev);
3301
3302 if (!is_valid_ether_addr(sa->sa_data))
3303 return -EADDRNOTAVAIL;
3304
3305 memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
hj.shao8b7d94f2025-06-10 04:37:26 -07003306
hj.shao49e0d262025-07-07 03:21:43 -07003307//#LYNQ_MODFIY modify for task-1620 2025/7/7 start
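	/*
	 * Vendor override: force the locally administered address
	 * 02:00:00:00:11:01 (the 0x02 in the first octet sets the
	 * locally-administered bit), regardless of the requested address.
	 */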
hj.shao8b7d94f2025-06-10 04:37:26 -07003308 (ndev->dev_addr)[0] = 0x2;
3309 (ndev->dev_addr)[1] = 0x0;
3310 (ndev->dev_addr)[2] = 0x0;
3311 (ndev->dev_addr)[3] = 0x0;
hj.shao49e0d262025-07-07 03:21:43 -07003312 (ndev->dev_addr)[4] = 0x11;
hj.shao8b7d94f2025-06-10 04:37:26 -07003313 (ndev->dev_addr)[5] = 0x1;
hj.shao49e0d262025-07-07 03:21:43 -07003314//#LYNQ_MODFIY modify for task-1620 2025/7/7 end
b.liue9582032025-04-17 19:18:16 +08003315
3316 emac_set_mac_addr(priv, ndev->dev_addr);
3317
3318 emac_set_fc_source_addr(priv, ndev->dev_addr);
3319
3320 return 0;
3321}
3322
3323/* Name emac_change_mtu
3324 * Arguments pstNetdev : pointer to net_device structure
3325 * u32MTU : maximum transmit unit value
3326 * Return Status: 0 - Success; non-zero - Fail
3327 * Description It is called by upper layer to set the MTU value.
3328 */
3329static int emac_change_mtu(struct net_device *ndev, int mtu)
3330{
3331 struct emac_priv *priv = netdev_priv(ndev);
3332 u32 frame_len;
3333
3334 if (netif_running(ndev)) {
3335		pr_err("device must be stopped to change its MTU\n");
3336 return -EBUSY;
3337 }
3338
3339 frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3340
3341 if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE ||
3342 frame_len > EMAC_SKBRB_MAX_PAYLOAD) {
3343 pr_err("Invalid MTU setting\n");
3344 return -EINVAL;
3345 }
3346
3347 if (frame_len <= EMAC_RX_BUFFER_1024)
3348 priv->u32RxBufferLen = EMAC_RX_BUFFER_1024;
3349 else
3350 priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
3351
3352 ndev->mtu = mtu;
3353
3354 return 0;
3355}
3356
3357static void emac_reset(struct emac_priv *priv)
3358{
3359 if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state))
3360 return;
3361 if (test_bit(EMAC_DOWN, &priv->state))
3362 return;
3363
3364 netdev_dbg(priv->ndev, "Reset controller.\n");
3365
3366 rtnl_lock();
3367 //netif_trans_update(priv->ndev);
3368 while (test_and_set_bit(EMAC_RESETING, &priv->state))
3369 usleep_range(1000, 2000);
3370
3371 dev_close(priv->ndev);
3372 dev_open(priv->ndev, NULL);
3373 clear_bit(EMAC_RESETING, &priv->state);
3374 rtnl_unlock();
3375}
3376
3377static void emac_tx_timeout_task(struct work_struct *work)
3378{
3379 struct emac_priv *priv = container_of(work,
3380 struct emac_priv, tx_timeout_task);
3381 emac_reset(priv);
3382 clear_bit(EMAC_TASK_SCHED, &priv->state);
3383}
3384
3385/* Name emac_tx_timeout
3386 * Arguments pstNetdev : pointer to net_device structure
3387 * Return none
3388 * Description It is called by upper layer
3389 * for packet transmit timeout.
3390 */
3391static void emac_tx_timeout(struct net_device *ndev)
3392{
3393 struct emac_priv *priv = netdev_priv(ndev);
3394
3395 netdev_info(ndev, "TX timeout\n");
3396 register_dump(priv);
3397
3398 netif_carrier_off(priv->ndev);
3399 set_bit(EMAC_RESET_REQUESTED, &priv->state);
3400
3401 if (!test_bit(EMAC_DOWN, &priv->state) &&
3402 !test_and_set_bit(EMAC_TASK_SCHED, &priv->state))
3403 schedule_work(&priv->tx_timeout_task);
3404}
3405
3406static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable)
3407{
3408 const struct emac_regdata *regdata = priv->regdata;
3409 void __iomem* apmu;
3410 u32 val;
3411
3412 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3413 if (apmu == NULL) {
3414		pr_err("failed to ioremap APMU base\n");
3415 return -ENOMEM;
3416 }
3417
3418 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3419 if (enable) {
3420 val |= 0x1;
3421 } else {
3422 val &= ~0x1;
3423 }
3424 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3425 iounmap(apmu);
3426 return 0;
3427}
3428
3429static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx)
3430{
3431 const struct emac_regdata *regdata = priv->regdata;
3432 void __iomem* apmu;
3433 u32 val, dline;
3434	u8 phase = 0, tmp;
3435
3436 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3437 if (apmu == NULL) {
3438		pr_err("failed to ioremap APMU base\n");
3439 return -ENOMEM;
3440 }
3441
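	/*
	 * {tx,rx}_clk_config packing, as consumed below: bit 16 selects the
	 * RGMII clock source, bits 15:8 carry the delay-line code and bits
	 * 7:0 the delay-line step (each masked per-SoC via regdata).
	 */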
3442 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3443 if (is_tx) {
3444 if (regdata->rgmii_tx_clk_src_sel_shift > 0) {
3445 phase = (priv->tx_clk_config >> 16) & 0x1;
3446 val &= ~(0x1 << regdata->rgmii_tx_clk_src_sel_shift);
3447 val |= phase << regdata->rgmii_tx_clk_src_sel_shift;
3448 }
3449
3450 if (regdata->rgmii_tx_dline_reg_offset > 0) {
3451			/* Set RGMII TX delay line */
3452 dline = readl(apmu + regdata->rgmii_tx_dline_reg_offset);
3453
3454 /* delay code */
3455 tmp = (priv->tx_clk_config >> 8) &
3456 regdata->rgmii_tx_delay_code_mask;
3457 dline &= ~(regdata->rgmii_tx_delay_code_mask <<
3458 regdata->rgmii_tx_delay_code_shift);
3459 dline |= tmp << regdata->rgmii_tx_delay_code_shift;
3460
3461 /* delay step */
3462 tmp = priv->tx_clk_config &
3463 regdata->rgmii_tx_delay_step_mask;
3464 dline &= ~(regdata->rgmii_tx_delay_step_mask <<
3465 regdata->rgmii_tx_delay_step_shift);
3466 dline |= tmp << regdata->rgmii_tx_delay_step_shift;
3467
3468 /* delay line enable */
3469 dline |= 1 << regdata->rgmii_tx_delay_enable_shift;
3470 writel(dline, apmu + regdata->rgmii_tx_dline_reg_offset);
3471 pr_info("===> emac set tx dline 0x%x 0x%x", dline,
3472 readl(apmu + regdata->rgmii_tx_dline_reg_offset));
3473 }
3474 } else {
3475 if (regdata->rgmii_rx_clk_src_sel_shift > 0) {
3476 phase = (priv->rx_clk_config >> 16) & 0x1;
3477 val &= ~(0x1 << regdata->rgmii_rx_clk_src_sel_shift);
3478 val |= phase << regdata->rgmii_rx_clk_src_sel_shift;
3479 }
3480
3481		/* Set RGMII RX delay line */
3482 if (regdata->rgmii_rx_dline_reg_offset > 0) {
3483 dline = readl(apmu + regdata->rgmii_rx_dline_reg_offset);
3484
3485 /* delay code */
3486 tmp = (priv->rx_clk_config >> 8) &
3487 regdata->rgmii_rx_delay_code_mask;
3488 dline &= ~(regdata->rgmii_rx_delay_code_mask <<
3489 regdata->rgmii_rx_delay_code_shift);
3490 dline |= tmp << regdata->rgmii_rx_delay_code_shift;
3491
3492 /* delay step */
3493 tmp = priv->rx_clk_config &
3494 regdata->rgmii_rx_delay_step_mask;
3495 dline &= ~(regdata->rgmii_rx_delay_step_mask <<
3496 regdata->rgmii_rx_delay_step_shift);
3497 dline |= tmp << regdata->rgmii_rx_delay_step_shift;
3498
3499 /* delay line enable */
3500 dline |= 1 << regdata->rgmii_rx_delay_enable_shift;
3501 writel(dline, apmu + regdata->rgmii_rx_dline_reg_offset);
3502 pr_info("===> emac set rx dline 0x%x 0x%x", dline,
3503 readl(apmu + regdata->rgmii_rx_dline_reg_offset));
3504 }
3505 }
3506 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3507 pr_info("%s phase:%d direction:%s 0x%x 0x%x\n", __func__, phase,
3508 is_tx ? "tx": "rx", val,
3509 readl(apmu + regdata->clk_rst_ctrl_reg_offset));
3510
3511 iounmap(apmu);
3512 return 0;
3513}
3514
3515static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx)
3516{
3517 const struct emac_regdata *regdata = priv->regdata;
3518 void __iomem* apmu;
3519 u32 val;
3520	u8 tmp;
3521
3522 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3523 if (apmu == NULL) {
3524		pr_err("failed to ioremap APMU base\n");
3525 return -ENOMEM;
3526 }
3527
3528 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3529 if (is_tx) {
3530 /* rmii tx clock select */
3531 if (regdata->rmii_tx_clk_sel_shift > 0) {
3532 tmp = (priv->tx_clk_config >> 16) & 0x1;
3533 val &= ~(0x1 << regdata->rmii_tx_clk_sel_shift);
3534 val |= tmp << regdata->rmii_tx_clk_sel_shift;
3535 }
3536
3537		/* rmii ref clock select, 1 - from soc, 0 - from phy */
3538		if (regdata->rmii_ref_clk_sel_shift) {
3539 tmp = (priv->tx_clk_config >> 24) & 0x1;
3540 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3541 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3542 }
3543 } else {
3544 /* rmii rx clock select */
3545 if (regdata->rmii_rx_clk_sel_shift > 0) {
3546 tmp = (priv->rx_clk_config >> 16) & 0x1;
3547 val &= ~(0x1 << regdata->rmii_rx_clk_sel_shift);
3548 val |= tmp << regdata->rmii_rx_clk_sel_shift;
3549 }
3550
3551		/* rmii ref clock select, 1 - from soc, 0 - from phy */
3552		if (regdata->rmii_ref_clk_sel_shift) {
3553 tmp = (priv->tx_clk_config >> 24) & 0x1;
3554 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3555 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3556 }
3557 }
3558
3559 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3560	pr_debug("%s direction:%s config:0x%x\n", __func__,
3561		 is_tx ? "tx" : "rx",
		 is_tx ? priv->tx_clk_config : priv->rx_clk_config);
3562
3563 iounmap(apmu);
3564 return 0;
3565}
3566
3567static int clk_phase_set(struct emac_priv *priv, bool is_tx)
3568{
3569 if (emac_is_rmii_interface(priv)) {
3570 clk_phase_rmii_set(priv, is_tx);
3571 } else {
3572 clk_phase_rgmii_set(priv, is_tx);
3573 }
3574
3575 return 0;
3576}
3577
3578#ifdef CONFIG_DEBUG_FS
3579static int clk_phase_show(struct seq_file *s, void *data)
3580{
3581 struct emac_priv *priv = s->private;
3582 bool rmii_intf;
3583 rmii_intf = emac_is_rmii_interface(priv);
3584
3585 seq_printf(s, "Emac MII Interface : %s\n", rmii_intf ? "RMII" : "RGMII");
3586 seq_printf(s, "Current rx clk config : %d\n", priv->rx_clk_config);
3587 seq_printf(s, "Current tx clk config : %d\n", priv->tx_clk_config);
3588 return 0;
3589}
3590
3591static ssize_t clk_tuning_write(struct file *file,
3592 const char __user *user_buf,
3593 size_t count, loff_t *ppos)
3594{
3595 struct emac_priv *priv =
3596 ((struct seq_file *)(file->private_data))->private;
3597 int err;
3598 int clk_phase;
3599 char buff[TUNING_CMD_LEN] = { 0 };
3600 char mode_str[20];
3601
3602	if (count >= TUNING_CMD_LEN) {
3603		pr_err("command must be shorter than %d bytes.\n", TUNING_CMD_LEN);
3604 return count;
3605 }
3606	if (copy_from_user(buff, user_buf, count))
3607		return -EFAULT;
3609
3610	err = sscanf(buff, "%19s %d", mode_str, &clk_phase);
3611 if (err != 2) {
3612 pr_err("debugfs para count error\n");
3613 return count;
3614 }
3615 pr_info("input:%s %d\n", mode_str, clk_phase);
3616
3617 if (strcmp(mode_str, "tx") == 0) {
3618 priv->tx_clk_config = clk_phase;
3619 clk_phase_set(priv, TX_PHASE);
3620 } else if (strcmp(mode_str, "rx") == 0) {
3621 priv->rx_clk_config = clk_phase;
3622 clk_phase_set(priv, RX_PHASE);
3623 } else {
3624 pr_err("command error\n");
3625 pr_err("eg: echo rx 1 > clk_tuning\n");
3626 return count;
3627 }
3628
3629 return count;
3630}
3631
3632static int clk_tuning_open(struct inode *inode, struct file *file)
3633{
3634 return single_open(file, clk_phase_show, inode->i_private);
3635}
3636
3637const struct file_operations clk_tuning_fops = {
3638 .open = clk_tuning_open,
3639 .write = clk_tuning_write,
3640 .read = seq_read,
3641 .llseek = seq_lseek,
3642 .release = single_release,
3643};
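
/*
 * Usage sketch (the debugfs path depends on where the driver registers
 * its directory, so the location below is an assumption):
 *   echo "tx 0x10101" > /sys/kernel/debug/emac/clk_tuning
 *   cat /sys/kernel/debug/emac/clk_tuning
 * The numeric argument is stored in {tx,rx}_clk_config and applied via
 * clk_phase_set().
 */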
3644
3645#endif
3646
3647static int emac_power_down(struct emac_priv *priv)
3648{
3649 if (priv->rst_gpio >= 0)
3650 gpio_direction_output(priv->rst_gpio,
3651 priv->low_active_rst ? 0 : 1);
3652
3653 if (priv->ldo_gpio >= 0)
3654 gpio_direction_output(priv->ldo_gpio,
3655 priv->low_active_ldo ? 0 : 1);
3656
3657 return 0;
3658}
3659
3660static int emac_power_up(struct emac_priv *priv)
3661{
3662 u32 *delays_ldo = priv->delays_ldo;
3663 u32 *delays_rst = priv->delays_rst;
3664 int rst_gpio = priv->rst_gpio;
3665 int low_active_rst = priv->low_active_rst;
3666 int ldo_gpio = priv->ldo_gpio;
3667 int low_active_ldo = priv->low_active_ldo;
3668
3669 if (rst_gpio >= 0) {
3670 gpio_direction_output(rst_gpio, low_active_rst ? 0 : 1);
3671 }
3672
3673 if (ldo_gpio >= 0) {
3674 gpio_direction_output(ldo_gpio, low_active_ldo ? 0 : 1);
3675 if (delays_ldo[0]) {
3676 gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
3677 msleep(DIV_ROUND_UP(delays_ldo[0], 1000));
3678 }
3679
3680 gpio_set_value(ldo_gpio, low_active_ldo ? 0 : 1);
3681 if (delays_ldo[1])
3682 msleep(DIV_ROUND_UP(delays_ldo[1], 1000));
3683
3684 gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
3685 if (delays_ldo[2])
3686 msleep(DIV_ROUND_UP(delays_ldo[2], 1000));
3687 }
3688
3689 if (rst_gpio >= 0) {
3690 if (delays_rst[0]) {
3691 gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
3692 msleep(DIV_ROUND_UP(delays_rst[0], 1000));
3693 }
3694
3695 gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
3696 if (delays_rst[1])
3697 msleep(DIV_ROUND_UP(delays_rst[1], 1000));
3698
3699 gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
3700 if (delays_rst[2])
3701 msleep(DIV_ROUND_UP(delays_rst[2], 1000));
3702 }
3703
3704 return 0;
3705}
3706
3707static int emac_mii_reset(struct mii_bus *bus)
3708{
3709 struct emac_priv *priv = bus->priv;
3710 struct device *dev = &priv->pdev->dev;
3711 struct device_node *np = dev->of_node;
3712 int rst_gpio, ldo_gpio;
3713 int low_active_ldo, low_active_rst;
3714 u32 *delays_ldo = priv->delays_ldo;
3715 u32 *delays_rst = priv->delays_rst;
3716
3717 priv->rst_gpio = -1;
3718 priv->ldo_gpio = -1;
3719
3720 if (!np)
3721 return 0;
3722
3723 rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
3724 if (rst_gpio >= 0) {
3725 low_active_rst = of_property_read_bool(np, "reset-active-low");
3726 of_property_read_u32_array(np, "reset-delays-us", delays_rst, 3);
3727
3728 if (gpio_request(rst_gpio, "mdio-reset")) {
3729 printk("emac: reset-gpio=%d request failed\n",
3730 rst_gpio);
3731 return 0;
3732 }
3733 priv->rst_gpio = rst_gpio;
3734 priv->low_active_rst = low_active_rst;
3735 }
3736
3737 ldo_gpio = of_get_named_gpio(np, "ldo-gpio", 0);
3738 if (ldo_gpio >= 0) {
3739 low_active_ldo = of_property_read_bool(np, "ldo-active-low");
3740 of_property_read_u32_array(np, "ldo-delays-us", delays_ldo, 3);
3741
3742 if (gpio_request(ldo_gpio, "mdio-ldo"))
3743 return 0;
3744
3745 priv->ldo_gpio = ldo_gpio;
3746 priv->low_active_ldo = low_active_ldo;
3747 }
3748
3749 /*
3750 * Some device not allow MDC/MDIO operation during power on/reset,
3751 * disable AXI clock to shutdown mdio clock.
3752 */
3753 clk_disable_unprepare(priv->clk);
3754
3755 emac_power_up(priv);
3756
3757 clk_prepare_enable(priv->clk);
3758
3759 emac_reset_hw(priv);
3760
3761 return 0;
3762}
3763
3764static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
3765{
3766 struct emac_priv *priv = bus->priv;
3767 u32 cmd = 0;
3768 u32 val;
3769
3770 if (!__clk_is_enabled(priv->clk))
3771 return -EBUSY;
3772
3773 mutex_lock(&priv->mii_mutex);
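	/* MDIO control word layout, as assembled below: bits [4:0] PHY
	 * address, bits [9:5] register number, plus the start and
	 * read/write flag bits.
	 */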
3774 cmd |= phy_addr & 0x1F;
3775 cmd |= (regnum & 0x1F) << 5;
3776 cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
3777
3778 /*
3779 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
3780 * change during MDIO read/write
3781 */
3782#ifdef CONFIG_DDR_DEVFREQ
3783 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3784#endif
3785 emac_wr(priv, MAC_MDIO_DATA, 0x0);
3786 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3787
3788 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3789 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
3790 return -EBUSY;
3791
3792 val = emac_rd(priv, MAC_MDIO_DATA);
3793
3794#ifdef CONFIG_DDR_DEVFREQ
3795 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3796#endif
3797 mutex_unlock(&priv->mii_mutex);
3798 return val;
3799}
3800
3801static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
3802 u16 value)
3803{
3804 struct emac_priv *priv = bus->priv;
3805 u32 cmd = 0;
3806 u32 val;
3807
3808 if (!__clk_is_enabled(priv->clk))
3809 return -EBUSY;
3810
3811 mutex_lock(&priv->mii_mutex);
3812 emac_wr(priv, MAC_MDIO_DATA, value);
3813
3814 cmd |= phy_addr & 0x1F;
3815 cmd |= (regnum & 0x1F) << 5;
3816 cmd |= MREGBIT_START_MDIO_TRANS;
3817
3818 /*
3819 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
3820 * change during MDIO read/write
3821 */
3822#ifdef CONFIG_DDR_DEVFREQ
3823 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3824#endif
3825 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3826
3827 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3828 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
3829 return -EBUSY;
3830
3831#ifdef CONFIG_DDR_DEVFREQ
3832 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3833#endif
3834
3835 mutex_unlock(&priv->mii_mutex);
3836 return 0;
3837}
3838
3839static void emac_adjust_link(struct net_device *dev)
3840{
3841 struct phy_device *phydev = dev->phydev;
3842 struct emac_priv *priv = netdev_priv(dev);
3843 u32 ctrl;
3844#ifdef WAN_LAN_AUTO_ADAPT
3845 int status_change = 0;
3846 int addr = 0;
3847 int i = 0;
3848#endif
3849 if (!phydev || priv->fix_link)
3850 return;
3851
3852 if (phydev->link) {
3853 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
3854
3855 /* Now we make sure that we can be in full duplex mode
3856 * If not, we operate in half-duplex mode.
3857 */
3858 if (phydev->duplex != priv->duplex) {
3859 if (!phydev->duplex)
3860 ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
3861 else
3862 ctrl |= MREGBIT_FULL_DUPLEX_MODE;
3863 priv->duplex = phydev->duplex;
3864 }
3865
3866 if (phydev->speed != priv->speed) {
3867 ctrl &= ~MREGBIT_SPEED;
3868
3869 switch (phydev->speed) {
3870 case SPEED_1000:
3871 ctrl |= MREGBIT_SPEED_1000M;
3872 break;
3873 case SPEED_100:
3874 ctrl |= MREGBIT_SPEED_100M;
3875 break;
3876 case SPEED_10:
3877 ctrl |= MREGBIT_SPEED_10M;
3878 break;
3879 default:
3880 pr_err("broken speed: %d\n", phydev->speed);
3881 phydev->speed = SPEED_UNKNOWN;
3882 break;
3883 }
3884 if (phydev->speed != SPEED_UNKNOWN) {
3885 priv->speed = phydev->speed;
3886 }
3887 }
3888 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
3889 pr_info("%s link:%d speed:%dM duplex:%s\n", __func__,
3890 phydev->link, phydev->speed,
3891 phydev->duplex ? "Full": "Half");
3892 }
3893
3894#ifdef WAN_LAN_AUTO_ADAPT
3895 if(phydev->phy_id == IP175D_PHY_ID) {
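		/*
		 * For the IP175D switch, phydev->link is treated as a
		 * per-port link bitmask; diff it against the cached value
		 * to find which ports changed state.
		 */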
3896 if (phydev->link != priv->link) {
3897 for (i=0; i<16; i++) {
3898 if((priv->link & (1<<i)) != (phydev->link & (1<<i))) {
3899 addr = i;
3900 if (phydev->link & (1<<i)) {
3901 /* link up */
3902 printk("eth0 port%d link up\n", addr);
3903 priv->dhcp = 0;
3904 emac_sig_workq(CARRIER_UP_IP175D, addr);
3905 if(priv->dhcp_delaywork)
3906 cancel_delayed_work(&priv->dhcp_work);
3907 priv->dhcp_delaywork = 1;
3908 schedule_delayed_work(&priv->dhcp_work, 25*HZ);
3909 } else {
3910 /* link down */
3911 printk("eth0 port%d link down\n", addr);
3912 priv->dhcp = 0;
3913 if(priv->dhcp_delaywork)
3914 cancel_delayed_work(&priv->dhcp_work);
3915 priv->dhcp_delaywork = 0;
3916 emac_sig_workq(CARRIER_DOWN_IP175D, addr);
3917 }
3918 }
3919 }
3920 priv->link = phydev->link;
3921 }
3922 } else {
3923 if (phydev->link != priv->link) {
3924 priv->link = phydev->link;
3925 status_change = 1;
3926 }
3927
3928 if (status_change) {
3929 if (phydev->link) {
3930 /* link up */
3931 priv->dhcp = 0;
3932 emac_sig_workq(CARRIER_UP, 0);
3933 if(priv->dhcp_delaywork)
3934 cancel_delayed_work(&priv->dhcp_work);
3935 priv->dhcp_delaywork = 1;
3936 schedule_delayed_work(&priv->dhcp_work, 25*HZ);
3937
3938 } else {
3939 /* link down */
3940 priv->dhcp = 0;
3941 if(priv->dhcp_delaywork)
3942 cancel_delayed_work(&priv->dhcp_work);
3943 priv->dhcp_delaywork = 0;
3944 emac_sig_workq(CARRIER_DOWN, 0);
3945 }
3946 }
3947 }
3948#endif
3949}
3950
static int emac_phy_connect(struct net_device *dev)
{
	struct phy_device *phydev;
	int phy_interface;
	int ret;
	struct device_node *np;
	struct emac_priv *priv = netdev_priv(dev);

	np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0);
	if (!np) {
		if (priv->fix_link) {
			emac_phy_interface_config(priv, priv->interface);
			if (priv->interface == PHY_INTERFACE_MODE_RGMII)
				pinctrl_select_state(priv->pinctrl,
						     priv->rgmii_pins);
			emac_config_phy_interrupt(priv, 0);
			return 0;
		}
		return -ENODEV;
	}

	pr_info("%s: %s\n", __func__, np->full_name);
	phy_interface = of_get_phy_mode(np);
	of_node_put(np);
	if (phy_interface < 0) {
		pr_err("%s: invalid phy-mode\n", dev->name);
		return phy_interface;
	}
	emac_phy_interface_config(priv, phy_interface);
	if (phy_interface != PHY_INTERFACE_MODE_RMII)
		pinctrl_select_state(priv->pinctrl, priv->rgmii_pins);

	phydev = phy_find_first(priv->mii);
	if (!phydev) {
		pr_err("%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* Attach to the first PHY found on the bus; phy_connect_direct()
	 * also starts the PHY state machine.  (An earlier of_phy_connect()
	 * call was replaced by this phy_find_first() based lookup.)
	 */
	ret = phy_connect_direct(dev, phydev, emac_adjust_link, phy_interface);
	if (ret) {
		pr_err("Could not attach to PHY\n");
		emac_power_down(priv);
		return ret;
	}

	if (!phydev->phy_id || phydev->phy_id == 0xffffffff) {
		pr_err("Invalid phy_id=0x%x\n", phydev->phy_id);
		emac_power_down(priv);
		return -ENODEV;
	}

	emac_config_phy_interrupt(priv, phy_interrupt_is_valid(phydev) ? 1 : 0);

	/* phydev->supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); */
	pr_info("%s: %s: attached to PHY (UID 0x%x) Link = %d irq=%d\n",
		__func__, dev->name, phydev->phy_id, phydev->link, phydev->irq);
	dev->phydev = phydev;

#ifdef WAN_LAN_AUTO_ADAPT
	if (phydev->phy_id == IP175D_PHY_ID)
		emac_sig_workq(PHY_IP175D_CONNECT, 0);
#endif

	return 0;
}
4015
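/*
 * emac_mdio_init() - allocate the MDIO bus and register the "mdio-bus"
 * child node.
 *
 * Before the bus scan, the external PHY is powered up and taken out of
 * reset through the optional "power-en-gpio" and "rst-gpio" properties
 * of the phy-handle node (LYNQ task-1618).  The sequencing below
 * assumes an active-high power enable and an active-low reset line,
 * which is what the matching board DTS is expected to provide.
 */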
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device_node *mii_np;
	struct device *dev = &priv->pdev->dev;
	int ret;
	//#LYNQ_MODFIY modify for task-1618 2025/6/24 start
	struct device_node *phy_np;
	int phy_power_en_gpio;
	int phy_rst_gpio;
	//#LYNQ_MODFIY modify for task-1618 2025/6/24 end

	mii_np = of_get_child_by_name(dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	priv->mii = mdiobus_alloc();	/* freed in emac_mdio_deinit() */
	if (!priv->mii) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	priv->mii->priv = priv;
	priv->mii->name = "emac mii";
	priv->mii->reset = emac_mii_reset;
	priv->mii->read = emac_mii_read;
	priv->mii->write = emac_mii_write;
	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	priv->mii->parent = dev;
	priv->mii->phy_mask = 0xffffffff;

	//#LYNQ_MODFIY modify for task-1618 2025/6/19 start
	/* A missing phy-handle (e.g. fixed-link) is not fatal; skip the
	 * PHY power/reset GPIO sequencing in that case.
	 */
	phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (phy_np) {
		phy_power_en_gpio = of_get_named_gpio(phy_np, "power-en-gpio", 0);
		if (phy_power_en_gpio < 0) {
			dev_err(dev, "Failed to get power_en gpio: %d\n",
				phy_power_en_gpio);
			ret = phy_power_en_gpio;
			goto err_put_phy_node;
		}
		ret = gpio_request(phy_power_en_gpio, "phy_power_en");
		if (ret)
			goto err_put_phy_node;
		gpio_direction_output(phy_power_en_gpio, 1);
		msleep(10);
		gpio_free(phy_power_en_gpio);
		//#LYNQ_MODFIY modify for task-1618 2025/6/19 end

		//#LYNQ_MODFIY modify for task-1618 2025/6/24 start
		phy_rst_gpio = of_get_named_gpio(phy_np, "rst-gpio", 0);
		if (phy_rst_gpio < 0) {
			dev_err(dev, "Failed to get phy_rst gpio: %d\n",
				phy_rst_gpio);
			ret = phy_rst_gpio;
			goto err_put_phy_node;
		}
		ret = gpio_request(phy_rst_gpio, "phy_reset");
		if (ret)
			goto err_put_phy_node;
		gpio_direction_output(phy_rst_gpio, 0);
		msleep(10);
		gpio_set_value(phy_rst_gpio, 1);
		gpio_free(phy_rst_gpio);
		//#LYNQ_MODFIY modify for task-1618 2025/6/24 end

		of_node_put(phy_np);
	}

	ret = of_mdiobus_register(priv->mii, mii_np);
	if (ret) {
		mdiobus_free(priv->mii);
		priv->mii = NULL;
	}
	of_node_put(mii_np);
	return ret;

err_put_phy_node:
	of_node_put(phy_np);
	mdiobus_free(priv->mii);
	priv->mii = NULL;
err_put_node:
	of_node_put(mii_np);
	return ret;
}
4092
static int emac_mdio_deinit(struct emac_priv *priv)
{
	if (!priv->mii)
		return 0;

	mdiobus_unregister(priv->mii);
	mdiobus_free(priv->mii);	/* allocated with mdiobus_alloc() */
	priv->mii = NULL;
	return 0;
}
4101
4102static int emac_get_ts_info(struct net_device *dev,
4103 struct ethtool_ts_info *info)
4104{
4105 struct emac_priv *priv = netdev_priv(dev);
4106
4107 if (priv->ptp_support) {
4108
4109 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4110 SOF_TIMESTAMPING_TX_HARDWARE |
4111 SOF_TIMESTAMPING_RX_SOFTWARE |
4112 SOF_TIMESTAMPING_RX_HARDWARE |
4113 SOF_TIMESTAMPING_SOFTWARE |
4114 SOF_TIMESTAMPING_RAW_HARDWARE;
4115
4116 if (priv->ptp_clock)
4117 info->phc_index = ptp_clock_index(priv->ptp_clock);
4118
4119 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
4120 info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
4121 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
4122 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
4123 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
4124 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
4125 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
4126 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
4127 (1 << HWTSTAMP_FILTER_ALL));
4128 if (priv->regdata->ptp_rx_ts_all_events) {
4129 info->rx_filters |=
4130 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
4131 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
4132 }
4133
4134 return 0;
4135 } else
4136 return ethtool_op_get_ts_info(dev, info);
4137}
4138
4139static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4140{
4141 int i;
4142
4143 switch (stringset) {
4144 case ETH_SS_STATS:
4145 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4146 memcpy(data, emac_ethtool_stats[i].str, ETH_GSTRING_LEN);
4147 data += ETH_GSTRING_LEN;
4148 }
4149 break;
4150 }
4151}
4152
4153static int emac_get_sset_count(struct net_device *dev, int sset)
4154{
4155 switch (sset) {
4156 case ETH_SS_STATS:
4157 return ARRAY_SIZE(emac_ethtool_stats);
4158 default:
4159 return -EOPNOTSUPP;
4160 }
4161}
4162
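/*
 * emac_stats_update() - refresh the cached hardware counters.  Called
 * with hwstats->stats_lock held.
 *
 * struct emac_hw_stats is treated as a flat array of u32 counters: the
 * MAX_TX_STATS_NUM TX MIB counters first, then the MAX_RX_STATS_NUM RX
 * MIB counters, then the DMA missed-frame counter and the two software
 * TSO counters.  emac_ethtool_stats[] indexes the same layout via
 * offsetof(), so the two tables must be kept in sync when counters are
 * added or reordered.
 */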
4163static void emac_stats_update(struct emac_priv *priv)
4164{
4165 struct emac_hw_stats *hwstats = priv->hw_stats;
4166 int i;
4167 u32 *p;
4168
4169 p = (u32 *)(hwstats);
4170
4171 for (i = 0; i < MAX_TX_STATS_NUM; i++)
4172 *(p + i) = ReadTxStatCounters(priv, i);
4173
4174 p = (u32 *)hwstats + MAX_TX_STATS_NUM;
4175
4176 for (i = 0; i < MAX_RX_STATS_NUM; i++)
4177 *(p + i) = ReadRxStatCounters(priv, i);
4178
4179 *(p + i++) = emac_rd(priv, DMA_MISSED_FRAME_COUNTER);
4180
4181 *(p + i++) = hwstats->tx_tso_pkts;
4182 *(p + i++) = hwstats->tx_tso_bytes;
4183}
4184
4185static void emac_get_ethtool_stats(struct net_device *dev,
4186 struct ethtool_stats *stats, u64 *data)
4187{
4188 struct emac_priv *priv = netdev_priv(dev);
4189 struct emac_hw_stats *hwstats = priv->hw_stats;
4190 u32 *data_src;
4191 u64 *data_dst;
4192 int i;
4193
4194 if (netif_running(dev) && netif_device_present(dev)) {
4195 if (spin_trylock_bh(&hwstats->stats_lock)) {
4196 emac_stats_update(priv);
4197 spin_unlock_bh(&hwstats->stats_lock);
4198 }
4199 }
4200
4201 data_dst = data;
4202
4203 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4204 data_src = (u32 *)hwstats + emac_ethtool_stats[i].offset;
4205 *data_dst++ = (u64)(*data_src);
4206 }
4207}
4208
4209static int emac_ethtool_get_regs_len(struct net_device *dev)
4210{
4211 return EMAC_REG_SPACE_SIZE;
4212}
4213
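/*
 * The ethtool register dump keeps each block at its natural offset: the
 * EMAC_DMA_REG_CNT DMA registers start at word 0 and the
 * EMAC_MAC_REG_CNT MAC registers at word MAC_GLOBAL_CONTROL / 4, with
 * the unimplemented hole between the two blocks left zeroed.  The
 * buffer length therefore has to stay EMAC_REG_SPACE_SIZE.
 */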
4214static void emac_ethtool_get_regs(struct net_device *dev,
4215 struct ethtool_regs *regs, void *space)
4216{
4217 struct emac_priv *priv = netdev_priv(dev);
4218 u32 *reg_space = (u32 *) space;
4219 void __iomem *base = priv->iobase;
4220 int i;
4221
4222 regs->version = 1;
4223
4224 memset(reg_space, 0x0, EMAC_REG_SPACE_SIZE);
4225
4226 for (i = 0; i < EMAC_DMA_REG_CNT; i++)
4227 reg_space[i] = readl(base + DMA_CONFIGURATION + i * 4);
4228
4229 for (i = 0; i < EMAC_MAC_REG_CNT; i++)
4230 reg_space[i + MAC_GLOBAL_CONTROL / 4] = readl(base + MAC_GLOBAL_CONTROL + i * 4);
4231}
4232
4233static int emac_get_link_ksettings(struct net_device *ndev,
4234 struct ethtool_link_ksettings *cmd)
4235{
4236 if (!ndev->phydev)
4237 return -ENODEV;
4238
4239 phy_ethtool_ksettings_get(ndev->phydev, cmd);
4240 return 0;
4241}
4242
4243static int emac_set_link_ksettings(struct net_device *ndev,
4244 const struct ethtool_link_ksettings *cmd)
4245{
4246 if (!ndev->phydev)
4247 return -ENODEV;
4248
4249 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
4250}
4251
4252static void emac_get_drvinfo(struct net_device *dev,
4253 struct ethtool_drvinfo *info)
4254{
4255 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
4256 info->n_stats = ARRAY_SIZE(emac_ethtool_stats);
4257}
4258
static void emac_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *param)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int val = emac_mii_read(priv->mii, 0, 0);	/* BMCR of the PHY at address 0 */

	/* a negative value means the MDIO read failed */
	param->autoneg = (val >= 0 && (val & BIT(12))) ? 1 : 0;
	param->rx_pause = priv->pause.rx_pause;
	param->tx_pause = priv->pause.tx_pause;
}
4271
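/*
 * emac_set_pauseparam() does a one-time flow-control watermark setup on
 * the first call that enables pause in either direction.  The optional
 * DT property "flow-control-threshold" holds <low high> in percent
 * (defaulting to 60/90): on asr18xx and asr1903 Z1 the percentages are
 * applied to the RX descriptor ring and pause generation is driven by
 * the driver, while on later parts they are applied to a 1024-unit
 * hardware threshold (presumably the RX FIFO) and the MAC generates
 * pause frames automatically between the two watermarks.
 */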
4272static int emac_set_pauseparam(struct net_device *ndev,
4273 struct ethtool_pauseparam *param)
4274{
4275 struct emac_priv *priv = netdev_priv(ndev);
4276 struct device *dev = &priv->pdev->dev;
4277 struct device_node *np = dev->of_node;
4278 int val;
4279 int phyval;
4280 u32 threshold[2];
4281 static int init_flag = 1;
4282
4283 val = readl(priv->iobase + MAC_FC_CONTROL);
4284 phyval = emac_mii_read(priv->mii, 0, 0);
4285
4286 if (param->rx_pause)
4287 val |= MREGBIT_FC_DECODE_ENABLE;
4288 else
4289 val &= ~MREGBIT_FC_DECODE_ENABLE;
4290
4291 if (param->tx_pause)
4292 val |= MREGBIT_FC_GENERATION_ENABLE;
4293 else
4294 val &= ~MREGBIT_FC_GENERATION_ENABLE;
4295
4296 if (init_flag && (param->rx_pause | param->tx_pause)) {
4297 val |= MREGBIT_MULTICAST_MODE;
4298 priv->pause.pause_time_max = 0;
4299 if (0 != of_property_read_u32_array(np, "flow-control-threshold", threshold, 2)) {
4300 threshold[0] = 60;
4301 threshold[1] = 90;
4302 }
4303 threshold[0] = clamp(threshold[0], 0U, 99U);
4304 threshold[1] = clamp(threshold[1], 1U, 100U);
4305
4306 if (cpu_is_asr18xx() || cpu_is_asr1903_z1()) {
4307 priv->pause.low_water = priv->rx_ring.total_cnt * threshold[0] / 100;
4308 priv->pause.high_water = priv->rx_ring.total_cnt * threshold[1] / 100 - 1;
4309 priv->pause.fc_auto = 0;
4310 } else {
4311 priv->pause.low_water = 0;
4312 priv->pause.high_water = 0;
4313 priv->pause.fc_auto = 1;
4314 val |= MREGBIT_AUTO_FC_GENERATION_ENABLE;
4315 threshold[0] = 1024 * threshold[0] / 100;
4316 threshold[1] = 1024 * threshold[1] / 100;
4317 emac_wr(priv, MAC_FC_AUTO_HIGH_THRESHOLD, threshold[1]);
4318 emac_wr(priv, MAC_FC_AUTO_LOW_THRESHOLD, threshold[0]);
4319 emac_wr(priv, MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE, 0xffff);
4320 emac_wr(priv, MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE, 0);
4321 }
4322 init_flag = 0;
4323 }
4324 emac_wr(priv, MAC_FC_CONTROL, val);
4325
4326 if (param->autoneg)
4327 phyval |= BIT(12);
4328 else
4329 phyval &= ~BIT(12);
4330
4331 (void)emac_mii_write(priv->mii, 0, 0, (u16)phyval);
4332
4333 priv->pause.rx_pause = param->rx_pause;
4334 priv->pause.tx_pause = param->tx_pause;
4335 return 0;
4336}
4337
4338static void emac_get_wol(struct net_device *dev,
4339 struct ethtool_wolinfo *wol)
4340{
4341 struct emac_priv *priv = netdev_priv(dev);
4342 struct device *device = &priv->pdev->dev;
4343
4344 if (device_can_wakeup(device)) {
4345 wol->supported = WAKE_MAGIC | WAKE_UCAST;
4346 wol->wolopts = priv->wolopts;
4347 }
4348}
4349
4350static int emac_set_wol(struct net_device *dev,
4351 struct ethtool_wolinfo *wol)
4352{
4353 struct emac_priv *priv = netdev_priv(dev);
4354 struct device *device = &priv->pdev->dev;
4355 u32 support = WAKE_MAGIC | WAKE_UCAST;
4356
4357 if (!device_can_wakeup(device) || !priv->en_suspend)
4358 return -ENOTSUPP;
4359
4360 if (wol->wolopts & ~support)
4361 return -EINVAL;
4362
4363 priv->wolopts = wol->wolopts;
4364
4365 if (wol->wolopts) {
4366 device_set_wakeup_enable(device, 1);
4367 enable_irq_wake(priv->irq_wakeup);
4368 } else {
4369 device_set_wakeup_enable(device, 0);
4370 disable_irq_wake(priv->irq_wakeup);
4371 }
4372
4373 return 0;
4374}
4375
4376static const struct ethtool_ops emac_ethtool_ops = {
4377 .get_link_ksettings = emac_get_link_ksettings,
4378 .set_link_ksettings = emac_set_link_ksettings,
4379 .get_drvinfo = emac_get_drvinfo,
4380 .nway_reset = phy_ethtool_nway_reset,
4381 .get_link = ethtool_op_get_link,
4382 .get_pauseparam = emac_get_pauseparam,
4383 .set_pauseparam = emac_set_pauseparam,
4384 .get_strings = emac_get_strings,
4385 .get_sset_count = emac_get_sset_count,
4386 .get_ethtool_stats = emac_get_ethtool_stats,
4387 .get_regs = emac_ethtool_get_regs,
4388 .get_regs_len = emac_ethtool_get_regs_len,
4389 .get_ts_info = emac_get_ts_info,
4390 .get_wol = emac_get_wol,
4391 .set_wol = emac_set_wol,
4392};
4393
4394static const struct net_device_ops emac_netdev_ops = {
4395 .ndo_open = emac_open,
4396 .ndo_stop = emac_close,
4397 .ndo_start_xmit = emac_start_xmit,
4398 .ndo_set_mac_address = emac_set_mac_address,
4399 .ndo_do_ioctl = emac_ioctl,
4400 .ndo_change_mtu = emac_change_mtu,
4401 .ndo_tx_timeout = emac_tx_timeout,
4402};
4403
4404#ifdef WAN_LAN_AUTO_ADAPT
4405#define EMAC_SKB_SIZE 2048
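/*
 * Uevent plumbing for WAN_LAN_AUTO_ADAPT: emac_event_add_var() appends
 * a single NUL-terminated "KEY=value" string to the event skb,
 * mimicking the environment layout of a regular kobject uevent.
 * emac_hotplug_fill_event() fills in the standard HOME/PATH/SUBSYSTEM/
 * ACTION variables plus the EMAC-specific ETH and PORT ones, and
 * emac_hotplug_work() broadcasts the result on the uevent netlink
 * socket.
 */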
static int emac_event_add_var(struct emac_event *event, int argv,
			      const char *format, ...)
{
	char buf[128];	/* on-stack: events can be formatted concurrently */
	char *s;
	va_list args;
	int len;

	if (argv)
		return 0;

	va_start(args, format);
	len = vsnprintf(buf, sizeof(buf), format, args);
	va_end(args);

	if (len >= sizeof(buf)) {
		printk("uevent variable too long for buffer\n");
		WARN_ON(1);
		return -ENOMEM;
	}

	s = skb_put(event->skb, len + 1);
	strcpy(s, buf);

	return 0;
}
4432
4433static int emac_hotplug_fill_event(struct emac_event *event)
4434{
4435 int ret;
4436
4437 ret = emac_event_add_var(event, 0, "HOME=%s", "/");
4438 if (ret)
4439 return ret;
4440
4441 ret = emac_event_add_var(event, 0, "PATH=%s",
4442 "/sbin:/bin:/usr/sbin:/usr/bin");
4443 if (ret)
4444 return ret;
4445
4446 ret = emac_event_add_var(event, 0, "SUBSYSTEM=%s", "ethernet");
4447 if (ret)
4448 return ret;
4449
4450 ret = emac_event_add_var(event, 0, "ACTION=%s", event->action);
4451 if (ret)
4452 return ret;
4453
4454 ret = emac_event_add_var(event, 0, "ETH=%s", event->name);
4455 if (ret)
4456 return ret;
4457
4458 ret = emac_event_add_var(event, 0, "PORT=%d", event->port);
4459 if (ret)
4460 return ret;
4461
4462 ret = emac_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());
4463
4464 return ret;
4465}
4466
4467static void emac_hotplug_work(struct work_struct *work)
4468{
4469 struct emac_event *event = container_of(work, struct emac_event, work);
4470 int ret = 0;
4471
4472 event->skb = alloc_skb(EMAC_SKB_SIZE, GFP_KERNEL);
4473 if (!event->skb)
4474 goto out_free_event;
4475
4476 ret = emac_event_add_var(event, 0, "%s@", event->action);
4477 if (ret)
4478 goto out_free_skb;
4479
4480 ret = emac_hotplug_fill_event(event);
4481 if (ret)
4482 goto out_free_skb;
4483
4484 NETLINK_CB(event->skb).dst_group = 1;
4485 broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);
4486
4487 out_free_skb:
4488 if (ret) {
4489 printk("work error %d\n", ret);
4490 kfree_skb(event->skb);
4491 }
4492 out_free_event:
4493 kfree(event);
4494}
4495
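/*
 * Map an emac_SIG event code to the ACTION string userspace sees
 * (LINKUP, LINKDW, IP175D_LINKUP, IP175D_LINKDW, DHCPCLIENT,
 * DHCPSERVER, PHY_CONNECT) and queue the uevent for delivery from
 * process context.
 */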
4496static int emac_sig_workq(int event, int port)
4497{
4498 struct emac_event *u_event = NULL;
4499
4500 u_event = kzalloc(sizeof(*u_event), GFP_KERNEL);
4501 if (!u_event)
4502 return -ENOMEM;
4503
4504 u_event->name = DRIVER_NAME;
	if (event == CARRIER_UP)
		u_event->action = "LINKUP";
	else if (event == CARRIER_DOWN)
		u_event->action = "LINKDW";
	else if (event == CARRIER_DOWN_IP175D)
		u_event->action = "IP175D_LINKDW";
	else if (event == CARRIER_UP_IP175D)
		u_event->action = "IP175D_LINKUP";
	else if (event == DHCP_EVENT_CLIENT)
		u_event->action = "DHCPCLIENT";
	else if (event == DHCP_EVENT_SERVER)
		u_event->action = "DHCPSERVER";
	else if (event == PHY_IP175D_CONNECT)
		u_event->action = "PHY_CONNECT";
	else
		u_event->action = "UNKNOWN";	/* keep the uevent well-formed */
4519
4520 u_event->port = port;
	INIT_WORK(&u_event->work, emac_hotplug_work);
4522 schedule_work(&u_event->work);
4523
4524 return 0;
4525}
4526
4527static inline void __emac_dhcp_work_func(struct emac_priv *priv)
4528{
4529 if (priv->dhcp == DHCP_REC_RESP) {
4530 emac_sig_workq(DHCP_EVENT_CLIENT, priv->vlan_port);
4531 } else if (priv->dhcp == DHCP_SEND_REQ || priv->dhcp == 0) {
4532 emac_sig_workq(DHCP_EVENT_SERVER, priv->vlan_port);
4533 }
4534
4535 priv->dhcp = 0;
	if (priv->dhcp_delaywork) {
		cancel_delayed_work(&priv->dhcp_work);
		priv->dhcp_delaywork = 0;
	}
4540}
4541
4542static void emac_dhcp_work_func_t(struct work_struct *work)
4543{
4544 struct emac_priv *priv = container_of(work, struct emac_priv, dhcp_work.work);
4545
4546 __emac_dhcp_work_func(priv);
4547}
4548#endif
4549
b.liub17525e2025-05-14 17:22:29 +08004550
4551
4552
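/*
 * The sysfs handlers below access vendor PHY registers through the
 * standard Clause 22 indirect MMD registers (IEEE 802.3, 22.2.4.3):
 * write the MMD device address to MII register 0xD, the target register
 * to 0xE, then (0x4000 | devad) to 0xD to switch 0xE into data mode
 * without post-increment, after which reads/writes of 0xE hit the
 * selected MMD register.  Register 1.0x834 bit 14 is the BASE-T1
 * master/slave configuration bit; 3.0x8700 (LPSD sleep) and the
 * 1.0x8Bxx SQI/TDR registers are assumed to be vendor-specific.
 */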
static long g_PhyVersionNumber;
4554
4555
static ssize_t phy_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int len;

	len = sprintf(buf, "phy_version = 0x%lx\n", g_PhyVersionNumber);

	return (ssize_t)len;
}
4565
static ssize_t phy_version_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int reg, val, devad;
	struct emac_priv *priv = dev_get_drvdata(dev);

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val == 1) {
		/* indirect MMD read of the vendor version register 0x1f.0x113 */
		devad = 0x1f;
		reg = 0x113;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
	}
	g_PhyVersionNumber = val;

	return size;
}
4588
4589
static ssize_t lpsd_sleep_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int len;
	int reg, val, devad;
	struct emac_priv *priv = dev_get_drvdata(dev);

	devad = 0x3;
	reg = 0x8700;
	mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
	mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
	mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
	val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);

	len = sprintf(buf, "reg3.8700 = 0x%x\n", val);

	return (ssize_t)len;
}
4608
static ssize_t lpsd_sleep_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	int addr = priv->ndev->phydev->mdio.addr;
	int devad = 0x3;
	int reg = 0x8700;
	int cmd, val;

	if (sscanf(buf, "%d", &cmd) != 1)
		return -EINVAL;

	/* indirect MMD read-modify-write of the sleep bit in reg 3.0x8700 */
	mdiobus_write(priv->mii, addr, 0xd, devad);
	mdiobus_write(priv->mii, addr, 0xe, reg);
	mdiobus_write(priv->mii, addr, 0xd, 0x4000 | devad);
	val = mdiobus_read(priv->mii, addr, 0x0e);

	printk("lpsd sleep mode : reg3.8700 = 0x%x\n", val);
	msleep(200);

	mdiobus_write(priv->mii, addr, 0xd, devad);
	mdiobus_write(priv->mii, addr, 0xe, reg);
	mdiobus_write(priv->mii, addr, 0xd, 0x4000 | devad);
	if (cmd == 1)
		/* enter lpsd sleep mode */
		mdiobus_write(priv->mii, addr, 0x0e, val | BIT(0));
	else
		/* leave lpsd sleep mode: clear the sleep bit */
		mdiobus_write(priv->mii, addr, 0x0e, val & ~BIT(0));

	return size;
}
4653
4654
4655static int mode_type = -1;
4656static int enter_only_one = 0;
4657
4658
static ssize_t gmac_master_or_slave_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int val = 0;
	int reg = 0;
	int devad = 0;
	int ret = 0;
	struct emac_priv *priv = dev_get_drvdata(dev);

	/* parse the requested mode */
	ret = sscanf(buf, "%d", &mode_type);
	if (ret < 1) {
		printk(KERN_ERR "Enter a number from 0-3 to select the mode; "
		       "any other value prints the mode description\n");
		return size;
	}

	/* validate the mode */
	if (mode_type < 0 || mode_type > 3) {
		printk(KERN_DEBUG "Enter a number from 0-3:\n"
		       "0: set slave mode\n"
		       "1: set master mode\n"
		       "2: SQI (cable quality) read mode\n"
		       "3: VCT (cable diagnostics) read mode\n"
		       "Once a mode is set, read this attribute to get the value\n");
		return size;
	}

	/* slave mode: clear the master/slave bit in reg 1.0x834 */
	if (mode_type == 0) {
		devad = 0x1;
		reg = 0x834;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
		msleep(200);

		val &= ~BIT(14);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, val);
	}
	/* master mode: set the master/slave bit in reg 1.0x834 */
	else if (mode_type == 1) {
		devad = 0x1;
		reg = 0x834;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
		msleep(200);

		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, val | BIT(14));
	}

	return size;
}
4728
4729
static ssize_t gmac_master_or_slave_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int len = 0;
	int val = 0;
	int reg = 0;
	int devad = 0;
	struct emac_priv *priv = dev_get_drvdata(dev);

	if (enter_only_one == 1)
		return 0;
	enter_only_one = 1;

	/* report the master/slave setting */
	if (mode_type == 0 || mode_type == 1) {
		devad = 0x1;
		reg = 0x834;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e) & BIT(14);
		if (val)
			len = sprintf(buf, "Master\n");
		else
			len = sprintf(buf, "Slave\n");

		printk(KERN_DEBUG "mode_type %d - gmac_master_or_slave is %s", mode_type, buf);
	}
	/* report the cable quality SQI value */
	else if (mode_type == 2) {
		devad = 0x1;
		reg = 0x8B10;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0d, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0d, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
		len = sprintf(buf, "SQI : 0x%x\n", val);
		printk(KERN_DEBUG "mode_type %d - SQI is 0x%x\n", mode_type, val);
	}
	/* report the VCT result: short, open or normal, plus distance */
	else if (mode_type == 3) {
		/* TDR enable */
		devad = 0x1;
		reg = 0x8B00;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, BIT(14));

		msleep(200);

		/* TDR start */
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, BIT(12) | BIT(14));

		msleep(20);

		/* read VCT status */
		devad = 0x1;
		reg = 0x8B02;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0xe);

		printk(KERN_DEBUG "Open status: %s - Short status: %s\n",
		       (val & BIT(1)) ? "Open" : "Normal", (val & BIT(0)) ? "Short" : "Normal");
		len = sprintf(buf, "Open status: %s\nShort status: %s\n",
			      (val & BIT(1)) ? "Open" : "Normal", (val & BIT(0)) ? "Short" : "Normal");

		reg = 0x8B01;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0xe);

		/* append to buf instead of sprintf'ing buf into itself */
		len += sprintf(buf + len, "Distance status: 0x%x\n", val);
		printk(KERN_DEBUG "mode_type %d - Distance status is 0x%x\n", mode_type, val);

		/* TDR disable */
		devad = 0x1;
		reg = 0x8B00;
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
		mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, 0x0);
	} else {
		len = sprintf(buf, "Enter a number from 0-3 first:\n"
			      "0: set slave mode\n"
			      "1: set master mode\n"
			      "2: SQI (cable quality) read mode\n"
			      "3: VCT (cable diagnostics) read mode\n"
			      "Once a mode is set, read this attribute to get the value\n");
		printk(KERN_DEBUG "gmac_master_or_slave: mode_type %d not set\n", mode_type);
	}
	enter_only_one = 0;

	return len;
}
4849
4850
4851
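/*
 * Example usage (the sysfs path depends on the platform device name):
 *   echo 1 > /sys/devices/platform/<emac>/lpsd_sleep         # enter LPSD
 *   echo 0 > /sys/devices/platform/<emac>/lpsd_sleep         # leave LPSD
 *   echo 1 > /sys/devices/platform/<emac>/phy_version
 *   cat /sys/devices/platform/<emac>/phy_version             # latched version
 *   echo 2 > /sys/devices/platform/<emac>/gmac_master_or_slave
 *   cat /sys/devices/platform/<emac>/gmac_master_or_slave    # read SQI
 */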
static DEVICE_ATTR(lpsd_sleep, S_IRUGO | S_IWUSR, lpsd_sleep_show, lpsd_sleep_store);
static DEVICE_ATTR(phy_version, S_IRUGO | S_IWUSR, phy_version_show, phy_version_store);
static DEVICE_ATTR(gmac_master_or_slave, S_IRUGO | S_IWUSR, gmac_master_or_slave_show, gmac_master_or_slave_store);

static struct attribute *ethernet_oper_attrs[] = {
	&dev_attr_lpsd_sleep.attr,
	&dev_attr_phy_version.attr,
	&dev_attr_gmac_master_or_slave.attr,
	NULL,
};

static const struct attribute_group demo_attr_grp = {
	.attrs = ethernet_oper_attrs,
};
4869
b.liue9582032025-04-17 19:18:16 +08004870static int emac_probe(struct platform_device *pdev)
4871{
4872 struct emac_priv *priv;
4873 struct net_device *ndev = NULL;
4874 struct resource *res;
4875 struct device_node *np = pdev->dev.of_node;
4876 struct device *dev = &pdev->dev;
4877 const unsigned char *mac_addr = NULL;
4878 const struct of_device_id *match;
4879#ifdef CONFIG_DEBUG_FS
4880 struct dentry *emac_fs_dir = NULL;
4881 struct dentry *emac_clk_tuning;
4882#endif
4883 int ret;
b.liub17525e2025-05-14 17:22:29 +08004884 struct regulator *vcc3v3_gmac;
b.liue9582032025-04-17 19:18:16 +08004885
4886 ndev = alloc_etherdev(sizeof(struct emac_priv));
4887 if (!ndev) {
4888 ret = -ENOMEM;
4889 return ret;
4890 }
4891 priv = netdev_priv(ndev);
4892 priv->ndev = ndev;
4893 priv->pdev = pdev;
4894#ifdef WAN_LAN_AUTO_ADAPT
4895 priv->dhcp = -1;
4896 priv->vlan_port = -1;
4897 priv->dhcp_delaywork = 0;
4898#endif
4899 platform_set_drvdata(pdev, priv);
4900
4901 match = of_match_device(of_match_ptr(emac_of_match), &pdev->dev);
4902 if (match) {
4903 priv->regdata = match->data;
4904 } else {
		pr_info("no matching OF device data found\n");
4906 }
4907
4908 emac_command_options(priv);
4909 emac_skbrb_init(EMAC_SKBRB_SLOT_SIZE, priv->rx_ring.total_cnt * 2);
4910
4911 priv->hw_stats = devm_kzalloc(&pdev->dev,
4912 sizeof(*priv->hw_stats),
4913 GFP_KERNEL);
4914 if (!priv->hw_stats) {
4915 dev_err(&pdev->dev, "failed to allocate counter memory\n");
4916 ret = -ENOMEM;
4917 goto err_netdev;
4918 }
4919
4920 spin_lock_init(&priv->hw_stats->stats_lock);
4921
4922 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4923 priv->iobase = devm_ioremap_resource(&pdev->dev, res);
4924 if (IS_ERR(priv->iobase)) {
4925 ret = -ENOMEM;
4926 goto err_netdev;
4927 }
4928
4929 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4930 priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
4931 if (!IS_ERR(priv->tso_base)) {
	if (!IS_ERR(priv->tso_base))
		dev_info(&pdev->dev, "tso base=%px\n", priv->tso_base);
4935 priv->irq = irq_of_parse_and_map(np, 0);
4936 if (!priv->irq) {
4937 ret = -ENXIO;
4938 goto err_netdev;
4939 }
4940 priv->irq_wakeup = irq_of_parse_and_map(np, 1);
4941 if (!priv->irq_wakeup)
4942 dev_err(&pdev->dev, "wake_up irq not found\n");
4943
4944 priv->tso = of_property_read_bool(np, "tso-support");
4945 if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
4946 priv->tso = false;
4947 if (priv->tso) {
4948 priv->irq_tso = irq_of_parse_and_map(np, 3);
4949 if (!priv->irq_tso) {
4950 dev_err(&pdev->dev, "tso irq not found\n");
4951 priv->tso = false;
4952 }
4953 }
4954
4955 priv->sram_pool = of_gen_pool_get(dev->of_node, "eth,sram", 0);
4956 if (priv->sram_pool) {
4957 dev_notice(&pdev->dev, "use sram as tx desc\n");
4958 }
4959
	ret = of_property_read_u32(np, "lpm-qos", &priv->pm_qos);
	if (ret)
		goto err_netdev;
4963
4964 ret = of_property_read_u32(np, "3v3-enable", &priv->power_domain);
4965 if (ret)
4966 priv->power_domain = 0;
4967
4968 ret = of_property_read_u32(np, "mdio-clk-div", &priv->mdio_clk_div);
4969 if (ret)
4970 priv->mdio_clk_div = 0xfe;
4971
4972 if (of_property_read_bool(np, "enable-suspend"))
4973 priv->en_suspend = 1;
4974 else
4975 priv->en_suspend = 0;
4976
4977 priv->wolopts = 0;
4978 if (of_property_read_bool(np, "magic-packet-wakeup"))
4979 priv->wolopts |= WAKE_MAGIC;
4980
4981 if (of_property_read_bool(np, "unicast-packet-wakeup"))
4982 priv->wolopts |= WAKE_UCAST;
4983
4984 priv->dev_flags = 0;
4985 if (of_property_read_bool(np, "suspend-not-keep-power")) {
4986 priv->dev_flags |= EMAC_SUSPEND_POWER_DOWN_PHY;
4987 priv->wolopts = 0;
4988 }
4989
	vcc3v3_gmac = devm_regulator_get(dev, "vmmc");
	if (!IS_ERR(vcc3v3_gmac)) {
		/* the board feeds the GMAC I/O rail at 1.8 V */
		if (regulator_set_voltage(vcc3v3_gmac, 1800000, 1800000))
			pr_err("fail to set regulator vcc3v3_gmac to 1.8v\n");

		if (!regulator_is_enabled(vcc3v3_gmac) && regulator_enable(vcc3v3_gmac))
			pr_err("fail to enable regulator vcc3v3_gmac\n");
	}
4999
5000 g_vcc3v3_gmac = vcc3v3_gmac;
5001
b.liue9582032025-04-17 19:18:16 +08005002 priv->pinctrl = devm_pinctrl_get(dev);
5003 if (IS_ERR(priv->pinctrl))
5004 dev_err(dev, "could not get pinctrl handle\n");
5005
5006 priv->rgmii_pins = pinctrl_lookup_state(priv->pinctrl, "rgmii-pins");
5007 if (IS_ERR(priv->rgmii_pins))
5008 dev_err(dev, "could not get rgmii-pins pinstate\n");
5009
5010 emac_set_aib_power_domain(priv);
5011
5012 device_init_wakeup(&pdev->dev, 1);
5013
5014 priv->pm_qos_req.name = pdev->name;
5015 pm_qos_add_request(&priv->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
5016 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
5017#ifdef CONFIG_DDR_DEVFREQ
5018 pm_qos_add_request(&priv->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
5019 PM_QOS_DEFAULT_VALUE);
5020
5021 priv->clk_scaling.polling_delay_ms = 1000; /* 1s window */
5022 priv->clk_scaling.tx_up_threshold = 120; /* 120Mbps */
5023 priv->clk_scaling.tx_down_threshold = 60;
5024 priv->clk_scaling.rx_up_threshold = 60; /* 60Mbps */
5025 priv->clk_scaling.rx_down_threshold = 20;
5026 priv->clk_scaling.window_time = jiffies;
5027 pm_qos_add_request(&priv->clk_scaling.ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
5028 PM_QOS_DEFAULT_VALUE);
5029 INIT_WORK(&priv->qos_work, emac_ddr_qos_work);
5030#endif
5031 skb_queue_head_init(&priv->rx_skb);
5032 ndev->watchdog_timeo = 5 * HZ;
5033 ndev->base_addr = (unsigned long)priv->iobase;
5034 ndev->irq = priv->irq;
5035 /* set hw features */
5036 ndev->features = NETIF_F_SG | NETIF_F_SOFT_FEATURES;
5037 if (priv->tso) {
5038 ndev->features |= NETIF_F_RXCSUM;
5039 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5040 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
5041 dev_info(&pdev->dev, "TSO feature enabled\n");
5042 }
5043 ndev->hw_features = ndev->features;
5044 ndev->vlan_features = ndev->features;
5045
5046 ndev->ethtool_ops = &emac_ethtool_ops;
5047 ndev->netdev_ops = &emac_netdev_ops;
5048 if (pdev->dev.of_node)
5049 mac_addr = of_get_mac_address(np);
5050
5051 if (!IS_ERR_OR_NULL(mac_addr)) {
5052 //ether_addr_copy(ndev->dev_addr, mac_addr);
5053 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
5054 if (!is_valid_ether_addr(ndev->dev_addr)) {
5055 dev_info(&pdev->dev, "Using random mac address\n");
5056 eth_hw_addr_random(ndev);
5057 }
5058 } else {
5059 dev_info(&pdev->dev, "Using random mac address\n");
5060 eth_hw_addr_random(ndev);
5061 }
5062
5063 priv->hw_adj = of_property_read_bool(np, "hw-increment");
5064 priv->ptp_support = of_property_read_bool(np, "ptp-support");
5065 if (priv->ptp_support) {
5066 pr_info("EMAC support IEEE1588 PTP Protocol\n");
5067 if (of_property_read_u32(np, "ptp-clk-rate",
5068 &priv->ptp_clk_rate)) {
5069 priv->ptp_clk_rate = 20000000;
			pr_info("%s: ptp_clk rate using default value %d, may be inaccurate\n",
				__func__, priv->ptp_clk_rate);
5072 }
5073
5074 priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk");
5075 if (IS_ERR(priv->ptp_clk)) {
5076 dev_err(&pdev->dev, "ptp clock not found.\n");
5077 ret = PTR_ERR(priv->ptp_clk);
5078 goto err_netdev;
5079 }
5080
5081 clk_set_rate(priv->ptp_clk, priv->ptp_clk_rate);
5082 }
5083
5084 priv->pps_info.enable_pps = 0;
5085#ifdef CONFIG_PPS
5086 ret = of_property_read_u32(np, "pps_source", &priv->pps_info.pps_source);
5087 if (!ret) {
5088 priv->irq_pps = irq_of_parse_and_map(np, 2);
5089
5090 if (priv->pps_info.pps_source < EMAC_PPS_MAX)
5091 priv->pps_info.enable_pps = 1;
5092 else
5093 dev_err(&pdev->dev, "wrong PPS source!\n");
5094 }
5095#endif
5096 priv->clk = devm_clk_get(&pdev->dev, "emac-clk");
5097 if (IS_ERR(priv->clk)) {
5098 dev_err(&pdev->dev, "emac clock not found.\n");
5099 ret = PTR_ERR(priv->clk);
5100 goto err_netdev;
5101 }
5102
5103 ret = clk_prepare_enable(priv->clk);
5104 if (ret < 0) {
5105 dev_err(&pdev->dev, "failed to enable emac clock: %d\n",
5106 ret);
5107 goto clk_disable;
5108 }
5109
5110 emac_sw_init(priv);
5111 ret = emac_mdio_init(priv);
5112 if (ret)
5113 goto clk_disable;
5114
5115 INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
5116#ifdef WAN_LAN_AUTO_ADAPT
5117 INIT_DELAYED_WORK(&priv->dhcp_work, emac_dhcp_work_func_t);
5118#endif
5119 if (of_phy_is_fixed_link(np)) {
5120 if ((emac_set_fixed_link(np, priv) < 0)) {
5121 ret = -ENODEV;
5122 goto clk_disable;
5123 }
5124 dev_info(&pdev->dev, "find fixed link\n");
5125 priv->fix_link = 1;
5126 }
5127
5128 INIT_DELAYED_WORK(&priv->emac_pause_work, emac_pause_generate_work_fuc);
5129 SET_NETDEV_DEV(ndev, &pdev->dev);
5130 strcpy(ndev->name, "eth%d");
5131
5132 ret = register_netdev(ndev);
5133 if (ret) {
5134 pr_err("register_netdev failed\n");
5135 goto err_mdio_deinit;
5136 }
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		dev_warn(&pdev->dev, "no suitable DMA mask available\n");
5138#ifdef CONFIG_ASR_EMAC_NAPI
5139 netif_napi_add(ndev, &priv->rx_napi, emac_rx_poll, 32);
5140 netif_tx_napi_add(ndev, &priv->tx_napi, emac_tx_poll, 32);
5141#endif
5142 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
5143 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
5144 priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable");
5145
5146 if (priv->clk_tuning_enable) {
5147 ret = of_property_read_u32(np, "tx-clk-config",
5148 &priv->tx_clk_config);
5149 if (ret)
5150 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
5151
5152 ret = of_property_read_u32(np, "rx-clk-config",
5153 &priv->rx_clk_config);
5154 if (ret)
5155 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
5156#ifdef CONFIG_DEBUG_FS
5157 if (!emac_fs_dir) {
5158 emac_fs_dir = debugfs_create_dir(DRIVER_NAME, NULL);
5159
5160 if (!emac_fs_dir || IS_ERR(emac_fs_dir)) {
5161 pr_err("emac debugfs create directory failed\n");
5162 }else {
5163 emac_clk_tuning = debugfs_create_file("clk_tuning", 0664,
5164 emac_fs_dir, priv, &clk_tuning_fops);
5165 if (!emac_clk_tuning) {
5166 pr_err("emac debugfs create file failed\n");
5167 }
5168 }
5169 }
5170#endif
5171 }
b.liub17525e2025-05-14 17:22:29 +08005172
	ret = sysfs_create_group(&pdev->dev.kobj, &demo_attr_grp);
	if (ret)
		dev_warn(&pdev->dev, "failed to create PHY sysfs group\n");

	//device_create_file(&pdev->dev, &dev_attr_cable_sqi_value);
b.liue9582032025-04-17 19:18:16 +08005177 return 0;
5178
5179err_mdio_deinit:
5180 emac_mdio_deinit(priv);
5181clk_disable:
5182 clk_disable_unprepare(priv->clk);
5183err_netdev:
5184 free_netdev(ndev);
5185 emac_skbrb_release();
5186 return ret;
5187}
5188
static int emac_remove(struct platform_device *pdev)
{
	struct emac_priv *priv = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);
	unregister_netdev(priv->ndev);
	emac_reset_hw(priv);
	emac_mdio_deinit(priv);
	clk_disable_unprepare(priv->clk);
	pm_qos_remove_request(&priv->pm_qos_req);
	cancel_delayed_work_sync(&priv->emac_pause_work);
#ifdef CONFIG_DDR_DEVFREQ
	pm_qos_remove_request(&priv->pm_ddr_qos);
	pm_qos_remove_request(&priv->clk_scaling.ddr_qos);
#endif
	/* priv lives inside the netdev, so free it last */
	free_netdev(priv->ndev);
	emac_skbrb_release();
	return 0;
}
5208
5209static void emac_shutdown(struct platform_device *pdev)
5210{
5211}
5212
5213#ifdef CONFIG_PM_SLEEP
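/*
 * System suspend policy: when Wake-on-LAN is requested the MAC stays
 * clocked, the selected wake modes are armed in MAC_GLOBAL_CONTROL and
 * the wakeup interrupt is kept enabled across suspend; without WoL the
 * interface is closed, the PHY is optionally powered down
 * (EMAC_SUSPEND_POWER_DOWN_PHY) and the bus clock is gated.  Resume
 * reverses whichever path was taken.
 */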
5214static int emac_resume(struct device *dev)
5215{
5216 struct emac_priv *priv = dev_get_drvdata(dev);
5217 struct net_device *ndev = priv->ndev;
5218 u32 ctrl, wake_mode = 0;
5219
5220 if (!priv->en_suspend)
5221 return 0;
5222
5223 if (priv->wolopts) {
5224 if (netif_running(ndev)) {
5225 netif_device_attach(ndev);
5226#ifdef CONFIG_ASR_EMAC_NAPI
5227 napi_enable(&priv->rx_napi);
5228 napi_enable(&priv->tx_napi);
5229#endif
5230 }
5231
		/* WAKE_MAGIC arms magic-packet wake, WAKE_UCAST unicast wake */
		if (priv->wolopts & WAKE_MAGIC)
			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
		if (priv->wolopts & WAKE_UCAST)
			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
5236
5237 disable_irq_wake(priv->irq_wakeup);
5238 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
5239 ctrl &= ~wake_mode;
5240 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
5241 } else {
5242 clk_prepare_enable(priv->clk);
5243
5244 if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
5245 emac_power_up(priv);
5246
5247 rtnl_lock();
5248 dev_open(ndev, NULL);
5249 rtnl_unlock();
5250 }
5251
5252 return 0;
5253}
5254
5255static int emac_suspend(struct device *dev)
5256{
5257 struct emac_priv *priv = dev_get_drvdata(dev);
5258 struct net_device *ndev = priv->ndev;
5259 u32 ctrl, wake_mode = 0;
5260
5261 if (!priv->en_suspend)
5262 return 0;
5263
5264 if (priv->wolopts) {
5265 if (netif_running(ndev)) {
5266 netif_device_detach(ndev);
5267#ifdef CONFIG_ASR_EMAC_NAPI
5268 napi_disable(&priv->rx_napi);
5269 napi_disable(&priv->tx_napi);
5270#endif
5271 }
5272
		/* WAKE_MAGIC arms magic-packet wake, WAKE_UCAST unicast wake */
		if (priv->wolopts & WAKE_MAGIC)
			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
		if (priv->wolopts & WAKE_UCAST)
			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
5277
5278 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
5279 ctrl |= wake_mode;
5280 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
5281 enable_irq_wake(priv->irq_wakeup);
5282 } else {
5283 rtnl_lock();
5284 dev_close(ndev);
5285 rtnl_unlock();
5286
5287 if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
5288 emac_power_down(priv);
5289
5290 clk_disable_unprepare(priv->clk);
5291 }
5292
5293 return 0;
5294}
5295
5296static int emac_suspend_noirq(struct device *dev)
5297{
5298 struct emac_priv *priv = dev_get_drvdata(dev);
5299 struct net_device *ndev = priv->ndev;
5300
5301 if (!ndev->phydev && !priv->fix_link)
5302 return 0;
5303
5304 pr_pm_debug("==> enter emac_suspend_noirq\n");
5305 pm_qos_update_request(&priv->pm_qos_req,
5306 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
5307 return 0;
5308}
5309
5310static int emac_resume_noirq(struct device *dev)
5311{
5312 struct emac_priv *priv = dev_get_drvdata(dev);
5313 struct net_device *ndev = priv->ndev;
5314
5315 if (!ndev->phydev && !priv->fix_link)
5316 return 0;
5317
5318 pr_pm_debug("==> enter emac_resume_noirq\n");
5319 pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
5320 return 0;
5321}
5322
5323static const struct dev_pm_ops emac_pm_ops = {
5324 .suspend = emac_suspend,
5325 .resume = emac_resume,
5326 .suspend_noirq = emac_suspend_noirq,
5327 .resume_noirq = emac_resume_noirq,
5328};
5329
5330#define ASR_EMAC_PM_OPS (&emac_pm_ops)
5331#else
5332#define ASR_EMAC_PM_OPS NULL
5333#endif
5334
5335static struct platform_driver emac_driver = {
5336 .probe = emac_probe,
5337 .remove = emac_remove,
5338 .shutdown = emac_shutdown,
5339 .driver = {
5340 .name = DRIVER_NAME,
5341 .of_match_table = of_match_ptr(emac_of_match),
5342 .pm = ASR_EMAC_PM_OPS,
5343 },
5344};
5345
5346module_platform_driver(emac_driver);
5347
5348MODULE_LICENSE("GPL");
5349MODULE_DESCRIPTION("Ethernet driver for ASR Emac");
5350MODULE_ALIAS("platform:asr_eth");