// SPDX-License-Identifier: GPL-2.0
/*
 * asr emac driver
 *
 * Copyright (C) 2019 ASR Micro Limited
 *
 */
8
9#include <linux/bitops.h>
10#include <linux/kernel.h>
11#include <linux/clk.h>
12#include <linux/clk-provider.h>
13#include <linux/delay.h>
14#include <linux/dma-mapping.h>
15#include <linux/etherdevice.h>
16#include <linux/ethtool.h>
17#include <linux/in.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/ip.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_gpio.h>
25#include <linux/of_net.h>
26#include <linux/of_mdio.h>
27#include <linux/of_irq.h>
28#include <linux/of_device.h>
29#include <linux/phy.h>
30#include <linux/platform_device.h>
31#include <linux/tcp.h>
32#include <linux/timer.h>
33#include <linux/types.h>
34#include <linux/udp.h>
35#include <linux/workqueue.h>
36#include <linux/phy_fixed.h>
37#include <linux/pm_qos.h>
38#include <asm/cacheflush.h>
39#include <linux/cputype.h>
40#include <linux/iopoll.h>
41#include <linux/genalloc.h>
#include <linux/regulator/consumer.h>

44#ifdef CONFIG_DEBUG_FS
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#endif /* CONFIG_DEBUG_FS */
48#include <asm/atomic.h>
49#include "emac_eth.h"
50#include <linux/skbrb.h>
51
52#ifdef WAN_LAN_AUTO_ADAPT
53#include <linux/if_vlan.h>
54#include <linux/if_ether.h>
55#include <linux/kobject.h>
56#endif
57
58#define DRIVER_NAME "asr_emac"
59
#define CLOSE_AIB_POWER_DOMAIN	1
#define AXI_PHYS_BASE		0xd4200000
62
63#define AIB_GMAC_IO_REG 0xD401E804
64#define APBC_ASFAR 0xD4015050
65#define AKEY_ASFAR 0xbaba
66#define AKEY_ASSAR 0xeb10
67
68#define EMAC_DIRECT_MAP
69#define TUNING_CMD_LEN 50
70#define CLK_PHASE_CNT 8
71#define TXCLK_PHASE_DEFAULT 0
72#define RXCLK_PHASE_DEFAULT 0
73#define TX_PHASE 1
74#define RX_PHASE 0
75
76#define EMAC_DMA_REG_CNT 16
77#define EMAC_MAC_REG_CNT 61
78#define EMAC_EMPTY_FROM_DMA_TO_MAC 48
79#define EMAC_REG_SPACE_SIZE ((EMAC_DMA_REG_CNT + \
80 EMAC_MAC_REG_CNT + EMAC_EMPTY_FROM_DMA_TO_MAC) * 4)
81#define EMAC_ETHTOOL_STAT(x) { #x, \
82 offsetof(struct emac_hw_stats, x) / sizeof(u32) }
83
84#define EMAC_SKBRB_SLOT_SIZE 1600
85#define EMAC_EXTRA_ROOM 72
86#define EMAC_SKBRB_MAX_PAYLOAD (EMAC_SKBRB_SLOT_SIZE - EMAC_EXTRA_ROOM - NET_IP_ALIGN)
87
88#define EMAC_RX_FILL_TIMER_US 0
89#define EMAC_TX_COAL_TIMER_US (1000)
90#define EMAC_TX_FRAMES (64)
91
92#ifdef WAN_LAN_AUTO_ADAPT
93#define DHCP_DISCOVER 1
94#define DHCP_OFFER 2
95#define DHCP_REQUEST 3
96#define DHCP_ACK 5
97#define IP175D_PHY_ID 0x02430d80
98
/* Event codes reported by the WAN/LAN auto-adapt logic (see emac_sig_workq). */
enum emac_SIG {
	CARRIER_DOWN = 0,
	CARRIER_UP,
	DHCP_EVENT_CLIENT,
	DHCP_EVENT_SERVER,
	PHY_IP175D_CONNECT,
	CARRIER_DOWN_IP175D,
	CARRIER_UP_IP175D,
};

/* Direction of an observed DHCP exchange. */
enum emac_DHCP {
	DHCP_SEND_REQ = 1,
	DHCP_REC_RESP = 2,
};

/* Deferred event descriptor; presumably emitted as a uevent from the
 * work handler (see uevent_next_seqnum below) -- confirm against the
 * emac_sig_workq implementation. */
struct emac_event {
	const char *name;		/* event name string */
	char *action;			/* event action string */
	int port;			/* switch port the event refers to */
	struct sk_buff *skb;		/* triggering packet, if any */
	struct work_struct work;	/* deferred delivery context */
};
121
122extern u64 uevent_next_seqnum(void);
123static int emac_sig_workq(int event, int port);
124#endif
125
126static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable);
127static int clk_phase_set(struct emac_priv *priv, bool is_tx);
128#ifdef CONFIG_ASR_EMAC_NAPI
129static int emac_rx_clean_desc(struct emac_priv *priv, int budget);
130#else
131static int emac_rx_clean_desc(struct emac_priv *priv);
132#endif
133static void emac_alloc_rx_desc_buffers(struct emac_priv *priv);
134static int emac_phy_connect(struct net_device *dev);
135
/* 3.3 V supply for the GMAC; NULL until acquired -- presumably looked up
 * during probe via the regulator framework (TODO confirm with probe code). */
struct regulator *g_vcc3v3_gmac= NULL;
/* for falcon */
/*
 * Per-SoC register layout table: bit positions and offsets inside the
 * APMU clock/reset control registers.  A value of -1 means the control
 * does not exist on this silicon revision.
 */
struct emac_regdata asr_emac_regdata_v1 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 17,
	.phy_intr_enable_shift = 16,
	.int_clk_src_sel_shift = -1,
	.rgmii_tx_clk_src_sel_shift = 5,
	.rgmii_rx_clk_src_sel_shift = 4,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = -1,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = -1,
	.rgmii_tx_delay_code_shift = -1,
	.rgmii_tx_delay_code_mask =-1,
	.rgmii_tx_delay_step_shift = -1,
	.rgmii_tx_delay_step_mask = -1,
	.rgmii_tx_delay_enable_shift = -1,
	.rgmii_rx_dline_reg_offset = -1,
	.rgmii_rx_delay_code_shift = -1,
	.rgmii_rx_delay_code_mask = -1,
	.rgmii_rx_delay_step_shift = -1,
	.rgmii_rx_delay_step_mask = -1,
	.rgmii_rx_delay_enable_shift = -1,
};
164
/* for kagu */
/* Register layout table for the kagu SoC; -1 marks absent controls. */
struct emac_regdata asr_emac_regdata_v2 = {
	.support_dual_vol_power = 0,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x178,
	.rgmii_tx_delay_code_shift = 24,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 20,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 16,
	.rgmii_rx_dline_reg_offset = 0x178,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 4,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 0,
};
192
/* for lapwing */
/* Register layout table for the lapwing SoC; -1 marks absent controls. */
struct emac_regdata asr_emac_regdata_v3 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 1,
	.clk_rst_ctrl_reg_offset = 0x164,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x16c,
	.rgmii_tx_delay_code_shift = 8,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 0,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 31,
	.rgmii_rx_dline_reg_offset = 0x168,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 0,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 31,
};
220
/* DT match table: each compatible string selects the per-SoC regdata. */
static const struct of_device_id emac_of_match[] = {
	{
		.compatible = "asr,asr-eth",
		.data = (void *)&asr_emac_regdata_v1,
	},
	{
		.compatible = "asr,asr-eth-v2",
		.data = (void *)&asr_emac_regdata_v2,
	},
	{
		.compatible = "asr,asr-eth-v3",
		.data = (void *)&asr_emac_regdata_v3,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);
237
#ifdef EMAC_DIRECT_MAP
/*
 * Fast-path DMA "mapping" for TX buffers: translate the kernel virtual
 * address to physical and clean the D-cache over the buffer, avoiding
 * the full dma_map_single() path.  Only valid for addresses that
 * mv_cp_virtual_to_physical() can translate; BUG if the translation
 * returns the input unchanged (i.e. no mapping found).
 */
dma_addr_t inline emac_map_direct(unsigned buf, unsigned len)
{
	unsigned ret;
	ret = mv_cp_virtual_to_physical(buf);
	BUG_ON(ret == buf);
	/* Clean whole 32-byte cache lines covering [buf, buf + len). */
	__cpuc_flush_dcache_area((void *)(buf & ~ 31),
			((len + (buf & 31) + 31) & ~ 31));
	return (dma_addr_t)ret;
}
#endif
249
250static inline void emac_unmap_single(struct device *dev, dma_addr_t handle,
251 size_t size, enum dma_data_direction dir)
252{
253#ifdef EMAC_DIRECT_MAP
254 if (dir == DMA_TO_DEVICE)
255 return;
256#endif
257 dma_unmap_single(dev, handle, size ,dir);
258}
259
260static inline dma_addr_t emac_map_single(struct device *dev, void *ptr,
261 size_t size,enum dma_data_direction dir)
262{
263 if (dir == DMA_FROM_DEVICE)
264 return dma_map_single(dev, ptr, size, dir);
265#ifndef EMAC_DIRECT_MAP
266 return dma_map_single(dev, ptr, size, dir);
267#else
268 return emac_map_direct((unsigned)ptr, (unsigned)size);
269#endif
270}
271
272#ifdef CONFIG_DDR_DEVFREQ
273static void emac_ddr_qos_work(struct work_struct *work)
274{
275 struct emac_priv *priv;
276 int val;
277
278 priv = container_of(work, struct emac_priv, qos_work);
279 val = priv->clk_scaling.qos_val;
280
281 if (val == PM_QOS_DEFAULT_VALUE)
282 pm_qos_update_request(&priv->clk_scaling.ddr_qos, val);
283 else
284 pm_qos_update_request_timeout(
285 &priv->clk_scaling.ddr_qos, val, (2 * USEC_PER_SEC));
286}
287
288static void emac_ddr_clk_scaling(struct emac_priv *priv)
289{
290 struct net_device *ndev = priv->ndev;
291 unsigned long rx_bytes, tx_bytes;
292 unsigned long last_rx_bytes, last_tx_bytes;
293 unsigned long total_time_ms = 0;
294 unsigned int cur_rx_threshold, cur_tx_threshold;
295 unsigned long polling_jiffies;
296 int qos_val;
297
298 polling_jiffies = msecs_to_jiffies(priv->clk_scaling.polling_delay_ms);
299 if (time_is_after_jiffies(priv->clk_scaling.window_time +
300 polling_jiffies))
301 return;
302
303 total_time_ms = jiffies_to_msecs((long)jiffies -
304 (long)priv->clk_scaling.window_time);
305
306 if (!ndev) {
307 pr_err("%s: dev or net is not ready\n", __func__);
308 return;
309 }
310
311 qos_val = priv->clk_scaling.qos_val;
312 last_rx_bytes = priv->clk_scaling.rx_bytes;
313 last_tx_bytes = priv->clk_scaling.tx_bytes;
314 if (!last_rx_bytes && !last_tx_bytes)
315 goto out;
316
317 if (likely(ndev->stats.rx_bytes > last_rx_bytes))
318 rx_bytes = ndev->stats.rx_bytes - last_rx_bytes;
319 else
320 rx_bytes = ULONG_MAX - last_rx_bytes + ndev->stats.rx_bytes + 1;
321
322 if (likely(ndev->stats.tx_bytes > last_tx_bytes))
323 tx_bytes = ndev->stats.tx_bytes - last_tx_bytes;
324 else
325 tx_bytes = ULONG_MAX - last_tx_bytes + ndev->stats.tx_bytes + 1;
326
327 cur_tx_threshold = tx_bytes * 8 / (total_time_ms * 1000);
328 pr_debug("%s: tx_rate=%dMbps, up_threshold=%dMbps\n",
329 __func__, cur_tx_threshold, priv->clk_scaling.tx_up_threshold);
330 if (cur_tx_threshold >= priv->clk_scaling.tx_up_threshold) {
331 qos_val = ASR_EMAC_DDR_BOOST_FREQ;
332 goto out;
333 }
334
335 cur_rx_threshold = rx_bytes * 8 / (total_time_ms * 1000);
336 pr_debug("%s: rx_rate=%dMbps, up_threshold=%dMbps\n",
337 __func__, cur_rx_threshold, priv->clk_scaling.rx_up_threshold);
338 if (cur_rx_threshold >= priv->clk_scaling.rx_up_threshold) {
339 qos_val = ASR_EMAC_DDR_BOOST_FREQ;
340 goto out;
341 }
342
343 if (cur_tx_threshold < priv->clk_scaling.tx_down_threshold &&
344 cur_rx_threshold < priv->clk_scaling.rx_down_threshold)
345 qos_val = PM_QOS_DEFAULT_VALUE;
346
347out:
348 priv->clk_scaling.rx_bytes = ndev->stats.rx_bytes;
349 priv->clk_scaling.tx_bytes = ndev->stats.tx_bytes;
350 priv->clk_scaling.window_time = jiffies;
351
352 if (qos_val != priv->clk_scaling.qos_val) {
353 priv->clk_scaling.qos_val = qos_val;
354 schedule_work(&priv->qos_work);
355 }
356
357 return;
358}
359#endif
360
/* strings used by ethtool */
/*
 * Name -> u32 word offset (into struct emac_hw_stats) mapping used for
 * `ethtool -S`; offsets are computed by EMAC_ETHTOOL_STAT.
 */
static const struct emac_ethtool_stats {
	char str[ETH_GSTRING_LEN];	/* counter name shown to userspace */
	u32 offset;			/* u32 index into emac_hw_stats */
} emac_ethtool_stats[] = {
	EMAC_ETHTOOL_STAT(tx_ok_pkts),
	EMAC_ETHTOOL_STAT(tx_total_pkts),
	EMAC_ETHTOOL_STAT(tx_ok_bytes),
	EMAC_ETHTOOL_STAT(tx_err_pkts),
	EMAC_ETHTOOL_STAT(tx_singleclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_multiclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_lateclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_excessclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_unicast_pkts),
	EMAC_ETHTOOL_STAT(tx_multicast_pkts),
	EMAC_ETHTOOL_STAT(tx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(tx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_pkts),
	EMAC_ETHTOOL_STAT(rx_total_pkts),
	EMAC_ETHTOOL_STAT(rx_crc_err_pkts),
	EMAC_ETHTOOL_STAT(rx_align_err_pkts),
	EMAC_ETHTOOL_STAT(rx_err_total_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_bytes),
	EMAC_ETHTOOL_STAT(rx_total_bytes),
	EMAC_ETHTOOL_STAT(rx_unicast_pkts),
	EMAC_ETHTOOL_STAT(rx_multicast_pkts),
	EMAC_ETHTOOL_STAT(rx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(rx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_len_err_pkts),
	EMAC_ETHTOOL_STAT(rx_len_undersize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_oversize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_fragment_pkts),
	EMAC_ETHTOOL_STAT(rx_len_jabber_pkts),
	EMAC_ETHTOOL_STAT(rx_64_pkts),
	EMAC_ETHTOOL_STAT(rx_65_127_pkts),
	EMAC_ETHTOOL_STAT(rx_128_255_pkts),
	EMAC_ETHTOOL_STAT(rx_256_511_pkts),
	EMAC_ETHTOOL_STAT(rx_512_1023_pkts),
	EMAC_ETHTOOL_STAT(rx_1024_1518_pkts),
	EMAC_ETHTOOL_STAT(rx_1519_plus_pkts),
	EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_dma_missed_frame_cnt),
	EMAC_ETHTOOL_STAT(tx_tso_pkts),
	EMAC_ETHTOOL_STAT(tx_tso_bytes),
};
407
408static int emac_set_speed_duplex(struct emac_priv *priv)
409{
410 u32 ctrl;
411
412 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
413 if (priv->duplex)
414 ctrl |= MREGBIT_FULL_DUPLEX_MODE;
415 else
416 ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
417
418 switch (priv->speed) {
419 case SPEED_1000:
420 ctrl |= MREGBIT_SPEED_1000M;
421 break;
422 case SPEED_100:
423 ctrl |= MREGBIT_SPEED_100M;
424 break;
425 case SPEED_10:
426 ctrl |= MREGBIT_SPEED_10M;
427 break;
428 default:
429 pr_err("broken speed: %d\n", priv->speed);
430 return 0;
431 }
432 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
433 pr_info("emac: force link speed:%dM duplex:%s\n",
434 priv->speed, priv->duplex ? "Full": "Half");
435
436 return 0;
437}
438
/*
 * Parse the device-tree fixed-link configuration (in-band-status, the
 * new "fixed-link" child node, or the legacy 5-cell property), store
 * speed/duplex in priv and program the MAC accordingly.
 * Returns 0 on success, -EINVAL on a malformed node, -ENODEV when no
 * fixed-link description is present.
 */
static int emac_set_fixed_link(struct device_node *np, struct emac_priv *priv)
{
	struct fixed_phy_status status = {};
	struct device_node *fixed_link_node;
	u32 fixed_link_prop[5];
	const char *managed;
	int interface;

	if (of_property_read_string(np, "managed", &managed) == 0 &&
	    strcmp(managed, "in-band-status") == 0) {
		/* status is zeroed, namely its .link member */
		goto fix_link;
	}

	/* New binding */
	fixed_link_node = of_get_child_by_name(np, "fixed-link");
	if (fixed_link_node) {
		status.link = 1;
		status.duplex = of_property_read_bool(fixed_link_node,
						      "full-duplex");
		if (of_property_read_u32(fixed_link_node, "speed",
					 &status.speed)) {
			of_node_put(fixed_link_node);
			return -EINVAL;
		}
		status.pause = of_property_read_bool(fixed_link_node, "pause");
		status.asym_pause = of_property_read_bool(fixed_link_node,
							  "asym-pause");
		/* PHY interface mode for the fixed link; default RGMII */
		interface = of_get_phy_mode(fixed_link_node);
		if (interface < 0) {
			priv->interface = PHY_INTERFACE_MODE_RGMII;
			pr_info("no interface for fix-link, use RGMII\n");
		} else {
			priv->interface = interface;
		}

		of_node_put(fixed_link_node);
		goto fix_link;
	}

	/* Old binding: <phy-id duplex speed pause asym-pause> */
	if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
				       ARRAY_SIZE(fixed_link_prop)) == 0) {
		status.link = 1;
		status.duplex = fixed_link_prop[1];
		status.speed = fixed_link_prop[2];
		status.pause = fixed_link_prop[3];
		status.asym_pause = fixed_link_prop[4];
		goto fix_link;
	}

	return -ENODEV;

fix_link:
	/* latch the settings and program the MAC */
	priv->speed = status.speed;
	priv->duplex = status.duplex;

	return emac_set_speed_duplex(priv);
}
498
499void register_dump(struct emac_priv *priv)
500{
501 int i;
502 void __iomem *base = priv->iobase;
503
504 for (i = 0; i < 16; i++) {
505 pr_info("DMA:0x%x:0x%x\n",
506 DMA_CONFIGURATION + i * 4,
507 readl(base + DMA_CONFIGURATION + i * 4));
508 }
509 for (i = 0; i < 60; i++) {
510 pr_info("MAC:0x%x:0x%x\n",
511 MAC_GLOBAL_CONTROL + i * 4,
512 readl(base + MAC_GLOBAL_CONTROL + i * 4));
513 }
514
515 for (i = 0; i < 4; i++) {
516 pr_info("1588:0x%x:0x%x\n",
517 PTP_1588_CTRL + i * 4,
518 readl(base + PTP_1588_CTRL + i * 4));
519 }
520
521 for (i = 0; i < 6; i++) {
522 pr_info("1588:0x%x:0x%x\n",
523 SYS_TIME_GET_LOW + i * 4,
524 readl(base + SYS_TIME_GET_LOW + i * 4));
525 }
526 for (i = 0; i < 5; i++) {
527 pr_info("1588:0x%x:0x%x\n",
528 RX_TIMESTAMP_LOW + i * 4,
529 readl(base + RX_TIMESTAMP_LOW + i * 4));
530 }
531 for (i = 0; i < 2; i++) {
532 pr_info("1588:0x%x:0x%x\n",
533 PTP_1588_IRQ_STS + i * 4,
534 readl(base + PTP_1588_IRQ_STS + i * 4));
535 }
536
537 if (priv->tso) {
538 for (i = 0; i < 18; i++) {
539 pr_info("TSO:0x%x:0x%x\n", i * 4,
540 emac_rd_tso(priv, i * 4));
541 }
542 }
543}
544
/*
 * Hex-dump a packet buffer at debug log level, 8 bytes per line.
 *
 * Fix: the original loop always read bytes i..i+7 and therefore read
 * past the end of the buffer whenever len was not a multiple of 8; the
 * tail is now printed one byte per line.  (The 0x%x pointer print is
 * kept -- this driver targets 32-bit ARM, where the cast is lossless.)
 */
void print_pkt(unsigned char *buf, int len)
{
	int i = 0;

	pr_debug("data len = %d byte, buf addr: 0x%x\n",
		 len, (unsigned int)buf);
	for (i = 0; i + 8 <= len; i = i + 8) {
		pr_debug("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
			 *(buf + i),
			 *(buf + i + 1),
			 *(buf + i + 2),
			 *(buf + i + 3),
			 *(buf + i + 4),
			 *(buf + i + 5),
			 *(buf + i + 6),
			 *(buf + i + 7)
			 );
	}
	/* remaining tail bytes (len not a multiple of 8) */
	for (; i < len; i++)
		pr_debug("0x%02x\n", *(buf + i));
}
564
#ifdef EMAC_DEBUG
/* Dump a descriptor area as 32-bit words, most-significant byte first. */
void print_desc(unsigned char *buf, int len)
{
	int i;

	pr_info("descriptor len = %d byte, buf addr: 0x%x\n",
		len, (unsigned int)buf);
	for (i = 0; i < len; i = i + 4) {
		pr_info("0x%02x%02x%02x%02x\n",
			*(buf + i + 3),
			*(buf + i + 2),
			*(buf + i + 1),
			*(buf + i));
	}
}
#else
/* No-op stub when EMAC_DEBUG is disabled. */
void print_desc(unsigned char *buf, int len)
{

}
#endif
586
/* Name		emac_reset_hw
 * Arguments	priv : pointer to hardware data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Masks all interrupts, disables the MAC TX/RX units and
 *		the DMA, resets the MAC and statistic counters, then
 *		restores the MDIO clock divider.  Serialized against
 *		concurrent MDIO access via mii_mutex.
 */
int emac_reset_hw(struct emac_priv *priv)
{
	mutex_lock(&priv->mii_mutex);
	/* disable all the interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);

	/* disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* stop the DMA */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	/* reset mac, statistic counters */
	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018);

	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000);

	/* re-program the MDIO clock divider after the global reset */
	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
	mutex_unlock(&priv->mii_mutex);
	return 0;
}
616
/* Name		emac_init_hw
 * Arguments	priv : pointer to driver private data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Assumes the controller has previously been reset and is
 *		in a post-reset, uninitialized state.  Programs address
 *		filtering, clears the multicast hash table, sets the
 *		TX/RX FIFO thresholds, soft-resets and configures the
 *		DMA, and programs the MDIO clock divider.  Leaves the
 *		transmit and receive units disabled.
 */
int emac_init_hw(struct emac_priv *priv)
{
	u32 val = 0, threshold;

	mutex_lock(&priv->mii_mutex);
	/* MAC Init
	 * disable transmit and receive units
	 */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* enable mac address 1 filtering */
	//emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0001);
	emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0100);

	/* zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0000);

	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, EMAC_TX_FIFO_DWORDS - 8);

	/* TX start threshold scales with link speed; otherwise
	 * store-and-forward */
	if (priv->speed == SPEED_1000)
		threshold = 1024;
	else if (priv->speed == SPEED_100)
		threshold = 256;
	else
		threshold = TX_STORE_FORWARD_MODE;
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, threshold);

	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, 0xc);

	/* reset dma */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	/* pulse the DMA soft-reset bit */
	emac_wr(priv, DMA_CONFIGURATION, 0x01);
	mdelay(10);
	emac_wr(priv, DMA_CONFIGURATION, 0x00);
	mdelay(10);

	val |= MREGBIT_WAIT_FOR_DONE;
	val |= MREGBIT_STRICT_BURST;
	val |= MREGBIT_DMA_64BIT_MODE;
	val |= MREGBIT_BURST_16WORD; //MREGBIT_BURST_1WORD;

	emac_wr(priv, DMA_CONFIGURATION, val);

	/* MDC Clock Division: AXI-312M/96 = 3.25M */
	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);

	mutex_unlock(&priv->mii_mutex);

	printk("MDIO clock div: 0x%x\n", emac_rd(priv, MAC_MDIO_CLK_DIV));
	return 0;
}
687
688int emac_set_mac_addr(struct emac_priv *priv, unsigned char *addr)
689{
690 emac_wr(priv, MAC_ADDRESS1_HIGH, (addr[1] << 8 | addr[0]));
691 emac_wr(priv, MAC_ADDRESS1_MED, (addr[3] << 8 | addr[2]));
692 emac_wr(priv, MAC_ADDRESS1_LOW, (addr[5] << 8 | addr[4]));
693
694 return 0;
695}
696
697void emac_set_fc_source_addr(struct emac_priv *priv, unsigned char *addr)
698{
699 emac_wr(priv, MAC_FC_SOURCE_ADDRESS_HIGH, (addr[1] << 8 | addr[0]));
700 emac_wr(priv, MAC_FC_SOURCE_ADDRESS_MED, (addr[3] << 8 | addr[2]));
701 emac_wr(priv, MAC_FC_SOURCE_ADDRESS_LOW, (addr[5] << 8 | addr[4]));
702
703 return;
704}
705
/* Issue a TX poll demand so the DMA rescans the transmit descriptor ring. */
static inline void emac_dma_start_transmit(struct emac_priv *priv)
{
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF);
}
710
/* Issue an RX poll demand so the DMA rescans the receive descriptor ring. */
static inline void emac_dma_start_receive(struct emac_priv *priv)
{
	emac_wr(priv, DMA_RECEIVE_POLL_DEMAND, 0xFF);
}
715
716#ifdef CONFIG_ASR_EMAC_NAPI
/*
 * Unmask DMA completion interrupts for one direction (NAPI re-enable
 * path).  @tx: nonzero -> TX transfer-done; zero -> RX transfer-done +
 * missed-frame, plus the TSO checksum-engine RX interrupts when TSO is
 * in use.
 */
void emac_enable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
		       MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
				    TSO_AP_RX_INTR_ENA_CSUM_DONE |
				    TSO_AP_RX_INTR_ENA_CSUM_ERR);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}
736
/*
 * Mask DMA completion interrupts for one direction (entering NAPI
 * polling).  Mirror of emac_enable_interrupt(); also masks the TSO
 * checksum-engine RX interrupts when TSO is in use.
 */
void emac_disable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val &= ~(MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			 MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA, 0x0);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}
754#endif
755
756bool emac_is_rmii_interface(struct emac_priv *priv)
757{
758 const struct emac_regdata *regdata = priv->regdata;
759 void __iomem* apmu;
760 u32 val;
761
762 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
763 if (apmu == NULL) {
764 pr_err("error to ioremap APMU base\n");
765 return -ENOMEM;
766 }
767
768 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
769 val &= (0x1 << regdata->mac_intf_sel_shift);
770 if (val)
771 return false;
772 else
773 return true;
774}
775
776void emac_config_phy_interrupt(struct emac_priv *priv, int enable)
777{
778 const struct emac_regdata *regdata = priv->regdata;
779 void __iomem* apmu;
780 u32 val;
781
782 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
783 if (apmu == NULL) {
784 pr_err("error to ioremap APMU base\n");
785 return;
786 }
787
788 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
789 if (enable)
790 val |= 0x1 << regdata->phy_intr_enable_shift;
791 else
792 val &= ~(0x1 << regdata->phy_intr_enable_shift);
793 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
794 iounmap(apmu);
795 return;
796}
797
798void emac_phy_interface_config(struct emac_priv *priv, int phy_interface)
799{
800 const struct emac_regdata *regdata = priv->regdata;
801 void __iomem* apmu;
802 u32 val;
803
804 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
805 if (apmu == NULL) {
806 pr_err("error to ioremap APMU base\n");
807 return;
808 }
809
810 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
811 if (PHY_INTERFACE_MODE_RMII == phy_interface) {
812 val &= ~(0x1 << regdata->mac_intf_sel_shift);
813 printk("===> set eamc interface: rmii\n");
814 } else {
815 val |= 0x1 << regdata->mac_intf_sel_shift;
816 printk("===> set eamc interface: rgmii\n");
817 }
818 val |= 0x1 << regdata->axi_mst_single_id_shift;
819 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
820
821 iounmap(apmu);
822 priv->interface = phy_interface;
823 return;
824}
825
/*
 * Configure the GMAC IO voltage through the AIB register on SoCs that
 * support dual-voltage GMAC pads.  Every AIB access must be preceded by
 * the AKEY_ASFAR/AKEY_ASSAR unlock sequence written to APBC_ASFAR.
 */
static void emac_set_aib_power_domain(struct emac_priv *priv)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *aib_emac_io;
	void __iomem *apbc_asfar;
	u32 tmp;

	if (!regdata->support_dual_vol_power)
		return;

	aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
	apbc_asfar = ioremap(APBC_ASFAR, 8);

	/* unlock, then read the current AIB setting */
	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	/*
	 * NOTE(review): power_domain is unconditionally forced to 0 here,
	 * making the 3.3v branch below dead code -- the IO is always set
	 * to 1.8v (appears related to CLOSE_AIB_POWER_DOMAIN; confirm
	 * this override is intentional).
	 */
	priv->power_domain = 0;
	/* 0= power down, only set power down when vol = 0 */
	if (priv->power_domain) {
		tmp &= ~(0x1 << 2); /* 3.3v */
		printk("===> emac set io to 3.3v\n");
	} else {
		tmp |= 0x1 << 2; /* 1.8v */
		printk("===> emac set io to 1.8v\n");
	}

	/* unlock again, then write the new setting */
	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	writel(tmp, aib_emac_io);

	/* unlock and read back for verification in the log */
	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	printk("===> emac AIB read back: 0x%x\n", tmp);

	iounmap(apbc_asfar);
	iounmap(aib_emac_io);
}
864
/*
 * Delayed-work handler that keeps transmitting pause frames while the
 * RX ring is near overflow (armed by emac_check_ring_and_send_pause,
 * cancelled there once pressure drops).  Re-arms itself every 20 ms at
 * 1 Gb/s or 300 ms at 100 Mb/s: with pause time 0xffff the peer stops
 * for ~34 ms (1000M) / ~336 ms (100M), so these periods were found by
 * test to keep the link partner continuously paused.
 */
static void emac_pause_generate_work_fuc(struct work_struct *work)
{
	struct emac_priv *priv= container_of(work, struct emac_priv, emac_pause_work.work);
	int time_nxt = 0;

	time_nxt = (priv->speed == SPEED_1000) ? 20 : 300;
	if (!priv->pause.pause_time_max) {
		/* program the maximum pause time once per pause episode */
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0xffff);
		priv->pause.pause_time_max = 1;
	}

	emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
	schedule_delayed_work(&priv->emac_pause_work, msecs_to_jiffies(time_nxt));
	return;
}
881
/*
 * RX-path flow-control watermark check:
 *  - if the descriptor high_water slots ahead of next-to-clean is
 *    already CPU-owned (OWN == 0, i.e. hardware is running out of free
 *    slots), start the pause-frame worker;
 *  - once the low_water slot is hardware-owned again, cancel the worker
 *    and send a zero-time pause frame so the link partner resumes.
 */
static inline void emac_check_ring_and_send_pause(struct emac_priv *priv)
{
	int pos;
	int high_water;
	int low_water;
	struct emac_rx_desc *rx_desc;
	struct emac_desc_ring *rx_ring;

	rx_ring = &priv->rx_ring;
	pos = rx_ring->nxt_clean;
	high_water = (pos + priv->pause.high_water) % priv->rx_ring.total_cnt;
	low_water = (pos + priv->pause.low_water) % priv->rx_ring.total_cnt;

	rx_desc = emac_get_rx_desc(priv, high_water);
	if (priv->pause.pause_sending == 0 && rx_desc->OWN == 0) {
		schedule_delayed_work(&priv->emac_pause_work, 0);
		priv->pause.pause_sending = 1;
	}

	rx_desc = emac_get_rx_desc(priv, low_water);
	if (rx_desc->OWN && priv->pause.pause_sending) {
		cancel_delayed_work_sync(&priv->emac_pause_work);
		/* pause time 0 tells the peer it may transmit again */
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0);
		emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
		priv->pause.pause_time_max = 0;
		priv->pause.pause_sending = 0;
	}
}
910
/* Name		emac_sw_init
 * Arguments	priv : pointer to driver private data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Initializes driver-private software state (default RX
 *		buffer length, the MDIO mutex and the stats/TX/interrupt
 *		spinlocks) before any hardware is touched.
 */
static int emac_sw_init(struct emac_priv *priv)
{
	/* largest payload a ring-buffer slot can carry (see EMAC_SKBRB_*) */
	priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;

	mutex_init(&priv->mii_mutex);
	spin_lock_init(&priv->spStatsLock);
	spin_lock_init(&priv->spTxLock);
	spin_lock_init(&priv->intr_lock);

	return 0;
}
929
930static int emac_check_ptp_packet(struct emac_priv *priv,
931 struct sk_buff *skb, int txrx)
932{
933 struct ethhdr *eth = (struct ethhdr *)skb->data;
934 struct ptp_header *ptph = NULL;
935 struct iphdr *iph;
936 struct udphdr *udph;
937 int msg_type, msg_id;
938 int ts;
939
940 if (eth->h_proto == htons(ETH_P_1588)) {
941 netdev_dbg(priv->ndev, "get PTP packet over ETH\n");
942 ptph = (struct ptp_header *)((u8 *)eth + sizeof(struct ethhdr));
943 } else if (eth->h_proto == htons(ETH_P_IP)) {
944 iph = (struct iphdr *)((u8 *)eth + sizeof(struct ethhdr));
945 if (iph->protocol != IPPROTO_UDP)
946 return -1;
947
948 udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
949 if ((htons(udph->dest) != PTP_EVENT_PORT ||
950 htons(udph->source) != PTP_EVENT_PORT))
951 return -1;
952
953 netdev_dbg(priv->ndev, "get PTP packet over UDP\n");
954 ptph = (struct ptp_header *)((u8 *)udph + sizeof(struct udphdr));
955 } else {
956 return -1;
957 }
958
959 msg_id = -1;
960 ts = ptph->tsmt & 0xF0;
961 msg_type = (ptph->tsmt) & 0x0F;
962 if (txrx) {
963 if (msg_type == MSG_SYNC) {
964 if (ts)
965 msg_id = MSG_PDELAY_REQ;
966 else
967 msg_id = MSG_DELAY_REQ;
968 } else if (msg_type == MSG_DELAY_REQ) {
969 msg_id = MSG_SYNC;
970 } else if (msg_type == MSG_PDELAY_REQ) {
971 msg_id = MSG_PDELAY_RESP;
972 memcpy(&priv->sourcePortIdentity,
973 &ptph->sourcePortIdentity,
974 sizeof(struct PortIdentity));
975 } else if (msg_type == MSG_PDELAY_RESP) {
976 msg_id = MSG_PDELAY_REQ;
977 }
978 } else {
979 netdev_dbg(priv->ndev, "RX timestamp for message type %d\n",
980 ptph->tsmt);
981
982 if (msg_type == MSG_PDELAY_RESP) {
983 struct pdelay_resp_msg *presp = (struct pdelay_resp_msg *)ptph;
984
985 /*
986 * Change to monitor SYNC packet if pdelay response
987 * received for same clock indentity.
988 */
989 if (!memcmp(&presp->requestingPortIdentity.clockIdentity,
990 &priv->sourcePortIdentity.clockIdentity,
991 sizeof(struct ClockIdentity))) {
992 msg_id = MSG_SYNC;
993 }
994 }
995 }
996
997 /*
998 * Since some platform not support to timestamp two or more
999 * message type, so change here.
1000 */
1001 if (msg_id >= 0) {
1002 if (priv->regdata->ptp_rx_ts_all_events) {
1003 msg_id = ALL_EVENTS;
1004 msg_id |= ts | ts << 8 | ts << 16 | ts << 24;
1005 } else {
1006 msg_id |= ts;
1007 }
1008
1009 priv->hwptp->config_hw_tstamping(priv, 1, PTP_V2_L2_L4, msg_id);
1010 }
1011
1012 return ptph->tsmt;
1013}
1014
/* emac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the register & pass it to stack.
 * and also perform some sanity checks.
 */
static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* retarget the HW timestamp filter for the expected reply */
	emac_check_ptp_packet(priv, skb, 1);

	/* get the valid tstamp */
	ns = priv->hwptp->get_tx_timestamp(priv);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);

	/* ensure the timestamp write is visible before handing off */
	wmb();
	netdev_dbg(priv->ndev, "get valid TX hw timestamp %llu\n", ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);

	return;
}
/* emac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p,
				 struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (p->ptp_pkt && p->rx_timestamp) {
		/* retarget the HW timestamp filter for the next message */
		emac_check_ptp_packet(priv, skb, 0);
		ns = priv->hwptp->get_rx_timestamp(priv);
		netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n");
	}
}
1079
1080/**
1081 * emac_hwtstamp_set - control hardware timestamping.
1082 * @dev: device pointer.
1083 * @ifr: An IOCTL specific structure, that can contain a pointer to
1084 * a proprietary structure used to pass information to the driver.
1085 * Description:
1086 * This function configures the MAC to enable/disable both outgoing(TX)
1087 * and incoming(RX) packets time stamping based on user input.
1088 * Return Value:
1089 * 0 on success and an appropriate -ve integer on failure.
1090 */
1091static int emac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1092{
1093 struct emac_priv *priv = netdev_priv(dev);
1094 struct hwtstamp_config config;
1095 struct timespec64 now;
1096 u64 ns_ptp;
1097 u32 ptp_event_msg_id = 0;
1098 u32 rx_ptp_type = 0;
1099
1100 if (!priv->ptp_support) {
1101 netdev_alert(priv->ndev, "No support for HW time stamping\n");
1102 priv->hwts_tx_en = 0;
1103 priv->hwts_rx_en = 0;
1104
1105 return -EOPNOTSUPP;
1106 }
1107
1108 if (copy_from_user(&config, ifr->ifr_data,
1109 sizeof(struct hwtstamp_config)))
1110 return -EFAULT;
1111
1112 netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
1113 __func__, config.flags, config.tx_type, config.rx_filter);
1114
1115 /* reserved for future extensions */
1116 if (config.flags)
1117 return -EINVAL;
1118
1119 if (config.tx_type != HWTSTAMP_TX_OFF &&
1120 config.tx_type != HWTSTAMP_TX_ON)
1121 return -ERANGE;
1122
1123 switch (config.rx_filter) {
1124 case HWTSTAMP_FILTER_NONE:
1125 /* time stamp no incoming packet at all */
1126 config.rx_filter = HWTSTAMP_FILTER_NONE;
1127 break;
1128
1129 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1130 /* PTP v1, UDP, Sync packet */
1131 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
1132 /* take time stamp for SYNC messages only */
1133 ptp_event_msg_id = MSG_SYNC;
1134 rx_ptp_type = PTP_V1_L4_ONLY;
1135 break;
1136
1137 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1138 /* PTP v1, UDP, Delay_req packet */
1139 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
1140 /* take time stamp for Delay_Req messages only */
1141 ptp_event_msg_id = MSG_DELAY_REQ;
1142 rx_ptp_type = PTP_V1_L4_ONLY;
1143 break;
1144
1145 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1146 /* PTP v2, UDP, Sync packet */
1147 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
1148 /* take time stamp for SYNC messages only */
1149 ptp_event_msg_id = MSG_SYNC;
1150 rx_ptp_type = PTP_V2_L2_L4;
1151 break;
1152
1153 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1154 /* PTP v2, UDP, Delay_req packet */
1155 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
1156 /* take time stamp for Delay_Req messages only */
1157 ptp_event_msg_id = MSG_DELAY_REQ;
1158 rx_ptp_type = PTP_V2_L2_L4;
1159 break;
1160
1161 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1162 /* PTP v2/802.AS1 any layer, any kind of event packet */
1163 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1164
1165 /*
1166 * IF not support ALL EVENTS, default timestamp SYNC packet,
1167 * changed to MSG_DELAY_REQ automactically if needed
1168 */
1169 if (priv->regdata->ptp_rx_ts_all_events)
1170 ptp_event_msg_id = ALL_EVENTS;
1171 else
1172 ptp_event_msg_id = MSG_SYNC;
1173
1174 rx_ptp_type = PTP_V2_L2_L4;
1175 break;
1176
1177 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1178 /* PTP v2/802.AS1, any layer, Sync packet */
1179 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
1180 /* take time stamp for SYNC messages only */
1181 ptp_event_msg_id = MSG_SYNC;
1182 rx_ptp_type = PTP_V2_L2_L4;
1183 break;
1184
1185 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1186 /* PTP v2/802.AS1, any layer, Delay_req packet */
1187 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
1188 /* take time stamp for Delay_Req messages only */
1189 ptp_event_msg_id = MSG_DELAY_REQ;
1190 rx_ptp_type = PTP_V2_L2_L4;
1191 break;
1192 default:
1193 return -ERANGE;
1194 }
1195
1196 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
1197 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
1198
1199 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
1200 priv->hwptp->config_hw_tstamping(priv, 0, 0, 0);
1201 else {
1202
1203 priv->hwptp->config_hw_tstamping(priv, 1,
1204 rx_ptp_type, ptp_event_msg_id);
1205
1206 /* initialize system time */
1207 ktime_get_real_ts64(&now);
1208 priv->hwptp->init_systime(priv, timespec64_to_ns(&now));
1209
1210 /* program Increment reg */
1211 priv->hwptp->config_systime_increment(priv);
1212
1213 ns_ptp = priv->hwptp->get_phc_time(priv);
1214 ktime_get_real_ts64(&now);
1215 /* check the diff between ptp timer and system time */
1216 if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000)
1217 priv->hwptp->init_systime(priv,
1218 timespec64_to_ns(&now));
1219 }
1220
1221 memcpy(&priv->tstamp_config, &config, sizeof(config));
1222
1223 return copy_to_user(ifr->ifr_data, &config,
1224 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
1225}
1226
1227/**
1228 * emac_hwtstamp_get - read hardware timestamping.
1229 * @dev: device pointer.
1230 * @ifr: An IOCTL specific structure, that can contain a pointer to
1231 * a proprietary structure used to pass information to the driver.
1232 * Description:
1233 * This function obtain the current hardware timestamping settings
1234 as requested.
1235 */
1236static int emac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1237{
1238 struct emac_priv *priv = netdev_priv(dev);
1239 struct hwtstamp_config *config = &priv->tstamp_config;
1240
1241 if (!priv->ptp_support)
1242 return -EOPNOTSUPP;
1243
1244 return copy_to_user(ifr->ifr_data, config,
1245 sizeof(*config)) ? -EFAULT : 0;
1246}
1247
1248/* Name emac_ioctl
1249 * Arguments pstNetdev : pointer to net_device structure
1250 * pstIfReq : pointer to interface request structure used.
1251 * u32Cmd : IOCTL command number
1252 * Return Status: 0 - Success; non-zero - Fail
1253 * Description It is called by upper layer and
1254 * handling various task IOCTL commands.
1255 */
1256static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1257{
1258 int ret = -EOPNOTSUPP;
1259
1260 if (!netif_running(ndev))
1261 return -EINVAL;
1262
1263 switch (cmd) {
1264 case SIOCGMIIPHY:
1265 case SIOCGMIIREG:
1266 case SIOCSMIIREG:
1267 if (!ndev->phydev)
1268 return -EINVAL;
1269 ret = phy_mii_ioctl(ndev->phydev, rq, cmd);
1270 break;
1271 case SIOCSHWTSTAMP:
1272 ret = emac_hwtstamp_set(ndev, rq);
1273 break;
1274 case SIOCGHWTSTAMP:
1275 ret = emac_hwtstamp_get(ndev, rq);
1276 break;
1277 default:
1278 break;
1279 }
1280
1281 return ret;
1282}
1283
1284static irqreturn_t emac_wakeup_handler(int irq, void *dev_id)
1285{
1286 struct net_device *ndev = (struct net_device *)dev_id;
1287 struct emac_priv *priv = netdev_priv(ndev);
1288 u32 ctrl;
1289
1290 emac_set_axi_bus_clock(priv, 1);
1291 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
1292 if (!(ctrl & (MREGBIT_UNICAST_WAKEUP_MODE |
1293 MREGBIT_MAGIC_PACKET_WAKEUP_MODE)))
1294 return IRQ_NONE;
1295
1296 ctrl &= ~(MREGBIT_UNICAST_WAKEUP_MODE |
1297 MREGBIT_MAGIC_PACKET_WAKEUP_MODE);
1298 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
1299 return IRQ_HANDLED;
1300}
1301
/* emac_irq_tso - interrupt handler for the TSO/checksum-offload engine.
 * @irq: interrupt number
 * @dev_id: pointer passed to request_irq (the net_device)
 *
 * Handles RX checksum-done, TX TSO/checksum-done and error interrupts
 * from the TSO block.  Each status register is acknowledged by writing
 * the read value back.
 */
static irqreturn_t emac_irq_tso(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;

	/* handle rx */
	status = emac_rd_tso(priv, TSO_AP_RX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_RX_INTR_STS=0x%x", status);

		if (status & TSO_AP_RX_INTR_ENA_CSUM_DONE) {
#ifdef CONFIG_ASR_EMAC_NAPI
			/* Mask RX interrupts and defer the work to NAPI;
			 * emac_disable_interrupt(priv, 0) masks RX.
			 */
			if (likely(napi_schedule_prep(&priv->rx_napi))) {
				unsigned long flags;

				spin_lock_irqsave(&priv->intr_lock, flags);
				emac_disable_interrupt(priv, 0);
				spin_unlock_irqrestore(&priv->intr_lock, flags);
				__napi_schedule(&priv->rx_napi);
			}
#else
			emac_rx_clean_desc(priv);
#endif
		}

#ifdef EMAC_DEBUG
		if (status & TSO_AP_RX_INTR_ENA_CSUM_ERR)
			pr_err("rx checksum err irq\n");
#endif
		/* clear rx status (write-1-to-clear) */
		emac_wr_tso(priv, TSO_AP_RX_INTR_STS, status);
	}

	/* handle tx */
	status = emac_rd_tso(priv, TSO_AP_TX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_TX_INTR_STS=0x%x\n", status);
		/* TSO or checksum completion: kick the TX DMA engine. */
		if (status & TSO_AP_TX_INTR_TSO_DONE) {
			emac_print("TX TSO done\n");
			emac_dma_start_transmit(priv);
		}

		if (status & TSO_AP_TX_INTR_CSUM_DONE) {
			emac_print("TX checksum done\n");
			emac_dma_start_transmit(priv);
		}

		/* clear tx status */
		emac_wr_tso(priv, TSO_AP_TX_INTR_STS, status);
	}

	/* handle err */
	status = emac_rd_tso(priv, TSO_ERR_INTR_STS);
	if (status) {
		pr_err("TSO: TX/RX ERR, status=0x%x\n", status);
		emac_wr_tso(priv, TSO_ERR_INTR_STS, status);
	}

	return IRQ_HANDLED;
}
1363
1364
/* Name		emac_interrupt_handler
 * Arguments	irq : irq number for which the interrupt is fired
 *		dev_id : pointer was passed to request_irq and same pointer
 *		is passed back to handler
 * Return	irqreturn_t : integer value
 * Description	Main DMA interrupt handler.  TX/RX completion work is
 *		deferred to NAPI when enabled (interrupts are masked until
 *		the poll runs); all handled status bits are collected in
 *		"clr" and acknowledged with a single write at the end.
 */
static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;
	u32 clr = 0;

	/* read the status register for IRQ received */
	status = emac_rd(priv, DMA_STATUS_IRQ);

	/* Check if emac is up; if not, just acknowledge and bail out. */
	if (test_bit(EMAC_DOWN, &priv->state)) {
		emac_wr(priv, DMA_STATUS_IRQ, status & 0x1F7);
		return IRQ_HANDLED;
	}

	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
#ifdef CONFIG_ASR_EMAC_NAPI
		/* Mask TX interrupts and let the TX NAPI poll reclaim
		 * descriptors; emac_disable_interrupt(priv, 1) masks TX.
		 */
		if (likely(napi_schedule_prep(&priv->tx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 1);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->tx_napi);
		}
#else
		emac_tx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;

	if (status & (MREGBIT_RECEIVE_TRANSFER_DONE_IRQ |
		      MREGBIT_RECEIVE_MISSED_FRAME_IRQ)) {
		if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ)
			clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;

		if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
			clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;

		/* With TSO, poke the TSO RX engine to keep it fetching. */
		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);

#ifdef CONFIG_ASR_EMAC_NAPI
		/* Mask RX interrupts and defer the work to the RX poll. */
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 0);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->rx_napi);
		}
#else
		emac_rx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;

	/* Acknowledge every bit we handled in one write. */
	emac_wr(priv, DMA_STATUS_IRQ, clr);

	return IRQ_HANDLED;
}
1445
1446/* Name emac_command_options
1447 * Arguments priv : pointer to driver private data structure
1448 * Return none
1449 * Description This function actually handles the command line para passed
1450 * when the driver is loaded at the command prompt.
1451 * It parses the parameters and validates them for valid values.
1452 */
1453void emac_command_options(struct emac_priv *priv)
1454{
1455 int pages = totalram_pages();
1456
1457 if (pages <= (EMAC_SMALL_RING_MEM_LIMIT >> PAGE_SHIFT))
1458 priv->rx_ring.total_cnt = EMAC_SMALL_RX_RING_SIZE;
1459 else
1460 priv->rx_ring.total_cnt = EMAC_RX_RING_SIZE;
1461 priv->tx_ring.total_cnt = EMAC_TX_RING_SIZE;
1462
1463 pr_info("emac: rx_ring=%d, tx_ring=%d, pages=%d\n",
1464 priv->rx_ring.total_cnt, priv->tx_ring.total_cnt, pages);
1465}
1466
1467/* Name emac_configure_tx
1468 * Arguments priv : pointer to driver private data structure
1469 * Return none
1470 * Description Configures the transmit unit of the device
1471 */
1472static void emac_configure_tx(struct emac_priv *priv)
1473{
1474 u32 val;
1475
1476 /* set the transmit base address */
1477 val = (u32)(priv->tx_ring.desc_dma_addr);
1478
1479 emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
1480
1481 /* Tx Inter Packet Gap value and enable the transmit */
1482 val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
1483 val &= (~MREGBIT_IFG_LEN);
1484 val |= MREGBIT_TRANSMIT_ENABLE;
1485 val |= MREGBIT_TRANSMIT_AUTO_RETRY;
1486 emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
1487
1488 emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00);
1489
1490 /* start tx dma */
1491 val = emac_rd(priv, DMA_CONTROL);
1492 val |= MREGBIT_START_STOP_TRANSMIT_DMA;
1493 emac_wr(priv, DMA_CONTROL, val);
1494}
1495
1496/* Name emac_configure_rx
1497 * Arguments priv : pointer to driver private data structure
1498 * Return none
1499 * Description Configures the receive unit of the device
1500 */
1501static void emac_configure_rx(struct emac_priv *priv)
1502{
1503 u32 val;
1504
1505 /* set the receive base address */
1506 val = (u32)(priv->rx_ring.desc_dma_addr);
1507 emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
1508
1509 /* enable the receive */
1510 val = emac_rd(priv, MAC_RECEIVE_CONTROL);
1511 val |= MREGBIT_RECEIVE_ENABLE;
1512 val |= MREGBIT_STORE_FORWARD;
1513 val |= MREGBIT_ACOOUNT_VLAN;
1514 emac_wr(priv, MAC_RECEIVE_CONTROL, val);
1515
1516 /* start rx dma */
1517 val = emac_rd(priv, DMA_CONTROL);
1518 val |= MREGBIT_START_STOP_RECEIVE_DMA;
1519 emac_wr(priv, DMA_CONTROL, val);
1520}
1521
1522/* Name emac_clean_tx_desc_ring
1523 * Arguments priv : pointer to driver private data structure
1524 * Return none
1525 * Description Freeing the TX resources allocated earlier.
1526 */
1527static void emac_clean_tx_desc_ring(struct emac_priv *priv)
1528{
1529 struct emac_desc_ring *tx_ring = &priv->tx_ring;
1530 struct emac_desc_buffer *tx_buf;
1531 u32 i;
1532
1533 /* Free all the Tx ring sk_buffs */
1534 for (i = 0; i < tx_ring->total_cnt; i++) {
1535 tx_buf = &tx_ring->desc_buf[i];
1536
1537 if (tx_buf->dma_addr) {
1538 dma_unmap_page(&priv->pdev->dev,
1539 tx_buf->dma_addr,
1540 tx_buf->dma_len,
1541 DMA_TO_DEVICE);
1542 tx_buf->dma_addr = 0;
1543 }
1544
1545 if (tx_buf->skb) {
1546 dev_kfree_skb_any(tx_buf->skb);
1547 tx_buf->skb = NULL;
1548 }
1549 }
1550
1551 tx_ring->nxt_use = 0;
1552 tx_ring->nxt_clean = 0;
1553}
1554
1555/* Name emac_clean_rx_desc_ring
1556 * Arguments priv : pointer to driver private data structure
1557 * Return none
1558 * Description Freeing the RX resources allocated earlier.
1559 */
1560static void emac_clean_rx_desc_ring(struct emac_priv *priv)
1561{
1562 struct emac_desc_ring *rx_ring;
1563 struct emac_desc_buffer *rx_buf;
1564 u32 i;
1565
1566 rx_ring = &priv->rx_ring;
1567
1568 /* Free all the Rx ring sk_buffs */
1569 for (i = 0; i < rx_ring->total_cnt; i++) {
1570 rx_buf = &rx_ring->desc_buf[i];
1571 if (rx_buf->skb) {
1572 emac_unmap_single(&priv->pdev->dev,
1573 rx_buf->dma_addr,
1574 rx_buf->dma_len,
1575 DMA_FROM_DEVICE);
1576 dev_kfree_skb(rx_buf->skb);
1577 rx_buf->skb = NULL;
1578 }
1579
1580 if (rx_buf->buff_addr) {
1581#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
1582 kfree(rx_buf->buff_addr);
1583#endif
1584 rx_buf->buff_addr = NULL;
1585 }
1586 }
1587
1588 rx_ring->nxt_clean = 0;
1589 rx_ring->nxt_use = 0;
1590}
1591
1592void emac_ptp_init(struct emac_priv *priv)
1593{
1594 int ret;
1595
1596 if (priv->ptp_support) {
1597 ret = clk_prepare_enable(priv->ptp_clk);
1598 if (ret < 0) {
1599 pr_warning("ptp clock failed to enable \n");
1600 priv->ptp_clk = NULL;
1601 }
1602
1603 emac_ptp_register(priv);
1604
1605 if (IS_ERR_OR_NULL(priv->ptp_clock)) {
1606 priv->ptp_support = 0;
1607 pr_warning("disable PTP due to clock not enabled\n");
1608 }
1609 }
1610}
1611
1612void emac_ptp_deinit(struct emac_priv *priv)
1613{
1614 if (priv->ptp_support) {
1615 if (priv->ptp_clk)
1616 clk_disable_unprepare(priv->ptp_clk);
1617
1618 emac_ptp_unregister(priv);
1619 }
1620}
1621
1622static void emac_rx_timer_arm(struct emac_priv *priv)
1623{
1624 u32 rx_fill_timer = EMAC_RX_FILL_TIMER_US;
1625
1626 if (!rx_fill_timer)
1627 return;
1628
1629 if (hrtimer_is_queued(&priv->rx_timer))
1630 return;
1631
1632 hrtimer_start(&priv->rx_timer,
1633 ns_to_ktime(rx_fill_timer) * NSEC_PER_USEC,
1634 HRTIMER_MODE_REL);
1635}
1636
1637static enum hrtimer_restart emac_rx_timer(struct hrtimer *t)
1638{
1639 struct emac_priv *priv = container_of(t, struct emac_priv, rx_timer);
1640 struct napi_struct *napi = &priv->rx_napi;
1641
1642 if (likely(napi_schedule_prep(napi))) {
1643 unsigned long flags;
1644
1645 spin_lock_irqsave(&priv->intr_lock, flags);
1646 emac_disable_interrupt(priv, 0);
1647 spin_unlock_irqrestore(&priv->intr_lock, flags);
1648 __napi_schedule(napi);
1649 }
1650
1651 return HRTIMER_NORESTART;
1652}
1653
1654static void emac_tx_timer_arm(struct emac_priv *priv)
1655{
1656 u32 tx_coal_timer = EMAC_TX_COAL_TIMER_US;
1657
1658 if (!tx_coal_timer)
1659 return;
1660
1661 if (hrtimer_is_queued(&priv->tx_timer))
1662 return;
1663
1664 hrtimer_start(&priv->tx_timer,
1665 ns_to_ktime(tx_coal_timer) * NSEC_PER_USEC,
1666 HRTIMER_MODE_REL);
1667}
1668
1669static enum hrtimer_restart emac_tx_timer(struct hrtimer *t)
1670{
1671 struct emac_priv *priv = container_of(t, struct emac_priv, tx_timer);
1672 struct napi_struct *napi = &priv->tx_napi;
1673
1674 if (priv->tso) {
1675 emac_dma_start_transmit(priv);
1676 return HRTIMER_NORESTART;
1677 }
1678
1679 if (likely(napi_schedule_prep(napi))) {
1680 unsigned long flags;
1681
1682 spin_lock_irqsave(&priv->intr_lock, flags);
1683 emac_disable_interrupt(priv, 1);
1684 spin_unlock_irqrestore(&priv->intr_lock, flags);
1685 __napi_schedule(napi);
1686 }
1687
1688 return HRTIMER_NORESTART;
1689}
1690
1691
1692static int emac_tso_config(struct emac_priv *priv)
1693{
1694 struct emac_desc_ring * tx_ring = &priv->tx_ring;
1695 u32 val = 0;
1696
1697 /* reset */
1698 emac_wr_tso(priv, TSO_CONFIG, TSO_CONFIG_RST);
1699 mdelay(1);
1700 emac_wr_tso(priv, TSO_CONFIG, 0x0);
1701
1702 emac_wr_tso(priv, TSO_DMA_CONFIG, 0x2 << 8);
1703
1704 /* rx */
1705 /* set the transmit base address */
1706 val = (u32)(priv->rx_ring.desc_dma_addr);
1707 emac_wr_tso(priv, TSO_RX_DESC_BA, val >> 1);
1708 emac_wr_tso(priv, TSO_RX_AUTO_POLL_CNT, 0x0);
1709
1710 /* tx */
1711 val = (u32)(priv->tx_ring.desc_dma_addr);
1712 emac_wr_tso(priv, TSO_TX_DESC_BA, val >> 1);
1713
1714 priv->tso_hdr = dma_alloc_coherent(&priv->pdev->dev,
1715 tx_ring->total_cnt * 0x80,
1716 &priv->tso_hdr_addr,
1717 GFP_KERNEL | __GFP_ZERO);
1718 if (!priv->tso_hdr) {
1719 pr_err("Memory allocation failed for tso_hdr\n");
1720 return -ENOMEM;
1721 }
1722
1723 val = (u32)(priv->tso_hdr_addr);
1724 emac_wr_tso(priv, TSO_TX_HDR_BA, val >> 1);
1725 emac_wr_tso(priv, TSO_TX_HDR_CTR, tx_ring->total_cnt);
1726 emac_wr_tso(priv, TSO_TX_AUTO_POLL_CNT, 0x0);
1727
1728 /* enable tx/rx tso/coe */
1729 emac_wr_tso(priv, TSO_CONFIG,
1730 TSO_CONFIG_RX_EN | TSO_CONFIG_TX_EN | TSO_CONFIG_RX_CSUM_EN);
1731
1732 /* enable tx/rx/err interrupt */
1733 emac_wr_tso(priv, TSO_ERR_INTR_ENA, 0xF0007);
1734 emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
1735 TSO_AP_RX_INTR_ENA_CSUM_DONE | TSO_AP_RX_INTR_ENA_CSUM_ERR);
1736#if 1
1737 emac_wr_tso(priv, TSO_AP_TX_INTR_ENA,
1738 TSO_AP_TX_INTR_ENA_TSO_DONE | TSO_AP_TX_INTR_ENA_CSUM_DONE);
1739#else
1740 emac_wr_tso(priv, TSO_AP_TX_INTR_ENA, 0x0);
1741#endif
1742 return 0;
1743}
1744
1745/* Name emac_up
1746 * Arguments priv : pointer to driver private data structure
1747 * Return Status: 0 - Success; non-zero - Fail
1748 * Description This function is called from emac_open and
1749 * performs the things when net interface is about to up.
1750 * It configues the Tx and Rx unit of the device and
1751 * registers interrupt handler.
1752 * It also starts one watchdog timer to monitor
1753 * the net interface link status.
1754 */
1755int emac_up(struct emac_priv *priv)
1756{
1757 struct net_device *ndev = priv->ndev;
1758 int ret, val;
b.liub17525e2025-05-14 17:22:29 +08001759#if CLOSE_AIB_POWER_DOMAIN
1760 void __iomem *aib_emac_io;
1761 void __iomem *apbc_asfar;
1762 u32 tmp;
1763#endif
b.liue9582032025-04-17 19:18:16 +08001764#ifdef WAN_LAN_AUTO_ADAPT
1765 u32 phy_id;
1766#endif
1767
1768 priv->hw_stats->tx_tso_pkts = 0;
1769 priv->hw_stats->tx_tso_bytes = 0;
1770
1771 ret = emac_phy_connect(ndev);
1772 if (ret) {
1773 pr_err("%s phy_connet failed\n", __func__);
b.liub17525e2025-05-14 17:22:29 +08001774#if CLOSE_AIB_POWER_DOMAIN
1775 printk("===> enter emac_close_aib_power_domain\n");
1776 aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
1777 apbc_asfar = ioremap(APBC_ASFAR, 8);
1778 writel(AKEY_ASFAR, apbc_asfar);
1779 writel(AKEY_ASSAR, apbc_asfar + 4);
1780 writel(0x81, aib_emac_io);
1781 writel(AKEY_ASFAR, apbc_asfar);
1782 writel(AKEY_ASSAR, apbc_asfar + 4);
1783 tmp = readl(aib_emac_io);
1784 iounmap(apbc_asfar);
1785 iounmap(aib_emac_io);
1786 printk("===> exit emac_close_aib_power_domain = 0x%x\n", tmp);
1787#endif
b.liue9582032025-04-17 19:18:16 +08001788 return ret;
1789 }
1790
1791 if (!priv->en_suspend)
1792 pm_stay_awake(&priv->pdev->dev);
1793 pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
1794
1795 clk_phase_set(priv, TX_PHASE);
1796 clk_phase_set(priv, RX_PHASE);
1797
1798 /* init hardware */
1799 emac_init_hw(priv);
1800
1801 emac_ptp_init(priv);
1802
1803 emac_set_mac_addr(priv, ndev->dev_addr);
1804
1805 emac_set_fc_source_addr(priv, ndev->dev_addr);
1806
1807 /* configure transmit unit */
1808 emac_configure_tx(priv);
1809 /* configure rx unit */
1810 emac_configure_rx(priv);
1811
1812 /* allocate buffers for receive descriptors */
1813 emac_alloc_rx_desc_buffers(priv);
1814
1815 if (ndev->phydev)
1816 phy_start(ndev->phydev);
1817
1818 /* allocates interrupt resources and
1819 * enables the interrupt line and IRQ handling
1820 */
1821 ret = request_irq(priv->irq, emac_interrupt_handler,
1822 IRQF_SHARED, ndev->name, ndev);
1823 if (ret) {
1824 pr_err("request_irq failed, ret=%d\n", ret);
1825 goto request_irq_failed;
1826 }
1827
1828 if (priv->irq_wakeup) {
1829 ret = request_irq(priv->irq_wakeup, emac_wakeup_handler,
1830 IRQF_SHARED, ndev->name, ndev);
1831 if (ret) {
1832 pr_err("request wakeup_irq failed, ret=%d\\n", ret);
1833 goto request_wakeup_irq_failed;
1834 }
1835 }
1836
1837 if (priv->irq_tso) {
1838 ret = request_irq(priv->irq_tso, emac_irq_tso,
1839 IRQF_SHARED, "emac_tso", ndev);
1840 if (ret) {
1841 pr_err("request tso failed, ret=%d\\n", ret);
1842 goto request_tso_irq_failed;
1843 }
1844 }
1845
1846 if (priv->fix_link)
1847 emac_set_speed_duplex(priv);
1848
1849 clear_bit(EMAC_DOWN, &priv->state);
1850
1851 /* enable mac interrupt */
1852 emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
1853
1854 /* both rx tx */
1855 val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
1856 MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
1857 MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
1858#if 0
1859 val |= MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
1860 MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
1861 MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE;
1862#endif
1863 emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
1864
1865#ifdef CONFIG_ASR_EMAC_NAPI
1866 napi_enable(&priv->rx_napi);
1867 napi_enable(&priv->tx_napi);
1868#endif
1869
1870 if (priv->fix_link && !netif_carrier_ok(ndev))
1871 netif_carrier_on(ndev);
1872
1873#ifdef WAN_LAN_AUTO_ADAPT
1874 phy_id = ndev->phydev->phy_id;
1875 if(phy_id == IP175D_PHY_ID)
1876 emac_sig_workq(CARRIER_UP_IP175D, 0);
1877 else
1878 emac_sig_workq(CARRIER_UP, 0);
1879#endif
1880
1881 hrtimer_init(&priv->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1882 priv->tx_timer.function = emac_tx_timer;
1883 hrtimer_init(&priv->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1884 priv->rx_timer.function = emac_rx_timer;
1885
1886 if (priv->tso)
1887 emac_tso_config(priv);
1888
1889 netif_tx_start_all_queues(ndev);
1890 return 0;
1891
1892request_tso_irq_failed:
1893 if (priv->irq_wakeup)
1894 free_irq(priv->irq_wakeup, ndev);
1895
1896request_wakeup_irq_failed:
1897 free_irq(priv->irq, ndev);
1898
1899request_irq_failed:
1900 if (ndev->phydev) {
1901 phy_stop(ndev->phydev);
1902 phy_disconnect(ndev->phydev);
1903 }
1904
1905 return ret;
1906}
1907
1908/* Name emac_down
1909 * Arguments priv : pointer to driver private data structure
1910 * Return Status: 0 - Success; non-zero - Fail
1911 * Description This function is called from emac_close and
1912 * performs the things when net interface is about to down.
1913 * It frees the irq, removes the various timers.
1914 * It sets the net interface off and
1915 * resets the hardware. Cleans the Tx and Rx
1916 * ring descriptor.
1917 */
1918int emac_down(struct emac_priv *priv)
1919{
1920 struct net_device *ndev = priv->ndev;
1921#ifdef WAN_LAN_AUTO_ADAPT
1922 u32 phy_id;
1923
1924 priv->dhcp = 0;
1925 priv->vlan_port = -1;
1926 priv->link = 0;
1927 phy_id = ndev->phydev->phy_id;
1928 if(priv->dhcp_delaywork){
1929 cancel_delayed_work(&priv->dhcp_work);
1930 priv->dhcp_delaywork = 0;
1931 }
1932#endif
1933 set_bit(EMAC_DOWN, &priv->state);
1934
1935 netif_tx_disable(ndev);
1936
1937 hrtimer_cancel(&priv->tx_timer);
1938 hrtimer_cancel(&priv->rx_timer);
1939 /* Stop and disconnect the PHY */
1940 if (ndev->phydev) {
1941 phy_stop(ndev->phydev);
1942 phy_disconnect(ndev->phydev);
1943 }
1944
1945 if (!priv->fix_link) {
1946 priv->duplex = DUPLEX_UNKNOWN;
1947 priv->speed = SPEED_UNKNOWN;
1948 }
1949
1950#ifdef CONFIG_ASR_EMAC_NAPI
1951 napi_disable(&priv->rx_napi);
1952 napi_disable(&priv->tx_napi);
1953#endif
1954 emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
1955 emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);
1956
1957 free_irq(priv->irq, ndev);
1958 if (priv->irq_wakeup)
1959 free_irq(priv->irq_wakeup, ndev);
1960
1961 emac_ptp_deinit(priv);
1962
1963 emac_reset_hw(priv);
1964 netif_carrier_off(ndev);
1965
1966#ifdef WAN_LAN_AUTO_ADAPT
1967 if(phy_id == IP175D_PHY_ID)
1968 emac_sig_workq(CARRIER_DOWN_IP175D, 0);
1969 else
1970 emac_sig_workq(CARRIER_DOWN, 0);
1971#endif
1972
1973#ifdef CONFIG_ASR_EMAC_DDR_QOS
1974 flush_work(&priv->qos_work);
1975 pm_qos_update_request(&priv->clk_scaling.ddr_qos, PM_QOS_DEFAULT_VALUE);
1976#endif
1977 pm_qos_update_request(&priv->pm_qos_req,
1978 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
1979
1980 if (!priv->en_suspend)
1981 pm_relax(&priv->pdev->dev);
1982
1983 if (priv->tso) {
1984 dma_free_coherent(&priv->pdev->dev,
1985 priv->tx_ring.total_cnt * 0x80,
1986 priv->tso_hdr,
1987 priv->tso_hdr_addr);
1988 }
1989
1990 return 0;
1991}
1992
1993/* Name emac_alloc_tx_resources
1994 * Arguments priv : pointer to driver private data structure
1995 * Return Status: 0 - Success; non-zero - Fail
1996 * Description Allocates TX resources and getting virtual & physical address.
1997 */
1998int emac_alloc_tx_resources(struct emac_priv *priv)
1999{
2000 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2001 struct platform_device *pdev = priv->pdev;
2002 u32 size;
2003
2004 size = sizeof(struct emac_desc_buffer) * tx_ring->total_cnt;
2005
2006 /* allocate memory */
2007 tx_ring->desc_buf = kzalloc(size, GFP_KERNEL);
2008 if (!tx_ring->desc_buf) {
2009 pr_err("Memory allocation failed for the Transmit descriptor buffer\n");
2010 return -ENOMEM;
2011 }
2012
2013 memset(tx_ring->desc_buf, 0, size);
2014
2015 tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc);
2016
2017 EMAC_ROUNDUP(tx_ring->total_size, 1024);
2018
2019 if (priv->sram_pool) {
2020 tx_ring->desc_addr =
2021 (void *)gen_pool_dma_alloc(
2022 priv->sram_pool, tx_ring->total_size,
2023 &tx_ring->desc_dma_addr);
2024 tx_ring->in_sram = true;
2025 }
2026
2027 if (!tx_ring->desc_addr) {
2028 tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2029 tx_ring->total_size,
2030 &tx_ring->desc_dma_addr,
2031 GFP_KERNEL | __GFP_ZERO);
2032 if (!tx_ring->desc_addr) {
2033 pr_err("Memory allocation failed for the Transmit descriptor ring\n");
2034 kfree(tx_ring->desc_buf);
2035 return -ENOMEM;
2036 }
2037
2038 if (priv->sram_pool) {
2039 pr_err("sram pool left size not enough, tx fallback\n");
2040 tx_ring->in_sram = false;
2041 }
2042 }
2043
2044 memset(tx_ring->desc_addr, 0, tx_ring->total_size);
2045
2046 tx_ring->nxt_use = 0;
2047 tx_ring->nxt_clean = 0;
2048
2049 return 0;
2050}
2051
2052/* Name emac_alloc_rx_resources
2053 * Arguments priv : pointer to driver private data structure
2054 * Return Status: 0 - Success; non-zero - Fail
2055 * Description Allocates RX resources and getting virtual & physical address.
2056 */
2057int emac_alloc_rx_resources(struct emac_priv *priv)
2058{
2059 struct emac_desc_ring *rx_ring = &priv->rx_ring;
2060 struct platform_device *pdev = priv->pdev;
2061 u32 buf_len;
2062
2063 buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt;
2064
2065 rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL);
2066 if (!rx_ring->desc_buf) {
2067 pr_err("Memory allocation failed for the Receive descriptor buffer\n");
2068 return -ENOMEM;
2069 }
2070
2071 memset(rx_ring->desc_buf, 0, buf_len);
2072
2073 /* round up to nearest 4K */
2074 rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc);
2075
2076 EMAC_ROUNDUP(rx_ring->total_size, 1024);
2077
2078 if (priv->sram_pool) {
2079 rx_ring->desc_addr =
2080 (void *)gen_pool_dma_alloc(
2081 priv->sram_pool, rx_ring->total_size,
2082 &rx_ring->desc_dma_addr);
2083 rx_ring->in_sram = true;
2084 }
2085
2086 if (!rx_ring->desc_addr) {
2087 rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2088 rx_ring->total_size,
2089 &rx_ring->desc_dma_addr,
2090 GFP_KERNEL | __GFP_ZERO);
2091 if (!rx_ring->desc_addr) {
2092 pr_err("Memory allocation failed for the Receive descriptor ring\n");
2093 kfree(rx_ring->desc_buf);
2094 return -ENOMEM;
2095 }
2096
2097 if (priv->sram_pool) {
2098 pr_err("sram pool left size not enough, rx fallback\n");
2099 rx_ring->in_sram = false;
2100 }
2101 }
2102
2103 memset(rx_ring->desc_addr, 0, rx_ring->total_size);
2104
2105 rx_ring->nxt_use = 0;
2106 rx_ring->nxt_clean = 0;
2107
2108 return 0;
2109}
2110
2111/* Name emac_free_tx_resources
2112 * Arguments priv : pointer to driver private data structure
2113 * Return none
2114 * Description Frees the Tx resources allocated
2115 */
2116void emac_free_tx_resources(struct emac_priv *priv)
2117{
2118 emac_clean_tx_desc_ring(priv);
2119 kfree(priv->tx_ring.desc_buf);
2120 priv->tx_ring.desc_buf = NULL;
2121 if (priv->tx_ring.in_sram)
2122 gen_pool_free(priv->sram_pool,
2123 (unsigned long) priv->tx_ring.desc_addr,
2124 priv->tx_ring.total_size);
2125 else
2126 dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size,
2127 priv->tx_ring.desc_addr,
2128 priv->tx_ring.desc_dma_addr);
2129 priv->tx_ring.desc_addr = NULL;
2130}
2131
2132/* Name emac_free_rx_resources
2133 * Arguments priv : pointer to driver private data structure
2134 * Return none
2135 * Description Frees the Rx resources allocated
2136 */
2137void emac_free_rx_resources(struct emac_priv *priv)
2138{
2139 emac_clean_rx_desc_ring(priv);
2140 kfree(priv->rx_ring.desc_buf);
2141 priv->rx_ring.desc_buf = NULL;
2142 if (priv->rx_ring.in_sram)
2143 gen_pool_free(priv->sram_pool,
2144 (unsigned long) priv->rx_ring.desc_addr,
2145 priv->rx_ring.total_size);
2146 else
2147 dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size,
2148 priv->rx_ring.desc_addr,
2149 priv->rx_ring.desc_dma_addr);
2150 priv->rx_ring.desc_addr = NULL;
2151}
2152
/* Name		emac_open
 * Arguments	ndev : pointer to net_device structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	Called when the net interface is brought up.  Allocates
 *		the TX and RX rings and brings the interface up, unwinding
 *		each step on failure.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ret;

	ret = emac_alloc_tx_resources(priv);
	if (ret) {
		pr_err("Error in setting up the Tx resources\n");
		goto emac_alloc_tx_resource_fail;
	}

	ret = emac_alloc_rx_resources(priv);
	if (ret) {
		pr_err("Error in setting up the Rx resources\n");
		goto emac_alloc_rx_resource_fail;
	}

	ret = emac_up(priv);
	if (ret) {
		/* Fix: "intrface" typo in the error message. */
		pr_err("Error in making the net interface up\n");
		goto emac_up_fail;
	}
	return 0;

emac_up_fail:
	emac_free_rx_resources(priv);
emac_alloc_rx_resource_fail:
	emac_free_tx_resources(priv);
emac_alloc_tx_resource_fail:
	emac_reset_hw(priv);
	return ret;
}
2192
2193/* Name emac_close
2194 * Arguments pstNetdev : pointer to net_device structure
2195 * Return Status: 0 - Success; non-zero - Fail
2196 * Description This function is called when net interface is made down.
2197 * It calls the appropriate functions to
2198 * free Tx and Rx resources.
2199 */
static int emac_close(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	/* Quiesce the MAC/DMA before tearing the rings down. */
	emac_down(priv);

	/* Release both descriptor rings and their buffers. */
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
2210
2211/* Name emac_tx_clean_desc
2212 * Arguments priv : pointer to driver private data structure
2213 * Return 1: Cleaned; 0:Failed
2214 * Description
2215 */
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_tx_clean_desc(struct emac_priv *priv, int budget)
#else
static int emac_tx_clean_desc(struct emac_priv *priv)
#endif
{
	struct emac_desc_ring *tx_ring;
	struct emac_tx_desc *tx_desc, *end_desc;
	struct emac_desc_buffer *tx_buf;
	struct net_device *ndev = priv->ndev;
	u32 i, u32LastIndex;
	u8 u8Cleaned;
	unsigned int count = 0;	/* number of descriptors reclaimed */

	/*
	 * Reclaim completed TX descriptors between nxt_clean and nxt_use.
	 * Packets are reclaimed whole: desc_buf[i].nxt_watch holds the index
	 * of the packet's last descriptor, and the packet is only considered
	 * done once the hardware has cleared OWN on that last descriptor.
	 */
	tx_ring = &priv->tx_ring;
	i = tx_ring->nxt_clean;
	do {
		if (i == tx_ring->nxt_use)
			break;

		u32LastIndex = tx_ring->desc_buf[i].nxt_watch;
		end_desc = emac_get_tx_desc(priv, u32LastIndex);
		/*
		 * Stop if the hardware still owns the last descriptor, or,
		 * with the TSO engine enabled, if the offload flags are
		 * still set on it (offload completion pending).
		 */
		if (end_desc->OWN == 1 ||
		    (priv->tso && (end_desc->tso || end_desc->coe)))
			break;

		/* Walk every descriptor of this packet, last one included. */
		u8Cleaned = false;
		for ( ; !u8Cleaned; count++) {
			tx_desc = emac_get_tx_desc(priv, i);
			tx_buf = &tx_ring->desc_buf[i];

			emac_get_tx_hwtstamp(priv, tx_buf->skb);

			/* own bit will be reset to 0 by dma
			 * once packet is transmitted
			 */
			if (tx_buf->dma_addr) {
				dma_unmap_page(&priv->pdev->dev,
					       tx_buf->dma_addr,
					       tx_buf->dma_len,
					       DMA_TO_DEVICE);
				tx_buf->dma_addr = 0;
			}
			if (tx_buf->skb) {
				/* skb is attached to the first descriptor only */
				dev_kfree_skb_any(tx_buf->skb);
				tx_buf->skb = NULL;
			}
			if (tx_buf->buff_addr)
				tx_buf->buff_addr = NULL;

			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
			u8Cleaned = (i == u32LastIndex);
			if (++i == tx_ring->total_cnt)
				i = 0;
		}

#ifdef CONFIG_ASR_EMAC_NAPI
		/* NAPI budget check happens on packet boundaries only. */
		if (count >= budget) {
			count = budget;
			break;
		}
#endif
	} while (1);
	tx_ring->nxt_clean = i;

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_lock(&priv->spTxLock);
#endif
	/* Restart the queue once enough descriptors have been freed. */
	if (unlikely(count && netif_queue_stopped(ndev) &&
	    netif_carrier_ok(ndev) &&
	    EMAC_DESC_UNUSED(tx_ring) >= EMAC_TX_WAKE_THRESHOLD))
		netif_wake_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock(&priv->spTxLock);
#endif
	return count;
}
2293
2294static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc)
2295{
2296 /* if last descritpor isn't set, so we drop it*/
2297 if (!dsc->LastDescriptor) {
2298 netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n");
2299 return frame_discard;
2300 }
2301
2302 /*
2303 * A Frame that is less than 64-bytes (from DA thru the FCS field)
2304 * is considered as Runt Frame.
2305 * Most of the Runt Frames happen because of collisions.
2306 */
2307 if (dsc->ApplicationStatus & EMAC_RX_FRAME_RUNT) {
2308 netdev_dbg(priv->ndev, "rx frame less than 64.\n");
2309 return frame_discard;
2310 }
2311
2312 /*
2313 * When the frame fails the CRC check,
2314 * the frame is assumed to have the CRC error
2315 */
2316 if (dsc->ApplicationStatus & EMAC_RX_FRAME_CRC_ERR) {
2317 netdev_dbg(priv->ndev, "rx frame crc error\n");
2318 return frame_discard;
2319 }
2320
2321 if (priv->tso && dsc->csum_res == EMAC_CSUM_FAIL) {
2322 netdev_dbg(priv->ndev, "COE: rx frame checksum error\n");
2323 return frame_discard;
2324 }
2325
2326 /*
2327 * When the length of the frame exceeds
2328 * the Programmed Max Frame Length
2329 */
2330 if (dsc->ApplicationStatus & EMAC_RX_FRAME_MAX_LEN_ERR) {
2331 netdev_dbg(priv->ndev, "rx frame too long\n");
2332 return frame_discard;
2333 }
2334
2335 /*
2336 * frame reception is truncated at that point and
2337 * frame is considered to have Jabber Error
2338 */
2339 if (dsc->ApplicationStatus & EMAC_RX_FRAME_JABBER_ERR) {
2340 netdev_dbg(priv->ndev, "rx frame has been truncated\n");
2341 return frame_discard;
2342 }
2343
2344 /* this bit is only for 802.3 Type Frames */
2345 if (dsc->ApplicationStatus & EMAC_RX_FRAME_LENGTH_ERR) {
2346 netdev_dbg(priv->ndev, "rx frame length err for 802.3\n");
2347 return frame_discard;
2348 }
2349
2350 if (dsc->FramePacketLength <= ETHERNET_FCS_SIZE ||
2351 dsc->FramePacketLength > EMAC_RX_BUFFER_2048) {
2352 netdev_dbg(priv->ndev, "rx frame len too small or too long\n");
2353 return frame_discard;
2354 }
2355 return frame_ok;
2356}
2357
2358/* Name emac_rx_clean_desc
2359 * Arguments priv : pointer to driver private data structure
2360 * Return 1: Cleaned; 0:Failed
2361 * Description
2362 */
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
#else
static int emac_rx_clean_desc(struct emac_priv *priv)
#endif
{
	struct emac_desc_ring *rx_ring;
	struct emac_desc_buffer *rx_buf;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc *rx_desc;
	struct sk_buff *skb = NULL;
	int status;
#ifdef CONFIG_ASR_EMAC_NAPI
	u32 receive_packet = 0;
#endif
	u32 i;
	u32 u32Len;
	u32 u32Size;
	u8 *pu8Data;
#ifdef WAN_LAN_AUTO_ADAPT
	int port = -1, vlan = -1;
	struct vlan_hdr *vhdr;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
#endif

	/*
	 * Pass 1: harvest completed descriptors (OWN == 0) starting at
	 * nxt_clean, validate each frame and queue good skbs on the
	 * intermediate priv->rx_skb list. Delivery to the stack is
	 * deferred to pass 2 so the ring can be refilled first.
	 */
	rx_ring = &priv->rx_ring;
	i = rx_ring->nxt_clean;
	rx_desc = emac_get_rx_desc(priv, i);
	u32Size = 0;

	/* Manual flow control: send a pause frame if the ring runs low. */
	if (priv->pause.tx_pause && !priv->pause.fc_auto)
		emac_check_ring_and_send_pause(priv);

	while (rx_desc->OWN == 0) {
		/* With COE enabled, wait until the checksum result is in. */
		if (priv->tso && !rx_desc->csum_done)
			break;

		/* Bound the backlog on the intermediate queue. */
		if (skb_queue_len(&priv->rx_skb) > priv->rx_ring.total_cnt)
			break;

		rx_buf = &rx_ring->desc_buf[i];
		if (!rx_buf->skb)
			break;

		emac_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
			rx_buf->dma_len, DMA_FROM_DEVICE);
		status = emac_rx_frame_status(priv, rx_desc);
		if (unlikely(status == frame_discard)) {
			ndev->stats.rx_dropped++;
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		} else {
			skb = rx_buf->skb;
			/* Strip the trailing FCS from the reported length. */
			u32Len = rx_desc->FramePacketLength - ETHERNET_FCS_SIZE;

			pu8Data = skb_put(skb, u32Len);
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
			/* Copy mode: DMA buffer is separate from the skb. */
			memcpy(pu8Data, (u8 *)rx_buf->buff_addr, u32Len);
#endif
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			emac_get_rx_hwtstamp(priv, rx_desc, skb);

			skb->protocol = eth_type_trans(skb, ndev);
			if (priv->tso)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

#ifdef WAN_LAN_AUTO_ADAPT
			/*
			 * WAN/LAN auto-adapt: sniff switch special tags and
			 * DHCP replies to learn which switch port the WAN
			 * uplink is on (IP175D switch PHY only).
			 */
			{/* Special tag format: DA-SA-0x81-xx-data.
			    Bit 7-3 Packet Information
				- bit 4: Reserved
				- bit 3: Reserved
				- bit 2: Miss address table
				- bit 1: Security violation
				- bit 0: VLAN violation
			    Bit 2-0 Ingress Port number
				- b000: Disabled
				- b001: Port 0
				- b010: Port 1
				- b011: Port 2
				- b100: Port 3
				- b101: Port 4
				- Other: Reserved */
			if(ntohs(skb->protocol)>>8 == 0x81) {
				port = ntohs(skb->protocol) & 0x7;
				if(port > 0 && port <= 0x5) {
					skb->protocol = htons(ETH_P_8021Q);
					port = port - 1;
				}
			}
			if (skb->protocol == htons(ETH_P_8021Q)) {
				vhdr = (struct vlan_hdr *) skb->data;
				vlan = ntohs(vhdr->h_vlan_TCI);
				iph = (struct iphdr *)(skb->data + VLAN_HLEN);
			} else if (skb->protocol == htons(ETH_P_IP))
				iph = (struct iphdr *)skb->data;

			if (iph && iph->protocol == IPPROTO_UDP) {
				udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
				if ((htons(udph->dest) == 68 && htons(udph->source) == 67)) {
					/* offset 242: DHCP message-type option in the payload */
					u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
					u8 dhcp_type = *(udp_data + 242);
					if ((DHCP_ACK == dhcp_type || DHCP_OFFER == dhcp_type)
						&& (DHCP_SEND_REQ == priv->dhcp)) {
						priv->dhcp = DHCP_REC_RESP;
						if (ndev->phydev->phy_id == IP175D_PHY_ID)
							priv->vlan_port = port;
						else
							priv->vlan_port = -1;
					}
				}
			}
			}
#endif
			skb_queue_tail(&priv->rx_skb, skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;

		rx_desc = emac_get_rx_desc(priv, i);

		/* restart RX COE */
		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
	}

	rx_ring->nxt_clean = i;

	/* Refill freed descriptors before handing skbs to the stack. */
	emac_alloc_rx_desc_buffers(priv);

	/*
	 * Since netif_rx may consume too much time, put this after
	 * emac_alloc_rx_desc_buffers so that RX DMA desc refill ASAP,
	 * reduce packet loss probability.
	 */
	while ((skb = skb_dequeue(&priv->rx_skb))) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += skb->len;
#ifdef CONFIG_ASR_EMAC_NAPI
		napi_gro_receive(&priv->rx_napi, skb);
#else
		netif_rx(skb);
#endif

#ifdef CONFIG_ASR_EMAC_NAPI
		receive_packet++;
		if (receive_packet >= budget)
			break;
#endif
	}

#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif

#ifdef CONFIG_ASR_EMAC_NAPI
	return receive_packet;
#else
	return 0;
#endif
}
2530
2531/* Name emac_alloc_rx_desc_buffers
2532 * Arguments priv : pointer to driver private data structure
2533 * Return 1: Cleaned; 0:Failed
2534 * Description
2535 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc_buffer *rx_buf;
	struct sk_buff *skb;
	struct emac_rx_desc *rx_desc;
	u32 i;
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
	void *buff;
#endif
	u32 buff_len;
	int fail_cnt = 0;	/* non-zero => arm refill retry (priv->refill) */

	/*
	 * Refill the RX ring from nxt_use onward: allocate an skb (and, in
	 * copy mode, a DMA bounce buffer) for every empty slot, program the
	 * descriptor, and hand it back to the hardware by setting OWN.
	 */
	i = rx_ring->nxt_use;
	rx_buf = &rx_ring->desc_buf[i];

	buff_len = priv->u32RxBufferLen;

	while (!rx_buf->skb) {
		/* Preferred source: the preallocated skb ring buffer. */
		skb = emac_skbrb_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
		if (!skb) {
			/* Fall back to the normal allocator for the full-size ring. */
			if (priv->rx_ring.total_cnt == EMAC_RX_RING_SIZE)
				skb = dev_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
			if (!skb) {
				fail_cnt++;
				pr_warn_ratelimited("emac sk_buff allocation failed\n");
				break;
			}
		}

		/* make buffer alignment */
		skb_reserve(skb, NET_IP_ALIGN + EMAC_EXTRA_ROOM);
		skb->dev = ndev;

#ifdef CONFIG_ASR_EMAC_RX_NO_COPY
		/* Zero-copy: DMA directly into the skb data area. */
		rx_buf->buff_addr = skb->data;
#else
		/* Copy mode: DMA into a kmalloc'd bounce buffer (reused across fills). */
		if (!rx_buf->buff_addr) {
			buff = kmalloc(buff_len, GFP_ATOMIC | GFP_DMA);
			if (!buff) {
				pr_err("kmalloc failed\n");
				dev_kfree_skb(skb);
				break;
			}
			rx_buf->buff_addr = buff;
		}
#endif
		rx_buf->skb = skb;
		rx_buf->dma_len = buff_len;
		rx_buf->dma_addr = emac_map_single(&priv->pdev->dev,
						   rx_buf->buff_addr,
						   buff_len,
						   DMA_FROM_DEVICE);

		/* Reset all status fields before giving the slot to hardware. */
		rx_desc = emac_get_rx_desc(priv, i);
		rx_desc->BufferAddr1 = rx_buf->dma_addr;
		rx_desc->BufferSize1 = rx_buf->dma_len;
		rx_desc->rx_timestamp = 0;
		rx_desc->ptp_pkt = 0;
		rx_desc->FirstDescriptor = 0;
		rx_desc->LastDescriptor = 0;
		rx_desc->FramePacketLength = 0;
		rx_desc->ApplicationStatus = 0;
		if (++i == rx_ring->total_cnt) {
			rx_desc->EndRing = 1;
			i = 0;
		}

		/* Descriptor fields must be visible before OWN transfers it to HW. */
		wmb();
		rx_desc->OWN = 1;
		if (priv->tso)
			rx_desc->csum_done = 0;

		rx_buf = &rx_ring->desc_buf[i];
	}
	rx_ring->nxt_use = i;

	/* Remember allocation failure so the RX timer can retry the refill. */
	if (fail_cnt)
		priv->refill = 1;
	else
		priv->refill = 0;
	emac_dma_start_receive(priv);
}
2620
2621#ifdef CONFIG_ASR_EMAC_NAPI
2622static int emac_rx_poll(struct napi_struct *napi, int budget)
2623{
2624 struct emac_priv *priv = container_of(napi, struct emac_priv, rx_napi);
2625 int work_done;
2626
2627 work_done = emac_rx_clean_desc(priv, budget);
2628 if (work_done < budget && napi_complete_done(napi, work_done)) {
2629 unsigned long flags;
2630
2631 spin_lock_irqsave(&priv->intr_lock, flags);
2632 emac_enable_interrupt(priv, 0);
2633 spin_unlock_irqrestore(&priv->intr_lock, flags);
2634
2635 if (priv->refill)
2636 emac_rx_timer_arm(priv);
2637 }
2638
2639 return work_done;
2640}
2641
2642static int emac_tx_poll(struct napi_struct *napi, int budget)
2643{
2644 struct emac_priv *priv = container_of(napi, struct emac_priv, tx_napi);
2645 int work_done;
2646
2647 work_done = emac_tx_clean_desc(priv, budget);
2648 if (work_done < budget && napi_complete_done(napi, work_done)) {
2649 unsigned long flags;
2650
2651 spin_lock_irqsave(&priv->intr_lock, flags);
2652 emac_enable_interrupt(priv, 1);
2653 spin_unlock_irqrestore(&priv->intr_lock, flags);
2654 }
2655
2656 return work_done;
2657}
2658#endif
2659
2660/* Name emac_tx_mem_map
2661 * Arguments priv : pointer to driver private data structure
2662 * pstSkb : pointer to sk_buff structure passed by upper layer
2663 * max_tx_len : max data len per descriptor
2664 * frag_num : number of fragments in the packet
2665 * Return number of descriptors needed for transmitting packet
2666 * Description
2667 */
static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb,
			   u32 max_tx_len, u32 frag_num, int ioc)
{
	struct emac_desc_ring *tx_ring;
	struct emac_desc_buffer *tx_buf;
	struct emac_tx_desc *tx_desc, *first_desc;
	u32 skb_len;
	u32 u32Offset, u32Size, i;
	u32 use_desc_cnt;
	u32 f;
	void *pvPtr;
	u32 cur_desc_addr;	/* NOTE(review): read below but never used — dead? confirm */
	u32 cur_desc_idx;
	u8 do_tx_timestamp = 0;
	bool use_buf2 = 0;	/* next mapping goes into BufferAddr2 of current desc */

	/*
	 * Map the skb (linear part, then page fragments) onto TX
	 * descriptors. Each descriptor carries up to two buffers
	 * (BufferAddr1/2); use_buf2 tracks which slot is next. The first
	 * descriptor's OWN bit is set last (after wmb()) so the DMA engine
	 * cannot start on a half-built chain.
	 *
	 * NOTE(review): first_desc is only assigned inside the linear-data
	 * loop; if skb->len == skb->data_len (no linear bytes, frags only),
	 * first_desc->OWN below would use it uninitialized — confirm callers
	 * never pass such skbs.
	 */
	u32Offset = 0;
	use_desc_cnt = 0;

	skb_tx_timestamp(skb);
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_tx_timestamp = 1;
	}

	tx_ring = &priv->tx_ring;
	skb_len = skb->len - skb->data_len;	/* bytes in the linear area */
	i = cur_desc_idx = tx_ring->nxt_use;
	cur_desc_addr = emac_rd(priv, DMA_TRANSMIT_BASE_ADDRESS);
	while (skb_len > 0) {
		u32Size = min(skb_len, max_tx_len);
		skb_len -= u32Size;

		tx_buf = &tx_ring->desc_buf[i];
		tx_buf->dma_len = u32Size;
		pvPtr = skb->data + u32Offset;
		tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pvPtr,
						   u32Size, DMA_TO_DEVICE);
		tx_buf->buff_addr = pvPtr;
		tx_buf->ulTimeStamp = jiffies;

		tx_desc = emac_get_tx_desc(priv, i);

		/* Fill buffer slot 2 of the current descriptor, or start a new one. */
		if (use_buf2) {
			tx_desc->BufferAddr2 = tx_buf->dma_addr;
			tx_desc->BufferSize2 = tx_buf->dma_len;
			i++;
			use_buf2 = 0;
		} else {
			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
			tx_desc->BufferAddr1 = tx_buf->dma_addr;
			tx_desc->BufferSize1 = tx_buf->dma_len;
			use_buf2 = 1;
		}

		if (use_desc_cnt == 0) {
			first_desc = tx_desc;
			tx_desc->FirstSegment = 1;
			if (do_tx_timestamp)
				tx_desc->tx_timestamp = 1;
		}

		/* Last piece of a non-fragmented skb closes the chain. */
		if (skb_len == 0 && frag_num == 0) {
			tx_desc->LastSegment = 1;
			tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
		}

		/* Wrap only when the descriptor was fully consumed (both slots). */
		if (!use_buf2 && i == tx_ring->total_cnt) {
			tx_desc->EndRing = 1;
			i = 0;
		}

		/* trigger first desc OWN bit later */
		use_desc_cnt++;
		if (use_desc_cnt > 2)
			tx_desc->OWN = 1;

		u32Offset += u32Size;
	}

	/* if the data is fragmented */
	for (f = 0; f < frag_num; f++) {
		skb_frag_t *frag;

		frag = &(skb_shinfo(skb)->frags[f]);
		skb_len = skb_frag_size(frag);
		u32Offset = skb_frag_off(frag);

		while (skb_len) {
			u32Size = min(skb_len, max_tx_len);
			skb_len -= u32Size;

			tx_buf = &tx_ring->desc_buf[i];
			tx_buf->dma_len = u32Size;
			tx_buf->dma_addr =
				dma_map_page(&priv->pdev->dev,
					     skb_frag_page(frag),
					     u32Offset,
					     u32Size,
					     DMA_TO_DEVICE);
			tx_buf->ulTimeStamp = jiffies;

			tx_desc = emac_get_tx_desc(priv, i);
			if (use_buf2) {
				tx_desc->BufferAddr2 = tx_buf->dma_addr;
				tx_desc->BufferSize2 = tx_buf->dma_len;
				i++;
				use_buf2 = 0;
			} else {
				memset(tx_desc, 0, sizeof(struct emac_tx_desc));
				tx_desc->BufferAddr1 = tx_buf->dma_addr;
				tx_desc->BufferSize1 = tx_buf->dma_len;
				use_buf2 = 1;
			}

			/* Last piece of the last fragment closes the chain. */
			if (skb_len == 0 && f == (frag_num - 1)) {
				tx_desc->LastSegment = 1;
				tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
			}

			if (!use_buf2 && i == tx_ring->total_cnt) {
				tx_desc->EndRing = 1;
				i = 0;
			}

			/* trigger first desc OWN bit later */
			use_desc_cnt++;
			if (use_desc_cnt > 2)
				tx_desc->OWN = 1;

			u32Offset += u32Size;
		}
	}

	/* Half-used final descriptor: advance past it, wrapping if needed. */
	if (use_buf2 && ++i == tx_ring->total_cnt) {
		tx_desc->EndRing = 1;
		i = 0;
	}

	/* Anchor skb and the last-descriptor index on the first slot for reclaim. */
	tx_ring->desc_buf[cur_desc_idx].skb = skb;
	tx_ring->desc_buf[cur_desc_idx].nxt_watch =
		(i == 0 ? tx_ring->total_cnt : 0) + i - 1;

	/* All descriptor writes must land before OWN hands the chain to DMA. */
	wmb();

	first_desc->OWN = 1;

	emac_dma_start_transmit(priv);

	tx_ring->nxt_use = i;
	return use_desc_cnt;
}
2822
/*
 * Program one mapped buffer into the TX ring for the TSO/COE engine.
 *
 * @idx:     index of the descriptor to fill
 * @tso/@coe: offload mode of this packet
 * @addr/@payload: DMA address and length of the mapped buffer
 * @hlen:    header length (L2..L4), consumed via buffer 1 on the first desc
 * @mss:     TCP segment size; the engine emits one segment per @mss bytes
 * @fst/@last: first/last buffer of the packet
 * @ioc:     request an interrupt on completion (last descriptor only)
 * @ts:      request a TX hardware timestamp
 * @cnt:     incremented by the number of ring slots consumed
 *
 * Returns the index of the next free descriptor. For TSO, one extra ring
 * slot is reserved per @mss chunk of @payload, since the engine expands
 * the buffer into multiple wire segments.
 */
static int emac_prepare_tso_desc(struct emac_priv *priv, int idx,
		bool tso, bool coe,
		u32 addr, int payload, u8 hlen, int mss,
		bool fst, bool last, bool ioc, bool ts,
		u32 *cnt)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_tx_desc *pdesc;

	pdesc = emac_get_tx_desc(priv, idx);
	if (tso) {
		/* First TSO descriptor: headers via buffer 1, payload via buffer 2. */
		if (fst && hlen) {
			emac_set_buf1_addr_len(pdesc, addr, 0);
			payload -= hlen;
			addr += hlen;
		}
		emac_set_buf2_addr_len(pdesc, addr, payload);
	} else {
		emac_set_buf1_addr_len(pdesc, addr, payload);
	}

	if (fst) {
		emac_tx_desc_set_fd(pdesc);
	} else {
		/* Continuation descriptors carry the offload mode flags. */
		if (tso)
			emac_tx_desc_set_offload(pdesc, 1, 1, 1);
		else if (coe)
			emac_tx_desc_set_offload(pdesc, 0, 1, 0);
		else
			emac_tx_desc_set_offload(pdesc, 1, 0, 0);
	}

	if (ts)
		emac_tx_desc_set_ts(pdesc);

	if (last) {
		/* last segment */
		emac_tx_desc_set_ld(pdesc);
		if (ioc)
			emac_tx_desc_set_ioc(pdesc);
	}

	print_desc((void *)pdesc, 16);
	if (payload <= 0)
		return idx;

	/*
	 * Advance the ring: non-TSO consumes one slot; TSO reserves one
	 * slot per @mss-sized chunk the engine will segment out.
	 */
	do {
		(*cnt)++;

		if (++idx == tx_ring->total_cnt) {
			emac_tx_desc_set_ring_end(pdesc);
			idx = 0;
		}

		if (!tso)
			break;

		payload -= mss;
		if (payload <= 0)
			break;

		pdesc = emac_get_tx_desc(priv, idx);
		emac_tx_desc_set_offload(pdesc, 1, 1, 0);

		print_desc((void *)pdesc, 16);
	} while (1);

	return idx;
}
2892
2893static int emac_tso_xmit(struct sk_buff *skb, struct net_device *ndev,
2894 bool tso, bool coe)
2895{
2896 struct emac_priv *priv = netdev_priv(ndev);
2897 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2898 struct emac_desc_buffer *tx_buf;
2899 struct emac_tx_desc *pdesc;
2900 skb_frag_t *frag;
2901 u32 desc_cnt, frag_num, f, mss, fst;
2902 u32 offset, i;
2903 u8 hlen;
2904 int skb_len, payload;
2905 void *pbuf;
2906 int ioc;
2907 u8 timestamp = 0;
2908
2909 frag_num = skb_shinfo(skb)->nr_frags;
2910 skb_len = skb->len - skb->data_len;
2911 if (tso) {
2912 hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
2913 mss = skb_shinfo(skb)->gso_size;
2914 desc_cnt = (skb_len / mss) + 1;
2915 for (f = 0; f < frag_num; f++) {
2916 frag = &skb_shinfo(skb)->frags[f];
2917 desc_cnt += (skb_frag_size(frag) / mss) + 1;
2918 }
2919 } else {
2920 hlen = 0;
2921 mss = 0;
2922 desc_cnt = EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
2923 for (i = 0; i < frag_num; i++) {
2924 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2925 desc_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
2926 MAX_DATA_PWR_TX_DES);
2927 }
2928 }
2929
2930 emac_print("%s: skb=0x%x, skb->len=%d skb_len=%d mss=%d frag_num=%d hlen=%d\n",
2931 __func__, (unsigned)skb, skb->len, skb_len, mss, frag_num, hlen);
2932
2933#ifdef EMAC_DEBUG
2934 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, skb->data, skb_len, 0);
2935#endif
2936 /* disable hard interrupt on local CPUs */
2937#ifndef CONFIG_ASR_EMAC_NAPI
2938 local_irq_save(ulFlags);
2939#endif
2940 if (!spin_trylock(&priv->spTxLock)) {
2941 pr_err("Collision detected\n");
2942#ifndef CONFIG_ASR_EMAC_NAPI
2943 local_irq_restore(ulFlags);
2944#endif
2945 return NETDEV_TX_BUSY;
2946 }
2947
2948 /* check whether sufficient free descriptors are there */
2949 if (EMAC_DESC_UNUSED(tx_ring) < (desc_cnt + 2)) {
2950 pr_err_ratelimited("TSO Descriptors are not free\n");
2951 netif_stop_queue(ndev);
2952#ifndef CONFIG_ASR_EMAC_NAPI
2953 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
2954#else
2955 spin_unlock(&priv->spTxLock);
2956#endif
2957 return NETDEV_TX_BUSY;
2958 }
2959
2960 priv->tx_count_frames += desc_cnt;
2961 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2962 priv->hwts_tx_en))
2963 ioc = 1;
2964 else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
2965 ioc = 1;
2966 else
2967 ioc = 0;
2968
2969 if (ioc)
2970 priv->tx_count_frames = 0;
2971
2972 skb_tx_timestamp(skb);
2973 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2974 priv->hwts_tx_en)) {
2975 /* declare that device is doing timestamping */
2976 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2977 timestamp = 1;
2978 }
2979
2980 offset = 0;
2981 desc_cnt = 0;
2982 i = fst = tx_ring->nxt_use;
2983 do {
2984 payload = min(skb_len, TSO_MAX_SEG_SIZE);
2985
2986 tx_buf = &tx_ring->desc_buf[i];
2987 tx_buf->dma_len = payload;
2988 pbuf = skb->data + offset;
2989 tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pbuf,
2990 payload, DMA_TO_DEVICE);
2991 tx_buf->buff_addr = pbuf;
2992 tx_buf->ulTimeStamp = jiffies;
2993
2994 skb_len -= payload;
2995 offset += payload;
2996
2997 i = emac_prepare_tso_desc(priv, i, tso, coe,
2998 tx_buf->dma_addr, payload, hlen, mss,
2999 (i == fst), (skb_len == 0 && frag_num == 0),
3000 ioc, timestamp, &desc_cnt);
3001 } while (skb_len > 0);
3002
3003 /* if the data is fragmented */
3004 for (f = 0; f < frag_num; f++) {
3005 frag = &(skb_shinfo(skb)->frags[f]);
3006 skb_len = skb_frag_size(frag);
3007 offset = skb_frag_off(frag);
3008
3009 emac_print("%s: frag %d len=%d\n", __func__, f, skb_len);
3010#ifdef EMAC_DEBUG
3011 {
3012 u8 *vaddr;
3013
3014 vaddr = kmap_atomic(skb_frag_page(frag));
3015 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
3016 32, 1, vaddr + offset, skb_len, 0);
3017 kunmap_atomic(vaddr);
3018 }
3019#endif
3020 do {
3021 payload = min(skb_len, TSO_MAX_SEG_SIZE);
3022
3023 tx_buf = &tx_ring->desc_buf[i];
3024 tx_buf->dma_len = payload;
3025 //pbuf = skb->data + offset;
3026 tx_buf->dma_addr = dma_map_page(&priv->pdev->dev,
3027 skb_frag_page(frag),
3028 offset, payload,
3029 DMA_TO_DEVICE);
3030 tx_buf->ulTimeStamp = jiffies;
3031
3032 skb_len -= payload;
3033 offset += payload;
3034
3035 i = emac_prepare_tso_desc(priv, i, tso, coe,
3036 tx_buf->dma_addr, payload, 0, mss,
3037 (i == fst),
3038 (skb_len == 0 && f == (frag_num - 1)),
3039 ioc, timestamp, &desc_cnt);
3040 } while (skb_len > 0);
3041 }
3042
3043 tx_ring->desc_buf[fst].skb = skb;
3044 tx_ring->desc_buf[fst].nxt_watch =
3045 (i == 0 ? tx_ring->total_cnt : 0) + i - 1;
3046
3047 wmb();
3048
3049 /* set first descriptor for this packet */
3050 pdesc = emac_get_tx_desc(priv, fst);
3051 emac_tx_update_fst_desc(pdesc, hlen, mss, tso, coe);
3052 print_desc((void *)pdesc, 16);
3053
3054 tx_ring->nxt_use = i;
3055
3056 ndev->stats.tx_packets++;
3057 ndev->stats.tx_bytes += skb->len;
3058 if (tso) {
3059 priv->hw_stats->tx_tso_pkts++;
3060 priv->hw_stats->tx_tso_bytes += skb->len;
3061 }
3062
3063 emac_wr_tso(priv, TSO_TX_POLL_DEMAND, 0xFF);
3064 /* Make sure there is space in the ring for the next send. */
3065 if (EMAC_DESC_UNUSED(tx_ring) < (MAX_SKB_FRAGS + 2)) {
3066 pr_debug_ratelimited("TSO Descriptors not enough, stop\n");
3067 netif_stop_queue(ndev);
3068 }
3069
3070#ifndef CONFIG_ASR_EMAC_NAPI
3071 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3072#else
3073 spin_unlock(&priv->spTxLock);
3074#endif
3075#ifdef CONFIG_ASR_EMAC_DDR_QOS
3076 emac_ddr_clk_scaling(priv);
3077#endif
3078
3079 if (!tso && !coe)
3080 emac_tx_timer_arm(priv);
3081
3082 return NETDEV_TX_OK;
3083}
3084
3085/* Name emac_start_xmit
3086 * Arguments pstSkb : pointer to sk_buff structure passed by upper layer
3087 * pstNetdev : pointer to net_device structure
3088 * Return Status: 0 - Success; non-zero - Fail
3089 * Description This function is called by upper layer to
3090 * handover the Tx packet to the driver
3091 * for sending it to the device.
3092 * Currently this is doing nothing but
3093 * simply to simulate the tx packet handling.
3094 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ioc;
	u32 frag_num;
	u32 skb_len;
	u32 tx_des_cnt = 0;
	u32 i;
#ifndef CONFIG_ASR_EMAC_NAPI
	unsigned long ulFlags;
#endif
#ifdef WAN_LAN_AUTO_ADAPT
	int vlan = 0;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
	struct vlan_hdr *vhdr;

	/*
	 * WAN/LAN auto-adapt: sniff outgoing DHCP DISCOVER/REQUEST so the
	 * driver can later match the reply and learn the WAN switch port
	 * (IP175D switch PHY only).
	 */
	{ struct ethhdr *myeth = (struct ethhdr *)skb->data;
	if (myeth->h_proto == htons(ETH_P_8021Q)) {
		vhdr = (struct vlan_hdr *)((u8 *)myeth + sizeof(struct ethhdr));
		vlan = ntohs(vhdr->h_vlan_TCI);
		iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr) + VLAN_HLEN);
	}
	else if (myeth->h_proto == htons(ETH_P_IP))
		iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr));

	if (iph && iph->protocol == IPPROTO_UDP) {
		udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
		if ((htons(udph->dest) == 67 && htons(udph->source) == 68)) {
			/* offset 242: DHCP message-type option in the payload */
			u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
			u8 dhcp_type = *(udp_data + 242);
			if ((DHCP_DISCOVER == dhcp_type || DHCP_REQUEST == dhcp_type)
				&& (0 == priv->dhcp)) {
				priv->dhcp = DHCP_SEND_REQ;
				if (ndev->phydev->phy_id == IP175D_PHY_ID)
					priv->vlan_port = vlan;
				else
					priv->vlan_port = -1;
			}
		}
	}
	}
#endif

	/* pstSkb->len: is the full length of the data in the packet
	 * pstSkb->data_len: the number of bytes in skb fragments
	 * u16Len: length of the first fragment
	 */
	skb_len = skb->len - skb->data_len;

	if (skb->len <= 0) {
		pr_err("Packet length is zero\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Offload-capable hardware: route through the TSO/COE path. */
	if (priv->tso) {
		bool tso = false, coe = false;

		if (skb_is_gso(skb) &&
		    (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			tso = true;
			coe = true;
		} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			coe = true;
		}

		/* WR: COE need skb->data to be 2 bytes alinged */
		if (coe && !IS_ALIGNED((unsigned long)skb->data, 2))
			pskb_expand_head(skb, 1, 0, GFP_ATOMIC);

		return emac_tso_xmit(skb, ndev, tso, coe);
	}

	/* increment the count if len exceeds MAX_DATA_LEN_TX_DES */
	tx_des_cnt += EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);

	frag_num = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < frag_num; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx_des_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
					     MAX_DATA_PWR_TX_DES);
	}

	/* disable hard interrupt on local CPUs */
#ifndef CONFIG_ASR_EMAC_NAPI
	local_irq_save(ulFlags);
#endif
	if (!spin_trylock(&priv->spTxLock)) {
		pr_err("Collision detected\n");
#ifndef CONFIG_ASR_EMAC_NAPI
		local_irq_restore(ulFlags);
#endif
		return NETDEV_TX_BUSY;
	}

	/* check whether sufficient free descriptors are there */
	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (tx_des_cnt + 2)) {
		pr_err_ratelimited("Descriptors are not free\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}

	/*
	 * IRQ coalescing: force a completion interrupt for timestamped
	 * packets, otherwise only every EMAC_TX_FRAMES frames.
	 */
	priv->tx_count_frames += frag_num + 1;
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en))
		ioc = 1;
	else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
		ioc = 1;
	else
		ioc = 0;

	if (ioc)
		priv->tx_count_frames = 0;

	tx_des_cnt = emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, frag_num, ioc);
	if (tx_des_cnt == 0) {
		pr_err("Could not acquire memory from pool\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* Make sure there is space in the ring for the next send. */
	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (MAX_SKB_FRAGS + 2))
		netif_stop_queue(ndev);

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
	spin_unlock(&priv->spTxLock);
#endif
#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif
	/* Arm the cleanup timer in case no completion interrupt was requested. */
	emac_tx_timer_arm(priv);
	return NETDEV_TX_OK;
}
3245
3246u32 ReadTxStatCounters(struct emac_priv *priv, u8 cnt)
3247{
3248 u32 val, tmp;
3249
3250 val = 0x8000 | cnt;
3251 emac_wr(priv, MAC_TX_STATCTR_CONTROL, val);
3252 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3253
3254 while (val & 0x8000)
3255 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3256
3257 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH);
3258 val = tmp << 16;
3259 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW);
3260 val |= tmp;
3261
3262 return val;
3263}
3264
3265u32 ReadRxStatCounters(struct emac_priv *priv, u8 cnt)
3266{
3267 u32 val, tmp;
3268
3269 val = 0x8000 | cnt;
3270 emac_wr(priv, MAC_RX_STATCTR_CONTROL, val);
3271 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3272
3273 while (val & 0x8000)
3274 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3275
3276 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH);
3277 val = tmp << 16;
3278 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW);
3279 val |= tmp;
3280 return val;
3281}
3282
3283/* Name emac_set_mac_address
3284 * Arguments pstNetdev : pointer to net_device structure
3285 * addr : pointer to addr
3286 * Return Status: 0 - Success; non-zero - Fail
3287 * Description It is called by upper layer to set the mac address.
3288 */
3289static int emac_set_mac_address(struct net_device *ndev, void *addr)
3290{
3291 struct sockaddr *sa = addr;
3292 struct emac_priv *priv = netdev_priv(ndev);
3293
3294 if (!is_valid_ether_addr(sa->sa_data))
3295 return -EADDRNOTAVAIL;
3296
3297 memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
3298
3299 emac_set_mac_addr(priv, ndev->dev_addr);
3300
3301 emac_set_fc_source_addr(priv, ndev->dev_addr);
3302
3303 return 0;
3304}
3305
3306/* Name emac_change_mtu
3307 * Arguments pstNetdev : pointer to net_device structure
3308 * u32MTU : maximum transmit unit value
3309 * Return Status: 0 - Success; non-zero - Fail
3310 * Description It is called by upper layer to set the MTU value.
3311 */
3312static int emac_change_mtu(struct net_device *ndev, int mtu)
3313{
3314 struct emac_priv *priv = netdev_priv(ndev);
3315 u32 frame_len;
3316
3317 if (netif_running(ndev)) {
3318 pr_err("must be stopped to change its MTU\n");
3319 return -EBUSY;
3320 }
3321
3322 frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3323
3324 if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE ||
3325 frame_len > EMAC_SKBRB_MAX_PAYLOAD) {
3326 pr_err("Invalid MTU setting\n");
3327 return -EINVAL;
3328 }
3329
3330 if (frame_len <= EMAC_RX_BUFFER_1024)
3331 priv->u32RxBufferLen = EMAC_RX_BUFFER_1024;
3332 else
3333 priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
3334
3335 ndev->mtu = mtu;
3336
3337 return 0;
3338}
3339
/*
 * emac_reset - recover the controller after a requested reset.
 *
 * Runs from the tx_timeout worker.  Acts only when EMAC_RESET_REQUESTED
 * was set and the interface is not down.  Recovery is a full close/open
 * of the netdev under the RTNL lock; the EMAC_RESETING bit serializes
 * concurrent reset attempts.
 */
static void emac_reset(struct emac_priv *priv)
{
	if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(EMAC_DOWN, &priv->state))
		return;

	netdev_dbg(priv->ndev, "Reset controller.\n");

	rtnl_lock();
	//netif_trans_update(priv->ndev);
	/* Wait for any in-flight reset to finish before starting ours. */
	while (test_and_set_bit(EMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	dev_close(priv->ndev);
	dev_open(priv->ndev, NULL);
	clear_bit(EMAC_RESETING, &priv->state);
	rtnl_unlock();
}
3359
3360static void emac_tx_timeout_task(struct work_struct *work)
3361{
3362 struct emac_priv *priv = container_of(work,
3363 struct emac_priv, tx_timeout_task);
3364 emac_reset(priv);
3365 clear_bit(EMAC_TASK_SCHED, &priv->state);
3366}
3367
3368/* Name emac_tx_timeout
3369 * Arguments pstNetdev : pointer to net_device structure
3370 * Return none
3371 * Description It is called by upper layer
3372 * for packet transmit timeout.
3373 */
3374static void emac_tx_timeout(struct net_device *ndev)
3375{
3376 struct emac_priv *priv = netdev_priv(ndev);
3377
3378 netdev_info(ndev, "TX timeout\n");
3379 register_dump(priv);
3380
3381 netif_carrier_off(priv->ndev);
3382 set_bit(EMAC_RESET_REQUESTED, &priv->state);
3383
3384 if (!test_bit(EMAC_DOWN, &priv->state) &&
3385 !test_and_set_bit(EMAC_TASK_SCHED, &priv->state))
3386 schedule_work(&priv->tx_timeout_task);
3387}
3388
3389static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable)
3390{
3391 const struct emac_regdata *regdata = priv->regdata;
3392 void __iomem* apmu;
3393 u32 val;
3394
3395 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3396 if (apmu == NULL) {
3397 pr_err("error to ioremap APMU base\n");
3398 return -ENOMEM;
3399 }
3400
3401 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3402 if (enable) {
3403 val |= 0x1;
3404 } else {
3405 val &= ~0x1;
3406 }
3407 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3408 iounmap(apmu);
3409 return 0;
3410}
3411
3412static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx)
3413{
3414 const struct emac_regdata *regdata = priv->regdata;
3415 void __iomem* apmu;
3416 u32 val, dline;
3417 u8 phase, tmp;
3418
3419 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3420 if (apmu == NULL) {
3421 pr_err("error to ioremap APMU base\n");
3422 return -ENOMEM;
3423 }
3424
3425 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3426 if (is_tx) {
3427 if (regdata->rgmii_tx_clk_src_sel_shift > 0) {
3428 phase = (priv->tx_clk_config >> 16) & 0x1;
3429 val &= ~(0x1 << regdata->rgmii_tx_clk_src_sel_shift);
3430 val |= phase << regdata->rgmii_tx_clk_src_sel_shift;
3431 }
3432
3433 if (regdata->rgmii_tx_dline_reg_offset > 0) {
3434 /* Set RGMIII TX DLINE */
3435 dline = readl(apmu + regdata->rgmii_tx_dline_reg_offset);
3436
3437 /* delay code */
3438 tmp = (priv->tx_clk_config >> 8) &
3439 regdata->rgmii_tx_delay_code_mask;
3440 dline &= ~(regdata->rgmii_tx_delay_code_mask <<
3441 regdata->rgmii_tx_delay_code_shift);
3442 dline |= tmp << regdata->rgmii_tx_delay_code_shift;
3443
3444 /* delay step */
3445 tmp = priv->tx_clk_config &
3446 regdata->rgmii_tx_delay_step_mask;
3447 dline &= ~(regdata->rgmii_tx_delay_step_mask <<
3448 regdata->rgmii_tx_delay_step_shift);
3449 dline |= tmp << regdata->rgmii_tx_delay_step_shift;
3450
3451 /* delay line enable */
3452 dline |= 1 << regdata->rgmii_tx_delay_enable_shift;
3453 writel(dline, apmu + regdata->rgmii_tx_dline_reg_offset);
3454 pr_info("===> emac set tx dline 0x%x 0x%x", dline,
3455 readl(apmu + regdata->rgmii_tx_dline_reg_offset));
3456 }
3457 } else {
3458 if (regdata->rgmii_rx_clk_src_sel_shift > 0) {
3459 phase = (priv->rx_clk_config >> 16) & 0x1;
3460 val &= ~(0x1 << regdata->rgmii_rx_clk_src_sel_shift);
3461 val |= phase << regdata->rgmii_rx_clk_src_sel_shift;
3462 }
3463
3464 /* Set RGMIII RX DLINE */
3465 if (regdata->rgmii_rx_dline_reg_offset > 0) {
3466 dline = readl(apmu + regdata->rgmii_rx_dline_reg_offset);
3467
3468 /* delay code */
3469 tmp = (priv->rx_clk_config >> 8) &
3470 regdata->rgmii_rx_delay_code_mask;
3471 dline &= ~(regdata->rgmii_rx_delay_code_mask <<
3472 regdata->rgmii_rx_delay_code_shift);
3473 dline |= tmp << regdata->rgmii_rx_delay_code_shift;
3474
3475 /* delay step */
3476 tmp = priv->rx_clk_config &
3477 regdata->rgmii_rx_delay_step_mask;
3478 dline &= ~(regdata->rgmii_rx_delay_step_mask <<
3479 regdata->rgmii_rx_delay_step_shift);
3480 dline |= tmp << regdata->rgmii_rx_delay_step_shift;
3481
3482 /* delay line enable */
3483 dline |= 1 << regdata->rgmii_rx_delay_enable_shift;
3484 writel(dline, apmu + regdata->rgmii_rx_dline_reg_offset);
3485 pr_info("===> emac set rx dline 0x%x 0x%x", dline,
3486 readl(apmu + regdata->rgmii_rx_dline_reg_offset));
3487 }
3488 }
3489 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3490 pr_info("%s phase:%d direction:%s 0x%x 0x%x\n", __func__, phase,
3491 is_tx ? "tx": "rx", val,
3492 readl(apmu + regdata->clk_rst_ctrl_reg_offset));
3493
3494 iounmap(apmu);
3495 return 0;
3496}
3497
3498static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx)
3499{
3500 const struct emac_regdata *regdata = priv->regdata;
3501 void __iomem* apmu;
3502 u32 val;
3503 u8 phase, tmp;
3504
3505 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3506 if (apmu == NULL) {
3507 pr_err("error to ioremap APMU base\n");
3508 return -ENOMEM;
3509 }
3510
3511 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3512 if (is_tx) {
3513 /* rmii tx clock select */
3514 if (regdata->rmii_tx_clk_sel_shift > 0) {
3515 tmp = (priv->tx_clk_config >> 16) & 0x1;
3516 val &= ~(0x1 << regdata->rmii_tx_clk_sel_shift);
3517 val |= tmp << regdata->rmii_tx_clk_sel_shift;
3518 }
3519
3520 /* rmii ref clock selct, 1 - from soc, 0 - from phy */
3521 if (regdata->rmii_rx_clk_sel_shift) {
3522 tmp = (priv->tx_clk_config >> 24) & 0x1;
3523 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3524 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3525 }
3526 } else {
3527 /* rmii rx clock select */
3528 if (regdata->rmii_rx_clk_sel_shift > 0) {
3529 tmp = (priv->rx_clk_config >> 16) & 0x1;
3530 val &= ~(0x1 << regdata->rmii_rx_clk_sel_shift);
3531 val |= tmp << regdata->rmii_rx_clk_sel_shift;
3532 }
3533
3534 /* rmii ref clock selct, 1 - from soc, 0 - from phy */
3535 if (regdata->rmii_rx_clk_sel_shift) {
3536 tmp = (priv->tx_clk_config >> 24) & 0x1;
3537 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3538 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3539 }
3540 }
3541
3542 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3543 pr_debug("%s phase:%d direction:%s\n", __func__, phase,
3544 is_tx ? "tx": "rx");
3545
3546 iounmap(apmu);
3547 return 0;
3548}
3549
3550static int clk_phase_set(struct emac_priv *priv, bool is_tx)
3551{
3552 if (emac_is_rmii_interface(priv)) {
3553 clk_phase_rmii_set(priv, is_tx);
3554 } else {
3555 clk_phase_rgmii_set(priv, is_tx);
3556 }
3557
3558 return 0;
3559}
3560
3561#ifdef CONFIG_DEBUG_FS
3562static int clk_phase_show(struct seq_file *s, void *data)
3563{
3564 struct emac_priv *priv = s->private;
3565 bool rmii_intf;
3566 rmii_intf = emac_is_rmii_interface(priv);
3567
3568 seq_printf(s, "Emac MII Interface : %s\n", rmii_intf ? "RMII" : "RGMII");
3569 seq_printf(s, "Current rx clk config : %d\n", priv->rx_clk_config);
3570 seq_printf(s, "Current tx clk config : %d\n", priv->tx_clk_config);
3571 return 0;
3572}
3573
3574static ssize_t clk_tuning_write(struct file *file,
3575 const char __user *user_buf,
3576 size_t count, loff_t *ppos)
3577{
3578 struct emac_priv *priv =
3579 ((struct seq_file *)(file->private_data))->private;
3580 int err;
3581 int clk_phase;
3582 char buff[TUNING_CMD_LEN] = { 0 };
3583 char mode_str[20];
3584
3585 if (count > TUNING_CMD_LEN) {
3586 pr_err("count must be less than 50.\n");
3587 return count;
3588 }
3589 err = copy_from_user(buff, user_buf, count);
3590 if (err)
3591 return err;
3592
3593 err = sscanf(buff, "%s %d", (char *)&mode_str, &clk_phase);
3594 if (err != 2) {
3595 pr_err("debugfs para count error\n");
3596 return count;
3597 }
3598 pr_info("input:%s %d\n", mode_str, clk_phase);
3599
3600 if (strcmp(mode_str, "tx") == 0) {
3601 priv->tx_clk_config = clk_phase;
3602 clk_phase_set(priv, TX_PHASE);
3603 } else if (strcmp(mode_str, "rx") == 0) {
3604 priv->rx_clk_config = clk_phase;
3605 clk_phase_set(priv, RX_PHASE);
3606 } else {
3607 pr_err("command error\n");
3608 pr_err("eg: echo rx 1 > clk_tuning\n");
3609 return count;
3610 }
3611
3612 return count;
3613}
3614
/* debugfs open: route reads of the clk_tuning file through
 * clk_phase_show() via the seq_file single-shot helper.
 */
static int clk_tuning_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_phase_show, inode->i_private);
}
3619
/* debugfs "clk_tuning" file operations: read shows the current clock
 * phase settings (clk_phase_show), write accepts "tx <cfg>" / "rx <cfg>"
 * commands (clk_tuning_write).
 */
const struct file_operations clk_tuning_fops = {
	.open = clk_tuning_open,
	.write = clk_tuning_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3627
3628#endif
3629
3630static int emac_power_down(struct emac_priv *priv)
3631{
3632 if (priv->rst_gpio >= 0)
3633 gpio_direction_output(priv->rst_gpio,
3634 priv->low_active_rst ? 0 : 1);
3635
3636 if (priv->ldo_gpio >= 0)
3637 gpio_direction_output(priv->ldo_gpio,
3638 priv->low_active_ldo ? 0 : 1);
3639
3640 return 0;
3641}
3642
3643static int emac_power_up(struct emac_priv *priv)
3644{
3645 u32 *delays_ldo = priv->delays_ldo;
3646 u32 *delays_rst = priv->delays_rst;
3647 int rst_gpio = priv->rst_gpio;
3648 int low_active_rst = priv->low_active_rst;
3649 int ldo_gpio = priv->ldo_gpio;
3650 int low_active_ldo = priv->low_active_ldo;
3651
3652 if (rst_gpio >= 0) {
3653 gpio_direction_output(rst_gpio, low_active_rst ? 0 : 1);
3654 }
3655
3656 if (ldo_gpio >= 0) {
3657 gpio_direction_output(ldo_gpio, low_active_ldo ? 0 : 1);
3658 if (delays_ldo[0]) {
3659 gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
3660 msleep(DIV_ROUND_UP(delays_ldo[0], 1000));
3661 }
3662
3663 gpio_set_value(ldo_gpio, low_active_ldo ? 0 : 1);
3664 if (delays_ldo[1])
3665 msleep(DIV_ROUND_UP(delays_ldo[1], 1000));
3666
3667 gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
3668 if (delays_ldo[2])
3669 msleep(DIV_ROUND_UP(delays_ldo[2], 1000));
3670 }
3671
3672 if (rst_gpio >= 0) {
3673 if (delays_rst[0]) {
3674 gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
3675 msleep(DIV_ROUND_UP(delays_rst[0], 1000));
3676 }
3677
3678 gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
3679 if (delays_rst[1])
3680 msleep(DIV_ROUND_UP(delays_rst[1], 1000));
3681
3682 gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
3683 if (delays_rst[2])
3684 msleep(DIV_ROUND_UP(delays_rst[2], 1000));
3685 }
3686
3687 return 0;
3688}
3689
/*
 * emac_mii_reset - mii_bus ->reset hook: power-cycle the external PHY
 * @bus: MII bus whose ->priv is our emac_priv
 *
 * Reads the optional reset/LDO GPIO configuration (GPIO number, active
 * level, three delay values in microseconds) from the device tree,
 * requests the GPIOs, then runs the power-up sequence with the AXI
 * clock gated (some PHYs do not tolerate MDC/MDIO activity while being
 * powered or reset).  Always returns 0: a failed GPIO request simply
 * leaves that GPIO unused.
 */
static int emac_mii_reset(struct mii_bus *bus)
{
	struct emac_priv *priv = bus->priv;
	struct device *dev = &priv->pdev->dev;
	struct device_node *np = dev->of_node;
	int rst_gpio, ldo_gpio;
	int low_active_ldo, low_active_rst;
	u32 *delays_ldo = priv->delays_ldo;
	u32 *delays_rst = priv->delays_rst;

	/* Negative means "not configured"; emac_power_up/down skip these. */
	priv->rst_gpio = -1;
	priv->ldo_gpio = -1;

	if (!np)
		return 0;

	rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	if (rst_gpio >= 0) {
		low_active_rst = of_property_read_bool(np, "reset-active-low");
		of_property_read_u32_array(np, "reset-delays-us", delays_rst, 3);

		if (gpio_request(rst_gpio, "mdio-reset")) {
			printk("emac: reset-gpio=%d request failed\n",
				rst_gpio);
			return 0;
		}
		priv->rst_gpio = rst_gpio;
		priv->low_active_rst = low_active_rst;
	}

	ldo_gpio = of_get_named_gpio(np, "ldo-gpio", 0);
	if (ldo_gpio >= 0) {
		low_active_ldo = of_property_read_bool(np, "ldo-active-low");
		of_property_read_u32_array(np, "ldo-delays-us", delays_ldo, 3);

		if (gpio_request(ldo_gpio, "mdio-ldo"))
			return 0;

		priv->ldo_gpio = ldo_gpio;
		priv->low_active_ldo = low_active_ldo;
	}

	/*
	 * Some device not allow MDC/MDIO operation during power on/reset,
	 * disable AXI clock to shutdown mdio clock.
	 */
	clk_disable_unprepare(priv->clk);

	emac_power_up(priv);

	clk_prepare_enable(priv->clk);

	emac_reset_hw(priv);

	return 0;
}
3746
3747static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
3748{
3749 struct emac_priv *priv = bus->priv;
3750 u32 cmd = 0;
3751 u32 val;
3752
3753 if (!__clk_is_enabled(priv->clk))
3754 return -EBUSY;
3755
3756 mutex_lock(&priv->mii_mutex);
3757 cmd |= phy_addr & 0x1F;
3758 cmd |= (regnum & 0x1F) << 5;
3759 cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
3760
3761 /*
3762 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
3763 * change during MDIO read/write
3764 */
3765#ifdef CONFIG_DDR_DEVFREQ
3766 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3767#endif
3768 emac_wr(priv, MAC_MDIO_DATA, 0x0);
3769 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3770
3771 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3772 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
3773 return -EBUSY;
3774
3775 val = emac_rd(priv, MAC_MDIO_DATA);
3776
3777#ifdef CONFIG_DDR_DEVFREQ
3778 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3779#endif
3780 mutex_unlock(&priv->mii_mutex);
3781 return val;
3782}
3783
3784static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
3785 u16 value)
3786{
3787 struct emac_priv *priv = bus->priv;
3788 u32 cmd = 0;
3789 u32 val;
3790
3791 if (!__clk_is_enabled(priv->clk))
3792 return -EBUSY;
3793
3794 mutex_lock(&priv->mii_mutex);
3795 emac_wr(priv, MAC_MDIO_DATA, value);
3796
3797 cmd |= phy_addr & 0x1F;
3798 cmd |= (regnum & 0x1F) << 5;
3799 cmd |= MREGBIT_START_MDIO_TRANS;
3800
3801 /*
3802 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
3803 * change during MDIO read/write
3804 */
3805#ifdef CONFIG_DDR_DEVFREQ
3806 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3807#endif
3808 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3809
3810 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3811 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
3812 return -EBUSY;
3813
3814#ifdef CONFIG_DDR_DEVFREQ
3815 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3816#endif
3817
3818 mutex_unlock(&priv->mii_mutex);
3819 return 0;
3820}
3821
/*
 * emac_adjust_link - phylib link-change callback
 * @dev: net device whose attached PHY reported a state change
 *
 * Mirrors the negotiated duplex and speed into MAC_GLOBAL_CONTROL while
 * the link is up.  Skipped entirely for fixed-link setups.  Under
 * WAN_LAN_AUTO_ADAPT, additionally translates link changes into hotplug
 * uevents and schedules the delayed DHCP-snooping work; for the IP175D
 * switch, phydev->link is treated as a per-port bitmask (one event per
 * changed port).
 */
static void emac_adjust_link(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;
	struct emac_priv *priv = netdev_priv(dev);
	u32 ctrl;
#ifdef WAN_LAN_AUTO_ADAPT
	int status_change = 0;
	int addr = 0;
	int i = 0;
#endif
	if (!phydev || priv->fix_link)
		return;

	if (phydev->link) {
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);

		/* Now we make sure that we can be in full duplex mode
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->duplex) {
			if (!phydev->duplex)
				ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
			else
				ctrl |= MREGBIT_FULL_DUPLEX_MODE;
			priv->duplex = phydev->duplex;
		}

		if (phydev->speed != priv->speed) {
			ctrl &= ~MREGBIT_SPEED;

			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= MREGBIT_SPEED_1000M;
				break;
			case SPEED_100:
				ctrl |= MREGBIT_SPEED_100M;
				break;
			case SPEED_10:
				ctrl |= MREGBIT_SPEED_10M;
				break;
			default:
				pr_err("broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			/* Only cache speeds the MAC actually supports. */
			if (phydev->speed != SPEED_UNKNOWN) {
				priv->speed = phydev->speed;
			}
		}
		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
		pr_info("%s link:%d speed:%dM duplex:%s\n", __func__,
			phydev->link, phydev->speed,
			phydev->duplex ? "Full": "Half");
	}

#ifdef WAN_LAN_AUTO_ADAPT
	if(phydev->phy_id == IP175D_PHY_ID) {
		/* IP175D: diff the per-port link bitmask and emit one
		 * uevent per changed port.
		 */
		if (phydev->link != priv->link) {
			for (i=0; i<16; i++) {
				if((priv->link & (1<<i)) != (phydev->link & (1<<i))) {
					addr = i;
					if (phydev->link & (1<<i)) {
						/* link up */
						printk("eth0 port%d link up\n", addr);
						priv->dhcp = 0;
						emac_sig_workq(CARRIER_UP_IP175D, addr);
						if(priv->dhcp_delaywork)
							cancel_delayed_work(&priv->dhcp_work);
						priv->dhcp_delaywork = 1;
						schedule_delayed_work(&priv->dhcp_work, 25*HZ);
					} else {
						/* link down */
						printk("eth0 port%d link down\n", addr);
						priv->dhcp = 0;
						if(priv->dhcp_delaywork)
							cancel_delayed_work(&priv->dhcp_work);
						priv->dhcp_delaywork = 0;
						emac_sig_workq(CARRIER_DOWN_IP175D, addr);
					}
				}
			}
			priv->link = phydev->link;
		}
	} else {
		if (phydev->link != priv->link) {
			priv->link = phydev->link;
			status_change = 1;
		}

		if (status_change) {
			if (phydev->link) {
				/* link up */
				priv->dhcp = 0;
				emac_sig_workq(CARRIER_UP, 0);
				if(priv->dhcp_delaywork)
					cancel_delayed_work(&priv->dhcp_work);
				priv->dhcp_delaywork = 1;
				schedule_delayed_work(&priv->dhcp_work, 25*HZ);

			} else {
				/* link down */
				priv->dhcp = 0;
				if(priv->dhcp_delaywork)
					cancel_delayed_work(&priv->dhcp_work);
				priv->dhcp_delaywork = 0;
				emac_sig_workq(CARRIER_DOWN, 0);
			}
		}
	}
#endif
}
3933
3934static int emac_phy_connect(struct net_device *dev)
3935{
3936 struct phy_device *phydev;
3937 int phy_interface;
3938 struct device_node *np;
3939 struct emac_priv *priv = netdev_priv(dev);
3940
3941 np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0);
3942 if (!np) {
3943 if (priv->fix_link) {
3944 emac_phy_interface_config(priv, priv->interface);
3945 if (priv->interface == PHY_INTERFACE_MODE_RGMII)
3946 pinctrl_select_state(priv->pinctrl,
3947 priv->rgmii_pins);
3948 emac_config_phy_interrupt(priv, 0);
3949 return 0;
3950 }
3951 return -ENODEV;
3952 }
3953
3954 printk("%s: %s\n",__func__, np->full_name);
3955 phy_interface = of_get_phy_mode(np);
3956 emac_phy_interface_config(priv, phy_interface);
3957 if (phy_interface != PHY_INTERFACE_MODE_RMII)
3958 pinctrl_select_state(priv->pinctrl, priv->rgmii_pins);
b.liub17525e2025-05-14 17:22:29 +08003959 phydev = phy_find_first(priv->mii);
3960 if (!phydev) {
3961 printk("%s: no PHY found\n", dev->name);
3962 return -ENODEV;
3963 }
3964 phy_connect_direct(dev, phydev, emac_adjust_link, phy_interface); /* phy_start_machine */
3965 //phydev = of_phy_connect(dev, np,&emac_adjust_link, 0, phy_interface);
b.liue9582032025-04-17 19:18:16 +08003966 if (IS_ERR_OR_NULL(phydev)) {
3967 pr_err("Could not attach to PHY\n");
3968 emac_power_down(priv);
3969 if (!phydev)
3970 return -ENODEV;
3971 return PTR_ERR(phydev);
3972 }
3973
3974 if (!phydev->phy_id || phydev->phy_id == 0xffffffff) {
3975 pr_err("Not valid phy_id=0x%x\n", phydev->phy_id);
3976 emac_power_down(priv);
3977 return -ENODEV;
3978 }
3979
3980 if(phy_interrupt_is_valid(phydev))
3981 emac_config_phy_interrupt(priv, 1);
3982 else
3983 emac_config_phy_interrupt(priv, 0);
3984
3985 //phydev->supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
3986 pr_info("%s: %s: attached to PHY (UID 0x%x)"
3987 " Link = %d irq=%d\n", __func__,
3988 dev->name, phydev->phy_id, phydev->link, phydev->irq);
3989 dev->phydev = phydev;
3990
3991#ifdef WAN_LAN_AUTO_ADAPT
3992 if(phydev->phy_id == IP175D_PHY_ID)
3993 emac_sig_workq(PHY_IP175D_CONNECT, 0);
3994#endif
3995
3996 return 0;
3997}
3998
3999static int emac_mdio_init(struct emac_priv *priv)
4000{
4001 struct device_node *mii_np;
4002 struct device *dev = &priv->pdev->dev;
4003 int ret;
4004
4005 mii_np = of_get_child_by_name(dev->of_node, "mdio-bus");
4006 if (!mii_np) {
4007 dev_err(dev, "no %s child node found", "mdio-bus");
4008 return -ENODEV;
4009 }
4010
4011 if (!of_device_is_available(mii_np)) {
4012 ret = -ENODEV;
4013 goto err_put_node;
4014 }
4015
4016 priv->mii = mdiobus_alloc();//devm_mdiobus_alloc(dev);
4017 if (!priv->mii) {
4018 ret = -ENOMEM;
4019 goto err_put_node;
4020 }
4021 priv->mii->priv = priv;
4022 //priv->mii->irq = priv->mdio_irqs;
4023 priv->mii->name = "emac mii";
4024 priv->mii->reset = emac_mii_reset;
4025 priv->mii->read = emac_mii_read;
4026 priv->mii->write = emac_mii_write;
4027 snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%pOFn",
4028 mii_np);
4029 priv->mii->parent = dev;
4030 priv->mii->phy_mask = 0xffffffff;
4031 ret = of_mdiobus_register(priv->mii, mii_np);
4032
4033err_put_node:
4034 of_node_put(mii_np);
4035 return ret;
4036}
4037
4038static int emac_mdio_deinit(struct emac_priv *priv)
4039{
4040 if (!priv->mii)
4041 return 0;
4042
4043 mdiobus_unregister(priv->mii);
4044 return 0;
4045}
4046
4047static int emac_get_ts_info(struct net_device *dev,
4048 struct ethtool_ts_info *info)
4049{
4050 struct emac_priv *priv = netdev_priv(dev);
4051
4052 if (priv->ptp_support) {
4053
4054 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4055 SOF_TIMESTAMPING_TX_HARDWARE |
4056 SOF_TIMESTAMPING_RX_SOFTWARE |
4057 SOF_TIMESTAMPING_RX_HARDWARE |
4058 SOF_TIMESTAMPING_SOFTWARE |
4059 SOF_TIMESTAMPING_RAW_HARDWARE;
4060
4061 if (priv->ptp_clock)
4062 info->phc_index = ptp_clock_index(priv->ptp_clock);
4063
4064 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
4065 info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
4066 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
4067 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
4068 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
4069 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
4070 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
4071 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
4072 (1 << HWTSTAMP_FILTER_ALL));
4073 if (priv->regdata->ptp_rx_ts_all_events) {
4074 info->rx_filters |=
4075 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
4076 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
4077 }
4078
4079 return 0;
4080 } else
4081 return ethtool_op_get_ts_info(dev, info);
4082}
4083
4084static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4085{
4086 int i;
4087
4088 switch (stringset) {
4089 case ETH_SS_STATS:
4090 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4091 memcpy(data, emac_ethtool_stats[i].str, ETH_GSTRING_LEN);
4092 data += ETH_GSTRING_LEN;
4093 }
4094 break;
4095 }
4096}
4097
4098static int emac_get_sset_count(struct net_device *dev, int sset)
4099{
4100 switch (sset) {
4101 case ETH_SS_STATS:
4102 return ARRAY_SIZE(emac_ethtool_stats);
4103 default:
4104 return -EOPNOTSUPP;
4105 }
4106}
4107
/*
 * emac_stats_update - refresh the software copy of the hardware counters
 *
 * Fills priv->hw_stats by reading all TX counters, then all RX counters,
 * then the DMA missed-frame counter and the two software TSO counters.
 *
 * NOTE(review): this writes through a raw u32* overlay and relies on
 * struct emac_hw_stats laying out exactly MAX_TX_STATS_NUM u32 TX fields
 * followed by the RX fields and the trailing counters, with no padding —
 * keep the struct and the emac_ethtool_stats[] offsets in sync.
 * Called with hwstats->stats_lock held (see emac_get_ethtool_stats).
 */
static void emac_stats_update(struct emac_priv *priv)
{
	struct emac_hw_stats *hwstats = priv->hw_stats;
	int i;
	u32 *p;

	p = (u32 *)(hwstats);

	for (i = 0; i < MAX_TX_STATS_NUM; i++)
		*(p + i) = ReadTxStatCounters(priv, i);

	/* RX counters start immediately after the TX block. */
	p = (u32 *)hwstats + MAX_TX_STATS_NUM;

	for (i = 0; i < MAX_RX_STATS_NUM; i++)
		*(p + i) = ReadRxStatCounters(priv, i);

	*(p + i++) = emac_rd(priv, DMA_MISSED_FRAME_COUNTER);

	*(p + i++) = hwstats->tx_tso_pkts;
	*(p + i++) = hwstats->tx_tso_bytes;
}
4129
4130static void emac_get_ethtool_stats(struct net_device *dev,
4131 struct ethtool_stats *stats, u64 *data)
4132{
4133 struct emac_priv *priv = netdev_priv(dev);
4134 struct emac_hw_stats *hwstats = priv->hw_stats;
4135 u32 *data_src;
4136 u64 *data_dst;
4137 int i;
4138
4139 if (netif_running(dev) && netif_device_present(dev)) {
4140 if (spin_trylock_bh(&hwstats->stats_lock)) {
4141 emac_stats_update(priv);
4142 spin_unlock_bh(&hwstats->stats_lock);
4143 }
4144 }
4145
4146 data_dst = data;
4147
4148 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4149 data_src = (u32 *)hwstats + emac_ethtool_stats[i].offset;
4150 *data_dst++ = (u64)(*data_src);
4151 }
4152}
4153
/* ethtool ->get_regs_len: size of the register dump buffer. */
static int emac_ethtool_get_regs_len(struct net_device *dev)
{
	return EMAC_REG_SPACE_SIZE;
}
4158
4159static void emac_ethtool_get_regs(struct net_device *dev,
4160 struct ethtool_regs *regs, void *space)
4161{
4162 struct emac_priv *priv = netdev_priv(dev);
4163 u32 *reg_space = (u32 *) space;
4164 void __iomem *base = priv->iobase;
4165 int i;
4166
4167 regs->version = 1;
4168
4169 memset(reg_space, 0x0, EMAC_REG_SPACE_SIZE);
4170
4171 for (i = 0; i < EMAC_DMA_REG_CNT; i++)
4172 reg_space[i] = readl(base + DMA_CONFIGURATION + i * 4);
4173
4174 for (i = 0; i < EMAC_MAC_REG_CNT; i++)
4175 reg_space[i + MAC_GLOBAL_CONTROL / 4] = readl(base + MAC_GLOBAL_CONTROL + i * 4);
4176}
4177
4178static int emac_get_link_ksettings(struct net_device *ndev,
4179 struct ethtool_link_ksettings *cmd)
4180{
4181 if (!ndev->phydev)
4182 return -ENODEV;
4183
4184 phy_ethtool_ksettings_get(ndev->phydev, cmd);
4185 return 0;
4186}
4187
4188static int emac_set_link_ksettings(struct net_device *ndev,
4189 const struct ethtool_link_ksettings *cmd)
4190{
4191 if (!ndev->phydev)
4192 return -ENODEV;
4193
4194 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
4195}
4196
/* ethtool ->get_drvinfo: report the driver name and stats count. */
static void emac_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	info->n_stats = ARRAY_SIZE(emac_ethtool_stats);
}
4203
4204static void emac_get_pauseparam(struct net_device *ndev,
4205 struct ethtool_pauseparam *param)
4206{
4207 struct emac_priv *priv = netdev_priv(ndev);
4208 int val = emac_mii_read(priv->mii, 0, 0);
4209
4210 param->autoneg = (val & BIT(12)) ? 1 : 0;
4211 param->rx_pause = priv->pause.rx_pause;
4212 param->tx_pause = priv->pause.tx_pause;
4213
4214 return;
4215}
4216
/*
 * emac_set_pauseparam - ethtool: configure flow control
 *
 * Updates the MAC flow-control decode/generation enables from
 * @param->rx_pause/tx_pause and mirrors the autoneg request into PHY
 * register 0 bit 12.  On the first call that enables any pause
 * direction, the high/low water thresholds are read from the
 * "flow-control-threshold" DT property (percent values, default 60/90)
 * and programmed either as RX-ring occupancy limits (ASR18xx /
 * ASR1903 Z1) or as automatic hardware thresholds.
 *
 * NOTE(review): `init_flag` is function-local static, so the threshold
 * setup runs once per driver lifetime, not per device — fine for a
 * single EMAC instance; revisit if more are added.
 */
static int emac_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *param)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct device *dev = &priv->pdev->dev;
	struct device_node *np = dev->of_node;
	int val;
	int phyval;
	u32 threshold[2];
	static int init_flag = 1;

	val = readl(priv->iobase + MAC_FC_CONTROL);
	phyval = emac_mii_read(priv->mii, 0, 0);

	if (param->rx_pause)
		val |= MREGBIT_FC_DECODE_ENABLE;
	else
		val &= ~MREGBIT_FC_DECODE_ENABLE;

	if (param->tx_pause)
		val |= MREGBIT_FC_GENERATION_ENABLE;
	else
		val &= ~MREGBIT_FC_GENERATION_ENABLE;

	if (init_flag && (param->rx_pause | param->tx_pause)) {
		/* NOTE(review): multicast mode is presumably needed so the
		 * MAC accepts the flow-control multicast address — confirm.
		 */
		val |= MREGBIT_MULTICAST_MODE;
		priv->pause.pause_time_max = 0;
		if (0 != of_property_read_u32_array(np, "flow-control-threshold", threshold, 2)) {
			threshold[0] = 60;
			threshold[1] = 90;
		}
		threshold[0] = clamp(threshold[0], 0U, 99U);
		threshold[1] = clamp(threshold[1], 1U, 100U);

		if (cpu_is_asr18xx() || cpu_is_asr1903_z1()) {
			priv->pause.low_water = priv->rx_ring.total_cnt * threshold[0] / 100;
			priv->pause.high_water = priv->rx_ring.total_cnt * threshold[1] / 100 - 1;
			priv->pause.fc_auto = 0;
		} else {
			priv->pause.low_water = 0;
			priv->pause.high_water = 0;
			priv->pause.fc_auto = 1;
			val |= MREGBIT_AUTO_FC_GENERATION_ENABLE;
			threshold[0] = 1024 * threshold[0] / 100;
			threshold[1] = 1024 * threshold[1] / 100;
			emac_wr(priv, MAC_FC_AUTO_HIGH_THRESHOLD, threshold[1]);
			emac_wr(priv, MAC_FC_AUTO_LOW_THRESHOLD, threshold[0]);
			emac_wr(priv, MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE, 0xffff);
			emac_wr(priv, MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE, 0);
		}
		init_flag = 0;
	}
	emac_wr(priv, MAC_FC_CONTROL, val);

	if (param->autoneg)
		phyval |= BIT(12);
	else
		phyval &= ~BIT(12);

	(void)emac_mii_write(priv->mii, 0, 0, (u16)phyval);

	priv->pause.rx_pause = param->rx_pause;
	priv->pause.tx_pause = param->tx_pause;
	return 0;
}
4282
4283static void emac_get_wol(struct net_device *dev,
4284 struct ethtool_wolinfo *wol)
4285{
4286 struct emac_priv *priv = netdev_priv(dev);
4287 struct device *device = &priv->pdev->dev;
4288
4289 if (device_can_wakeup(device)) {
4290 wol->supported = WAKE_MAGIC | WAKE_UCAST;
4291 wol->wolopts = priv->wolopts;
4292 }
4293}
4294
4295static int emac_set_wol(struct net_device *dev,
4296 struct ethtool_wolinfo *wol)
4297{
4298 struct emac_priv *priv = netdev_priv(dev);
4299 struct device *device = &priv->pdev->dev;
4300 u32 support = WAKE_MAGIC | WAKE_UCAST;
4301
4302 if (!device_can_wakeup(device) || !priv->en_suspend)
4303 return -ENOTSUPP;
4304
4305 if (wol->wolopts & ~support)
4306 return -EINVAL;
4307
4308 priv->wolopts = wol->wolopts;
4309
4310 if (wol->wolopts) {
4311 device_set_wakeup_enable(device, 1);
4312 enable_irq_wake(priv->irq_wakeup);
4313 } else {
4314 device_set_wakeup_enable(device, 0);
4315 disable_irq_wake(priv->irq_wakeup);
4316 }
4317
4318 return 0;
4319}
4320
/* ethtool operations exported by the driver; see the individual
 * emac_* handlers above for details.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings = emac_get_link_ksettings,
	.set_link_ksettings = emac_set_link_ksettings,
	.get_drvinfo = emac_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = emac_get_pauseparam,
	.set_pauseparam = emac_set_pauseparam,
	.get_strings = emac_get_strings,
	.get_sset_count = emac_get_sset_count,
	.get_ethtool_stats = emac_get_ethtool_stats,
	.get_regs = emac_ethtool_get_regs,
	.get_regs_len = emac_ethtool_get_regs_len,
	.get_ts_info = emac_get_ts_info,
	.get_wol = emac_get_wol,
	.set_wol = emac_set_wol,
};
4338
/* net_device operations; open/close/xmit/ioctl are defined earlier in
 * this file.
 */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open = emac_open,
	.ndo_stop = emac_close,
	.ndo_start_xmit = emac_start_xmit,
	.ndo_set_mac_address = emac_set_mac_address,
	.ndo_do_ioctl = emac_ioctl,
	.ndo_change_mtu = emac_change_mtu,
	.ndo_tx_timeout = emac_tx_timeout,
};
4348
4349#ifdef WAN_LAN_AUTO_ADAPT
4350#define EMAC_SKB_SIZE 2048
4351static int emac_event_add_var(struct emac_event *event, int argv,
4352 const char *format, ...)
4353{
4354 static char buf[128];
4355 char *s;
4356 va_list args;
4357 int len;
4358
4359 if (argv)
4360 return 0;
4361
4362 va_start(args, format);
4363 len = vsnprintf(buf, sizeof(buf), format, args);
4364 va_end(args);
4365
4366 if (len >= sizeof(buf)) {
4367 printk("buffer size too small\n");
4368 WARN_ON(1);
4369 return -ENOMEM;
4370 }
4371
4372 s = skb_put(event->skb, len + 1);
4373 strcpy(s, buf);
4374
4375 return 0;
4376}
4377
4378static int emac_hotplug_fill_event(struct emac_event *event)
4379{
4380 int ret;
4381
4382 ret = emac_event_add_var(event, 0, "HOME=%s", "/");
4383 if (ret)
4384 return ret;
4385
4386 ret = emac_event_add_var(event, 0, "PATH=%s",
4387 "/sbin:/bin:/usr/sbin:/usr/bin");
4388 if (ret)
4389 return ret;
4390
4391 ret = emac_event_add_var(event, 0, "SUBSYSTEM=%s", "ethernet");
4392 if (ret)
4393 return ret;
4394
4395 ret = emac_event_add_var(event, 0, "ACTION=%s", event->action);
4396 if (ret)
4397 return ret;
4398
4399 ret = emac_event_add_var(event, 0, "ETH=%s", event->name);
4400 if (ret)
4401 return ret;
4402
4403 ret = emac_event_add_var(event, 0, "PORT=%d", event->port);
4404 if (ret)
4405 return ret;
4406
4407 ret = emac_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());
4408
4409 return ret;
4410}
4411
/*
 * emac_hotplug_work - worker that broadcasts one hotplug uevent
 *
 * Builds a netlink skb containing "ACTION@" followed by the KEY=value
 * environment strings (emac_hotplug_fill_event) and broadcasts it to
 * uevent listeners.  Ownership: the skb is consumed by
 * broadcast_uevent() on success and freed here on failure; the
 * emac_event allocated by emac_sig_workq() is always freed.
 */
static void emac_hotplug_work(struct work_struct *work)
{
	struct emac_event *event = container_of(work, struct emac_event, work);
	int ret = 0;

	event->skb = alloc_skb(EMAC_SKB_SIZE, GFP_KERNEL);
	if (!event->skb)
		goto out_free_event;

	/* Leading "<action>@" mirrors the kobject uevent framing. */
	ret = emac_event_add_var(event, 0, "%s@", event->action);
	if (ret)
		goto out_free_skb;

	ret = emac_hotplug_fill_event(event);
	if (ret)
		goto out_free_skb;

	NETLINK_CB(event->skb).dst_group = 1;
	broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);

 out_free_skb:
	if (ret) {
		printk("work error %d\n", ret);
		kfree_skb(event->skb);
	}
 out_free_event:
	kfree(event);
}
4440
4441static int emac_sig_workq(int event, int port)
4442{
4443 struct emac_event *u_event = NULL;
4444
4445 u_event = kzalloc(sizeof(*u_event), GFP_KERNEL);
4446 if (!u_event)
4447 return -ENOMEM;
4448
4449 u_event->name = DRIVER_NAME;
4450 if(event == CARRIER_UP)
4451 u_event->action = "LINKUP";
4452 else if(event == CARRIER_DOWN)
4453 u_event->action = "LINKDW";
4454 else if(event == CARRIER_DOWN_IP175D)
4455 u_event->action = "IP175D_LINKDW";
4456 else if(event == CARRIER_UP_IP175D)
4457 u_event->action = "IP175D_LINKUP";
4458 else if(event == DHCP_EVENT_CLIENT)
4459 u_event->action = "DHCPCLIENT";
4460 else if(event == DHCP_EVENT_SERVER)
4461 u_event->action = "DHCPSERVER";
4462 else if(event == PHY_IP175D_CONNECT)
4463 u_event->action = "PHY_CONNECT";
4464
4465 u_event->port = port;
4466 INIT_WORK(&u_event->work, (void *)emac_hotplug_work);
4467 schedule_work(&u_event->work);
4468
4469 return 0;
4470}
4471
4472static inline void __emac_dhcp_work_func(struct emac_priv *priv)
4473{
4474 if (priv->dhcp == DHCP_REC_RESP) {
4475 emac_sig_workq(DHCP_EVENT_CLIENT, priv->vlan_port);
4476 } else if (priv->dhcp == DHCP_SEND_REQ || priv->dhcp == 0) {
4477 emac_sig_workq(DHCP_EVENT_SERVER, priv->vlan_port);
4478 }
4479
4480 priv->dhcp = 0;
4481 if(priv->dhcp_delaywork){
4482 cancel_delayed_work(&priv->dhcp_work);
4483 priv->dhcp_delaywork = 0;
4484 }
4485}
4486
4487static void emac_dhcp_work_func_t(struct work_struct *work)
4488{
4489 struct emac_priv *priv = container_of(work, struct emac_priv, dhcp_work.work);
4490
4491 __emac_dhcp_work_func(priv);
4492}
4493#endif
4494
b.liub17525e2025-05-14 17:22:29 +08004495
4496
4497
/* PHY version cached by phy_version_store(); read back via sysfs */
long g_PhyVersionNumber = 0;
4499
4500
4501static ssize_t phy_version_show(struct device *dev,
4502 struct device_attribute *attr, char *buf)
4503{
4504 int len = 0;
4505
4506 len = sprintf(buf, "phy_version = 0x%x\n", g_PhyVersionNumber);
4507
4508 return (ssize_t)len;
4509}
4510
4511static ssize_t phy_version_store(struct device *dev,
4512 struct device_attribute *attr, const char *buf, size_t size)
4513{
4514 int reg, val, devad = 0;
4515
4516 struct emac_priv *priv = dev_get_drvdata(dev);
4517
4518 sscanf(buf, "%d", &val);
4519 if(val == 1)
4520 {
4521 devad = 0x1f;
4522 reg = 0x113;
4523 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4524 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4525 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4526 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4527
4528 }
4529 g_PhyVersionNumber = val;
4530
4531 return size;
4532}
4533
4534
4535static ssize_t lpsd_sleep_show(struct device *dev,
4536 struct device_attribute *attr, char *buf)
4537{
4538 int len = 0;
4539 int reg, val, devad = 0;
4540 struct emac_priv *priv = dev_get_drvdata(dev);
4541
4542 devad = 0x3;
4543 reg = 0x8700;
4544 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4545 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4546 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4547 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4548
4549 len = sprintf(buf, "phy_version = 0x%x\n", val);
4550
4551 return (ssize_t)len;
4552}
4553
4554static ssize_t lpsd_sleep_store(struct device *dev,
4555 struct device_attribute *attr, const char *buf, size_t size)
4556{
4557 int reg, val, devad = 0;
4558
4559 struct emac_priv *priv = dev_get_drvdata(dev);
4560
4561 sscanf(buf, "%d", &val);
4562 if(val == 1) //enter lpsd sleep mode
4563 {
4564 devad = 0x3;
4565 reg = 0x8700;
4566 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4567 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4568 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4569 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4570
4571 printk("lpsd sleep mode : reg3.8700 = 0x%x", val);
4572 msleep(200);
4573
4574 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4575 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4576 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4577 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, (val | BIT(0)));
4578
4579 }else
4580 {
4581
4582 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4583 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4584 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4585 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4586
4587 printk("lpsd sleep mode : reg3.8700 = 0x%x", val);
4588 msleep(200);
4589
4590 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4591 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4592 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4593 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, (val | ~BIT(0)));
4594 }
4595
4596 return size;
4597}
4598
4599
/* Diagnostic mode selected via gmac_master_or_slave_store(); -1 = unset */
static int mode_type = -1;
/* Crude, non-atomic re-entrancy guard for gmac_master_or_slave_show() */
static int enter_only_one = 0;
4602
4603
4604static ssize_t gmac_master_or_slave_store(struct device *dev,
4605 struct device_attribute *attr, const char *buf, size_t size)
4606{
4607 int val = 0;
4608 int reg = 0;
4609 int devad = 0;
4610 int ret = 0;
4611
4612 struct emac_priv *priv = dev_get_drvdata(dev);
4613
4614 //read mode_type
4615 ret = sscanf(buf, "%d", &mode_type);
4616 if(ret < 1)
4617 {
4618 printk(KERN_ERR "Please enter the number 0-3 to enable the corresponding mode \n"
4619 "Enter values in the non-0-3 range to get pattern description \n");
4620 return size;
4621 }
4622
4623 //Judgment model
4624 if (mode_type < 0 || mode_type > 3) {
4625 printk(KERN_DEBUG "Please enter the number range 0-3\n"
4626 "0: Set the slave mode \n"
4627 "1: Set the main mode \n"
4628 "2: indicates setting SQI value view mode \n"
4629 "3: Set the VCT value view mode \n"
4630 "After the mode is set, the corresponding value can be obtained\n");
4631 return ret ? ret : size;
4632 }
4633
4634 //Set the Ethernet slave mode
4635 if (mode_type == 0)
4636 {
4637 devad = 0x1;
4638 reg = 0x834;
4639 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4640 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4641 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4642 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4643 msleep(200);
4644
4645 val &= ~BIT(14);
4646 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4647 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4648 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4649 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, val );
4650
4651 }
4652 //Set the Ethernet master mode
4653 else if (mode_type == 1)
4654 {
4655 devad = 0x1;
4656 reg = 0x834;
4657 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4658 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4659 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4660 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4661 msleep(200);
4662
4663 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4664 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4665 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4666 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, val | BIT(14));
4667
4668
4669 }
4670
4671 return size;
4672}
4673
4674
4675static ssize_t gmac_master_or_slave_show(struct device *dev,
4676 struct device_attribute *attr, char *buf)
4677{
4678 int len = 0;
4679 int val = 0;
4680 int reg = 0;
4681 int devad = 0;
4682 int ret = 0;
4683 struct emac_priv *priv = dev_get_drvdata(dev);
4684
4685 if(enter_only_one == 1)
4686 {
4687 return 0;
4688 }
4689 enter_only_one = 1;
4690
4691 //Read the network master/slave
4692 if (mode_type == 0 || mode_type == 1)
4693 {
4694 devad = 0x1;
4695 reg = 0x834;
4696 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4697 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4698 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4699 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e) & BIT(14);
4700 if(val)
4701 memcpy(buf, "Master\n",7);
4702 else
4703 memcpy(buf, "Slave\n", 6);
4704
4705 printk(KERN_DEBUG "mode_type %d - gmac_master_or_slave is %s\n", mode_type, buf);
4706
4707 }
4708
4709 //Obtain the cable quality SQI value
4710 else if(mode_type == 2)
4711 {
4712 devad = 0x1;
4713 reg = 0x8B10;
4714 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0d, devad);
4715 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e, reg);
4716 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0x0d, 0x4000 | devad);
4717 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0x0e);
4718 sprintf(buf, "0x%x\n", val);
4719 sprintf(buf, "SQI : 0x%x\n", val);
4720 printk(KERN_DEBUG "mode_type %d - SQI is 0x%x", mode_type, val);
4721
4722 }
4723
4724 //Obtain short circuit, open circuit and normal connection of VCT
4725 else if(mode_type == 3)
4726 {
4727 //--TDR Enable
4728 devad = 0x1;
4729 reg = 0x8B00;
4730 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4731 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4732 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4733 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, BIT(14));
4734
4735 msleep(200);
4736
4737 //--TDR Start
4738 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4739 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4740 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4741 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, BIT(12) | BIT(14));
4742
4743 msleep(20);
4744 //--Read VCT
4745 devad = 0x1;
4746 reg = 0x8B02;
4747 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4748 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4749 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4750 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0xe);
4751
4752 printk(KERN_DEBUG "Open status: %s - Short status: %s\n",
4753 (val & BIT(1)) ? "Open" : "Normal", (val & BIT(0)) ? "Short" : "Normal");
4754 sprintf(buf, "Open status: %s\nShort status: %s\n",
4755 (val & BIT(1)) ? "Open" : "Normal", (val & BIT(0)) ? "Short" : "Normal");
4756
4757 reg = 0x8B01;
4758 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4759 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4760 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4761 val = mdiobus_read(priv->mii, priv->ndev->phydev->mdio.addr, 0xe);
4762
4763 sprintf(buf, "%sDistance status: 0x%x\n", buf, val);
4764 printk(KERN_DEBUG "mode_type %d - Distance status is 0x%x\n", mode_type, val);
4765
4766 //--TDR Disable
4767 devad = 0x1;
4768 reg = 0x8B00;
4769 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, devad);
4770 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, reg);
4771 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xd, 0x4000 | devad);
4772 mdiobus_write(priv->mii, priv->ndev->phydev->mdio.addr, 0xe, 0x0);
4773
4774
4775 }
4776 else{
4777 sprintf(buf, "Please enter the number range 0-3\n"
4778 "0: Set the slave mode \n"
4779 "1: Set the main mode \n"
4780 "2: indicates setting SQI value view mode \n"
4781 "3: Set the VCT value view mode \n"
4782 "After the mode is set, the corresponding value can be obtained\n");
4783 printk(KERN_DEBUG "Please enter the number range 0-3\n"
4784 "0: Set the slave mode \n"
4785 "1: Set the main mode \n"
4786 "2: indicates setting SQI value view mode \n"
4787 "3: Set the VCT value view mode \n"
4788 "After the mode is set, the corresponding value can be obtained\n");
4789 }
4790 enter_only_one = 0;
4791
4792 return strlen(buf);
4793}
4794
4795
4796
/* PHY diagnostic sysfs attributes (readable by all, writable by root) */
static DEVICE_ATTR(lpsd_sleep, S_IRUGO | S_IWUSR, lpsd_sleep_show, lpsd_sleep_store);
static DEVICE_ATTR(phy_version, S_IRUGO | S_IWUSR, phy_version_show, phy_version_store);
static DEVICE_ATTR(gmac_master_or_slave, S_IRUGO | S_IWUSR, gmac_master_or_slave_show, gmac_master_or_slave_store);


/* Attribute list registered on the platform device in emac_probe() */
static struct attribute *ethrnet_opera_attrs[] = {
	&dev_attr_lpsd_sleep.attr,
	&dev_attr_phy_version.attr,
	&dev_attr_gmac_master_or_slave.attr,
	NULL,
};

static const struct attribute_group demo_attr_grp = {

	.attrs = ethrnet_opera_attrs,

};
4814
b.liue9582032025-04-17 19:18:16 +08004815static int emac_probe(struct platform_device *pdev)
4816{
4817 struct emac_priv *priv;
4818 struct net_device *ndev = NULL;
4819 struct resource *res;
4820 struct device_node *np = pdev->dev.of_node;
4821 struct device *dev = &pdev->dev;
4822 const unsigned char *mac_addr = NULL;
4823 const struct of_device_id *match;
4824#ifdef CONFIG_DEBUG_FS
4825 struct dentry *emac_fs_dir = NULL;
4826 struct dentry *emac_clk_tuning;
4827#endif
4828 int ret;
b.liub17525e2025-05-14 17:22:29 +08004829 struct regulator *vcc3v3_gmac;
b.liue9582032025-04-17 19:18:16 +08004830
4831 ndev = alloc_etherdev(sizeof(struct emac_priv));
4832 if (!ndev) {
4833 ret = -ENOMEM;
4834 return ret;
4835 }
4836 priv = netdev_priv(ndev);
4837 priv->ndev = ndev;
4838 priv->pdev = pdev;
4839#ifdef WAN_LAN_AUTO_ADAPT
4840 priv->dhcp = -1;
4841 priv->vlan_port = -1;
4842 priv->dhcp_delaywork = 0;
4843#endif
4844 platform_set_drvdata(pdev, priv);
4845
4846 match = of_match_device(of_match_ptr(emac_of_match), &pdev->dev);
4847 if (match) {
4848 priv->regdata = match->data;
4849 } else {
4850 pr_info("===> not match valid device\n");
4851 }
4852
4853 emac_command_options(priv);
4854 emac_skbrb_init(EMAC_SKBRB_SLOT_SIZE, priv->rx_ring.total_cnt * 2);
4855
4856 priv->hw_stats = devm_kzalloc(&pdev->dev,
4857 sizeof(*priv->hw_stats),
4858 GFP_KERNEL);
4859 if (!priv->hw_stats) {
4860 dev_err(&pdev->dev, "failed to allocate counter memory\n");
4861 ret = -ENOMEM;
4862 goto err_netdev;
4863 }
4864
4865 spin_lock_init(&priv->hw_stats->stats_lock);
4866
4867 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4868 priv->iobase = devm_ioremap_resource(&pdev->dev, res);
4869 if (IS_ERR(priv->iobase)) {
4870 ret = -ENOMEM;
4871 goto err_netdev;
4872 }
4873
4874 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4875 priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
4876 if (!IS_ERR(priv->tso_base)) {
4877 dev_info(&pdev->dev, "tso base=0x%x\n", (unsigned)priv->tso_base);
4878 }
4879
4880 priv->irq = irq_of_parse_and_map(np, 0);
4881 if (!priv->irq) {
4882 ret = -ENXIO;
4883 goto err_netdev;
4884 }
4885 priv->irq_wakeup = irq_of_parse_and_map(np, 1);
4886 if (!priv->irq_wakeup)
4887 dev_err(&pdev->dev, "wake_up irq not found\n");
4888
4889 priv->tso = of_property_read_bool(np, "tso-support");
4890 if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
4891 priv->tso = false;
4892 if (priv->tso) {
4893 priv->irq_tso = irq_of_parse_and_map(np, 3);
4894 if (!priv->irq_tso) {
4895 dev_err(&pdev->dev, "tso irq not found\n");
4896 priv->tso = false;
4897 }
4898 }
4899
4900 priv->sram_pool = of_gen_pool_get(dev->of_node, "eth,sram", 0);
4901 if (priv->sram_pool) {
4902 dev_notice(&pdev->dev, "use sram as tx desc\n");
4903 }
4904
4905 ret = of_property_read_u32(np, "lpm-qos", &priv->pm_qos);
4906 if (ret)
4907 return ret;
4908
4909 ret = of_property_read_u32(np, "3v3-enable", &priv->power_domain);
4910 if (ret)
4911 priv->power_domain = 0;
4912
4913 ret = of_property_read_u32(np, "mdio-clk-div", &priv->mdio_clk_div);
4914 if (ret)
4915 priv->mdio_clk_div = 0xfe;
4916
4917 if (of_property_read_bool(np, "enable-suspend"))
4918 priv->en_suspend = 1;
4919 else
4920 priv->en_suspend = 0;
4921
4922 priv->wolopts = 0;
4923 if (of_property_read_bool(np, "magic-packet-wakeup"))
4924 priv->wolopts |= WAKE_MAGIC;
4925
4926 if (of_property_read_bool(np, "unicast-packet-wakeup"))
4927 priv->wolopts |= WAKE_UCAST;
4928
4929 priv->dev_flags = 0;
4930 if (of_property_read_bool(np, "suspend-not-keep-power")) {
4931 priv->dev_flags |= EMAC_SUSPEND_POWER_DOWN_PHY;
4932 priv->wolopts = 0;
4933 }
4934
b.liub17525e2025-05-14 17:22:29 +08004935 vcc3v3_gmac = devm_regulator_get(dev, "vmmc");
4936 if (!IS_ERR(vcc3v3_gmac))
4937 {
4938 if( regulator_set_voltage(vcc3v3_gmac, 1800000,1800000))
4939 pr_err("fail to set regulator vcc3v3_gmac to 1.8v\n");
4940
4941 if (!regulator_is_enabled(vcc3v3_gmac) && regulator_enable(vcc3v3_gmac))
4942 pr_err("fail to enable regulator vcc3v3_gmac\n");
4943 }
4944
4945 g_vcc3v3_gmac = vcc3v3_gmac;
4946
b.liue9582032025-04-17 19:18:16 +08004947 priv->pinctrl = devm_pinctrl_get(dev);
4948 if (IS_ERR(priv->pinctrl))
4949 dev_err(dev, "could not get pinctrl handle\n");
4950
4951 priv->rgmii_pins = pinctrl_lookup_state(priv->pinctrl, "rgmii-pins");
4952 if (IS_ERR(priv->rgmii_pins))
4953 dev_err(dev, "could not get rgmii-pins pinstate\n");
4954
4955 emac_set_aib_power_domain(priv);
4956
4957 device_init_wakeup(&pdev->dev, 1);
4958
4959 priv->pm_qos_req.name = pdev->name;
4960 pm_qos_add_request(&priv->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
4961 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
4962#ifdef CONFIG_DDR_DEVFREQ
4963 pm_qos_add_request(&priv->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
4964 PM_QOS_DEFAULT_VALUE);
4965
4966 priv->clk_scaling.polling_delay_ms = 1000; /* 1s window */
4967 priv->clk_scaling.tx_up_threshold = 120; /* 120Mbps */
4968 priv->clk_scaling.tx_down_threshold = 60;
4969 priv->clk_scaling.rx_up_threshold = 60; /* 60Mbps */
4970 priv->clk_scaling.rx_down_threshold = 20;
4971 priv->clk_scaling.window_time = jiffies;
4972 pm_qos_add_request(&priv->clk_scaling.ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
4973 PM_QOS_DEFAULT_VALUE);
4974 INIT_WORK(&priv->qos_work, emac_ddr_qos_work);
4975#endif
4976 skb_queue_head_init(&priv->rx_skb);
4977 ndev->watchdog_timeo = 5 * HZ;
4978 ndev->base_addr = (unsigned long)priv->iobase;
4979 ndev->irq = priv->irq;
4980 /* set hw features */
4981 ndev->features = NETIF_F_SG | NETIF_F_SOFT_FEATURES;
4982 if (priv->tso) {
4983 ndev->features |= NETIF_F_RXCSUM;
4984 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4985 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
4986 dev_info(&pdev->dev, "TSO feature enabled\n");
4987 }
4988 ndev->hw_features = ndev->features;
4989 ndev->vlan_features = ndev->features;
4990
4991 ndev->ethtool_ops = &emac_ethtool_ops;
4992 ndev->netdev_ops = &emac_netdev_ops;
4993 if (pdev->dev.of_node)
4994 mac_addr = of_get_mac_address(np);
4995
4996 if (!IS_ERR_OR_NULL(mac_addr)) {
4997 //ether_addr_copy(ndev->dev_addr, mac_addr);
4998 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
4999 if (!is_valid_ether_addr(ndev->dev_addr)) {
5000 dev_info(&pdev->dev, "Using random mac address\n");
5001 eth_hw_addr_random(ndev);
5002 }
5003 } else {
5004 dev_info(&pdev->dev, "Using random mac address\n");
5005 eth_hw_addr_random(ndev);
5006 }
5007
5008 priv->hw_adj = of_property_read_bool(np, "hw-increment");
5009 priv->ptp_support = of_property_read_bool(np, "ptp-support");
5010 if (priv->ptp_support) {
5011 pr_info("EMAC support IEEE1588 PTP Protocol\n");
5012 if (of_property_read_u32(np, "ptp-clk-rate",
5013 &priv->ptp_clk_rate)) {
5014 priv->ptp_clk_rate = 20000000;
5015 pr_info("%s ptp_clk rate using default value:%d may inaccurate!!1\n",
5016 __func__, priv->ptp_clk_rate);
5017 }
5018
5019 priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk");
5020 if (IS_ERR(priv->ptp_clk)) {
5021 dev_err(&pdev->dev, "ptp clock not found.\n");
5022 ret = PTR_ERR(priv->ptp_clk);
5023 goto err_netdev;
5024 }
5025
5026 clk_set_rate(priv->ptp_clk, priv->ptp_clk_rate);
5027 }
5028
5029 priv->pps_info.enable_pps = 0;
5030#ifdef CONFIG_PPS
5031 ret = of_property_read_u32(np, "pps_source", &priv->pps_info.pps_source);
5032 if (!ret) {
5033 priv->irq_pps = irq_of_parse_and_map(np, 2);
5034
5035 if (priv->pps_info.pps_source < EMAC_PPS_MAX)
5036 priv->pps_info.enable_pps = 1;
5037 else
5038 dev_err(&pdev->dev, "wrong PPS source!\n");
5039 }
5040#endif
5041 priv->clk = devm_clk_get(&pdev->dev, "emac-clk");
5042 if (IS_ERR(priv->clk)) {
5043 dev_err(&pdev->dev, "emac clock not found.\n");
5044 ret = PTR_ERR(priv->clk);
5045 goto err_netdev;
5046 }
5047
5048 ret = clk_prepare_enable(priv->clk);
5049 if (ret < 0) {
5050 dev_err(&pdev->dev, "failed to enable emac clock: %d\n",
5051 ret);
5052 goto clk_disable;
5053 }
5054
5055 emac_sw_init(priv);
5056 ret = emac_mdio_init(priv);
5057 if (ret)
5058 goto clk_disable;
5059
5060 INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
5061#ifdef WAN_LAN_AUTO_ADAPT
5062 INIT_DELAYED_WORK(&priv->dhcp_work, emac_dhcp_work_func_t);
5063#endif
5064 if (of_phy_is_fixed_link(np)) {
5065 if ((emac_set_fixed_link(np, priv) < 0)) {
5066 ret = -ENODEV;
5067 goto clk_disable;
5068 }
5069 dev_info(&pdev->dev, "find fixed link\n");
5070 priv->fix_link = 1;
5071 }
5072
5073 INIT_DELAYED_WORK(&priv->emac_pause_work, emac_pause_generate_work_fuc);
5074 SET_NETDEV_DEV(ndev, &pdev->dev);
5075 strcpy(ndev->name, "eth%d");
5076
5077 ret = register_netdev(ndev);
5078 if (ret) {
5079 pr_err("register_netdev failed\n");
5080 goto err_mdio_deinit;
5081 }
5082 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
5083#ifdef CONFIG_ASR_EMAC_NAPI
5084 netif_napi_add(ndev, &priv->rx_napi, emac_rx_poll, 32);
5085 netif_tx_napi_add(ndev, &priv->tx_napi, emac_tx_poll, 32);
5086#endif
5087 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
5088 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
5089 priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable");
5090
5091 if (priv->clk_tuning_enable) {
5092 ret = of_property_read_u32(np, "tx-clk-config",
5093 &priv->tx_clk_config);
5094 if (ret)
5095 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
5096
5097 ret = of_property_read_u32(np, "rx-clk-config",
5098 &priv->rx_clk_config);
5099 if (ret)
5100 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
5101#ifdef CONFIG_DEBUG_FS
5102 if (!emac_fs_dir) {
5103 emac_fs_dir = debugfs_create_dir(DRIVER_NAME, NULL);
5104
5105 if (!emac_fs_dir || IS_ERR(emac_fs_dir)) {
5106 pr_err("emac debugfs create directory failed\n");
5107 }else {
5108 emac_clk_tuning = debugfs_create_file("clk_tuning", 0664,
5109 emac_fs_dir, priv, &clk_tuning_fops);
5110 if (!emac_clk_tuning) {
5111 pr_err("emac debugfs create file failed\n");
5112 }
5113 }
5114 }
5115#endif
5116 }
b.liub17525e2025-05-14 17:22:29 +08005117
5118 sysfs_create_group(&pdev->dev.kobj,&demo_attr_grp);
5119
5120
5121 //device_create_file(&pdev->dev, &dev_attr_cable_sqi_value);
b.liue9582032025-04-17 19:18:16 +08005122 return 0;
5123
5124err_mdio_deinit:
5125 emac_mdio_deinit(priv);
5126clk_disable:
5127 clk_disable_unprepare(priv->clk);
5128err_netdev:
5129 free_netdev(ndev);
5130 emac_skbrb_release();
5131 return ret;
5132}
5133
5134static int emac_remove(struct platform_device *pdev)
5135{
5136 struct emac_priv *priv = platform_get_drvdata(pdev);
5137
5138 device_init_wakeup(&pdev->dev, 0);
5139 unregister_netdev(priv->ndev);
5140 emac_reset_hw(priv);
5141 free_netdev(priv->ndev);
5142 emac_mdio_deinit(priv);
5143 clk_disable_unprepare(priv->clk);
5144 pm_qos_remove_request(&priv->pm_qos_req);
5145 cancel_delayed_work_sync(&priv->emac_pause_work);
5146#ifdef CONFIG_DDR_DEVFREQ
5147 pm_qos_remove_request(&priv->pm_ddr_qos);
5148 pm_qos_remove_request(&priv->clk_scaling.ddr_qos);
5149#endif
5150 emac_skbrb_release();
5151 return 0;
5152}
5153
/* Intentionally empty: no extra quiescing is performed at shutdown */
static void emac_shutdown(struct platform_device *pdev)
{
}
5157
5158#ifdef CONFIG_PM_SLEEP
/*
 * emac_resume - system resume callback (CONFIG_PM_SLEEP).
 *
 * WoL path: re-attach the netdev, re-enable NAPI, clear the wakeup mode
 * bits set by emac_suspend() and drop the wakeup-irq wake flag.
 * Non-WoL path: the interface was fully closed at suspend, so re-enable
 * the clock, power the PHY back up if it was powered down, and reopen
 * the interface under rtnl.
 */
static int emac_resume(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	u32 ctrl, wake_mode = 0;

	/* Suspend/resume handling is opt-in via the "enable-suspend" DT flag */
	if (!priv->en_suspend)
		return 0;

	if (priv->wolopts) {
		if (netif_running(ndev)) {
			netif_device_attach(ndev);
#ifdef CONFIG_ASR_EMAC_NAPI
			napi_enable(&priv->rx_napi);
			napi_enable(&priv->tx_napi);
#endif
		}

		/*
		 * NOTE(review): WAKE_MAGIC maps to the UNICAST mode bit and
		 * WAKE_UCAST to the MAGIC mode bit.  emac_suspend() uses the
		 * same swapped mapping, so set/clear stay consistent with
		 * each other, but this looks inverted — confirm against the
		 * MAC_GLOBAL_CONTROL register specification.
		 */
		if (priv->wolopts & WAKE_MAGIC)
			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
		if (priv->wolopts & WAKE_UCAST)
			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;

		disable_irq_wake(priv->irq_wakeup);
		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
		ctrl &= ~wake_mode;
		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	} else {
		clk_prepare_enable(priv->clk);

		if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
			emac_power_up(priv);

		rtnl_lock();
		dev_open(ndev, NULL);
		rtnl_unlock();
	}

	return 0;
}
5199
/*
 * emac_suspend - system suspend callback (CONFIG_PM_SLEEP).
 *
 * WoL path: detach the netdev, quiesce NAPI, arm the wakeup mode bits in
 * MAC_GLOBAL_CONTROL and enable the wakeup interrupt as a wake source.
 * Non-WoL path: fully close the interface, optionally power the PHY down
 * and gate the clock.
 */
static int emac_suspend(struct device *dev)
{
	struct emac_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	u32 ctrl, wake_mode = 0;

	/* Suspend/resume handling is opt-in via the "enable-suspend" DT flag */
	if (!priv->en_suspend)
		return 0;

	if (priv->wolopts) {
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
#ifdef CONFIG_ASR_EMAC_NAPI
			napi_disable(&priv->rx_napi);
			napi_disable(&priv->tx_napi);
#endif
		}

		/*
		 * NOTE(review): WAKE_MAGIC maps to the UNICAST mode bit and
		 * WAKE_UCAST to the MAGIC mode bit — the same swapped
		 * mapping emac_resume() clears, so the pair is internally
		 * consistent, but it looks inverted; confirm against the
		 * MAC_GLOBAL_CONTROL register specification.
		 */
		if (priv->wolopts & WAKE_MAGIC)
			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
		if (priv->wolopts & WAKE_UCAST)
			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;

		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
		ctrl |= wake_mode;
		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
		enable_irq_wake(priv->irq_wakeup);
	} else {
		rtnl_lock();
		dev_close(ndev);
		rtnl_unlock();

		if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
			emac_power_down(priv);

		clk_disable_unprepare(priv->clk);
	}

	return 0;
}
5240
5241static int emac_suspend_noirq(struct device *dev)
5242{
5243 struct emac_priv *priv = dev_get_drvdata(dev);
5244 struct net_device *ndev = priv->ndev;
5245
5246 if (!ndev->phydev && !priv->fix_link)
5247 return 0;
5248
5249 pr_pm_debug("==> enter emac_suspend_noirq\n");
5250 pm_qos_update_request(&priv->pm_qos_req,
5251 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
5252 return 0;
5253}
5254
5255static int emac_resume_noirq(struct device *dev)
5256{
5257 struct emac_priv *priv = dev_get_drvdata(dev);
5258 struct net_device *ndev = priv->ndev;
5259
5260 if (!ndev->phydev && !priv->fix_link)
5261 return 0;
5262
5263 pr_pm_debug("==> enter emac_resume_noirq\n");
5264 pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
5265 return 0;
5266}
5267
/* System sleep callbacks; the *_noirq pair only toggles the cpuidle QoS */
static const struct dev_pm_ops emac_pm_ops = {
	.suspend = emac_suspend,
	.resume = emac_resume,
	.suspend_noirq = emac_suspend_noirq,
	.resume_noirq = emac_resume_noirq,
};

#define ASR_EMAC_PM_OPS (&emac_pm_ops)
#else
#define ASR_EMAC_PM_OPS NULL
#endif
5279
/* Platform driver glue; devices are matched via emac_of_match from DT */
static struct platform_driver emac_driver = {
	.probe = emac_probe,
	.remove = emac_remove,
	.shutdown = emac_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(emac_of_match),
		.pm = ASR_EMAC_PM_OPS,
	},
};

module_platform_driver(emac_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for ASR Emac");
MODULE_ALIAS("platform:asr_eth");