// SPDX-License-Identifier: GPL-2.0
/*
 * asr emac driver
 *
 * Copyright (C) 2019 ASR Micro Limited
 *
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
#include <linux/phy_fixed.h>
#include <linux/pm_qos.h>
#include <asm/cacheflush.h>
#include <linux/cputype.h>
#include <linux/iopoll.h>
#include <linux/genalloc.h>

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <asm/atomic.h>
#include "emac_eth.h"
#include <linux/skbrb.h>

#ifdef WAN_LAN_AUTO_ADAPT
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/kobject.h>
#endif

#define DRIVER_NAME "asr_emac"

#define AXI_PHYS_BASE 0xd4200000

#define AIB_GMAC_IO_REG 0xD401E804
#define APBC_ASFAR 0xD4015050
#define AKEY_ASFAR 0xbaba
#define AKEY_ASSAR 0xeb10

#define EMAC_DIRECT_MAP
#define TUNING_CMD_LEN 50
#define CLK_PHASE_CNT 8
#define TXCLK_PHASE_DEFAULT 0
#define RXCLK_PHASE_DEFAULT 0
#define TX_PHASE 1
#define RX_PHASE 0

#define EMAC_DMA_REG_CNT 16
#define EMAC_MAC_REG_CNT 61
#define EMAC_EMPTY_FROM_DMA_TO_MAC 48
#define EMAC_REG_SPACE_SIZE ((EMAC_DMA_REG_CNT + \
		EMAC_MAC_REG_CNT + EMAC_EMPTY_FROM_DMA_TO_MAC) * 4)
#define EMAC_ETHTOOL_STAT(x) { #x, \
		offsetof(struct emac_hw_stats, x) / sizeof(u32) }

#define EMAC_SKBRB_SLOT_SIZE 1600
#define EMAC_EXTRA_ROOM 72
#define EMAC_SKBRB_MAX_PAYLOAD (EMAC_SKBRB_SLOT_SIZE - EMAC_EXTRA_ROOM - NET_IP_ALIGN)
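/*
 * Worked example (assuming NET_IP_ALIGN == 2, the usual value on ARM):
 * each 1600-byte ring slot reserves 72 bytes of extra room plus 2
 * alignment bytes, leaving 1600 - 72 - 2 = 1526 bytes for the largest
 * RX payload.
 */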

#define EMAC_RX_FILL_TIMER_US 0
#define EMAC_TX_COAL_TIMER_US (1000)
#define EMAC_TX_FRAMES (64)

#ifdef WAN_LAN_AUTO_ADAPT
#define DHCP_DISCOVER 1
#define DHCP_OFFER 2
#define DHCP_REQUEST 3
#define DHCP_ACK 5
#define IP175D_PHY_ID 0x02430d80

enum emac_SIG {
	CARRIER_DOWN = 0,
	CARRIER_UP,
	DHCP_EVENT_CLIENT,
	DHCP_EVENT_SERVER,
	PHY_IP175D_CONNECT,
	CARRIER_DOWN_IP175D,
	CARRIER_UP_IP175D,
};

enum emac_DHCP {
	DHCP_SEND_REQ = 1,
	DHCP_REC_RESP = 2,
};

struct emac_event {
	const char *name;
	char *action;
	int port;
	struct sk_buff *skb;
	struct work_struct work;
};

extern u64 uevent_next_seqnum(void);
static int emac_sig_workq(int event, int port);
#endif

static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable);
static int clk_phase_set(struct emac_priv *priv, bool is_tx);
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_rx_clean_desc(struct emac_priv *priv, int budget);
#else
static int emac_rx_clean_desc(struct emac_priv *priv);
#endif
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv);
static int emac_phy_connect(struct net_device *dev);

/* for falcon */
struct emac_regdata asr_emac_regdata_v1 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 17,
	.phy_intr_enable_shift = 16,
	.int_clk_src_sel_shift = -1,
	.rgmii_tx_clk_src_sel_shift = 5,
	.rgmii_rx_clk_src_sel_shift = 4,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = -1,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = -1,
	.rgmii_tx_delay_code_shift = -1,
	.rgmii_tx_delay_code_mask = -1,
	.rgmii_tx_delay_step_shift = -1,
	.rgmii_tx_delay_step_mask = -1,
	.rgmii_tx_delay_enable_shift = -1,
	.rgmii_rx_dline_reg_offset = -1,
	.rgmii_rx_delay_code_shift = -1,
	.rgmii_rx_delay_code_mask = -1,
	.rgmii_rx_delay_step_shift = -1,
	.rgmii_rx_delay_step_mask = -1,
	.rgmii_rx_delay_enable_shift = -1,
};

/* for kagu */
struct emac_regdata asr_emac_regdata_v2 = {
	.support_dual_vol_power = 0,
	.ptp_rx_ts_all_events = 0,
	.clk_rst_ctrl_reg_offset = 0x160,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x178,
	.rgmii_tx_delay_code_shift = 24,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 20,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 16,
	.rgmii_rx_dline_reg_offset = 0x178,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 4,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 0,
};

/* for lapwing */
struct emac_regdata asr_emac_regdata_v3 = {
	.support_dual_vol_power = 1,
	.ptp_rx_ts_all_events = 1,
	.clk_rst_ctrl_reg_offset = 0x164,
	.axi_mst_single_id_shift = 13,
	.phy_intr_enable_shift = 12,
	.int_clk_src_sel_shift = 9,
	.rgmii_tx_clk_src_sel_shift = 8,
	.rgmii_rx_clk_src_sel_shift = -1,
	.rmii_rx_clk_sel_shift = 7,
	.rmii_tx_clk_sel_shift = 6,
	.rmii_ref_clk_sel_shift = 3,
	.mac_intf_sel_shift = 2,
	.rgmii_tx_dline_reg_offset = 0x16c,
	.rgmii_tx_delay_code_shift = 8,
	.rgmii_tx_delay_code_mask = 0xff,
	.rgmii_tx_delay_step_shift = 0,
	.rgmii_tx_delay_step_mask = 0x3,
	.rgmii_tx_delay_enable_shift = 31,
	.rgmii_rx_dline_reg_offset = 0x168,
	.rgmii_rx_delay_code_shift = 8,
	.rgmii_rx_delay_code_mask = 0xff,
	.rgmii_rx_delay_step_shift = 0,
	.rgmii_rx_delay_step_mask = 0x3,
	.rgmii_rx_delay_enable_shift = 31,
};
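/*
 * Illustrative sketch: the per-SoC regdata tables above parameterize
 * where each control field sits in the APMU clock/reset register, so
 * shared code can program, say, an RGMII TX delay code roughly as:
 *
 *	val &= ~(regdata->rgmii_tx_delay_code_mask <<
 *		 regdata->rgmii_tx_delay_code_shift);
 *	val |= (code & regdata->rgmii_tx_delay_code_mask) <<
 *	       regdata->rgmii_tx_delay_code_shift;
 *
 * A shift of -1 marks a field the SoC does not implement.
 */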

static const struct of_device_id emac_of_match[] = {
	{
		.compatible = "asr,asr-eth",
		.data = (void *)&asr_emac_regdata_v1,
	},
	{
		.compatible = "asr,asr-eth-v2",
		.data = (void *)&asr_emac_regdata_v2,
	},
	{
		.compatible = "asr,asr-eth-v3",
		.data = (void *)&asr_emac_regdata_v3,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, emac_of_match);

#ifdef EMAC_DIRECT_MAP
inline dma_addr_t emac_map_direct(unsigned buf, unsigned len)
{
	unsigned ret;

	ret = mv_cp_virtual_to_physical(buf);
	BUG_ON(ret == buf);
	__cpuc_flush_dcache_area((void *)(buf & ~31),
				 ((len + (buf & 31) + 31) & ~31));
	return (dma_addr_t)ret;
}
#endif

static inline void emac_unmap_single(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir)
{
#ifdef EMAC_DIRECT_MAP
	if (dir == DMA_TO_DEVICE)
		return;
#endif
	dma_unmap_single(dev, handle, size, dir);
}

static inline dma_addr_t emac_map_single(struct device *dev, void *ptr,
					 size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		return dma_map_single(dev, ptr, size, dir);
#ifndef EMAC_DIRECT_MAP
	return dma_map_single(dev, ptr, size, dir);
#else
	return emac_map_direct((unsigned)ptr, (unsigned)size);
#endif
}
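/*
 * With EMAC_DIRECT_MAP, TX buffers bypass the DMA API: the virtual
 * address is translated to physical directly and the buffer is flushed
 * from the D-cache (32-byte cache lines, hence the ~31 rounding above),
 * so TX buffers also need no unmap. RX buffers always go through
 * dma_map_single()/dma_unmap_single() since the device writes to them.
 */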

#ifdef CONFIG_DDR_DEVFREQ
static void emac_ddr_qos_work(struct work_struct *work)
{
	struct emac_priv *priv;
	int val;

	priv = container_of(work, struct emac_priv, qos_work);
	val = priv->clk_scaling.qos_val;

	if (val == PM_QOS_DEFAULT_VALUE)
		pm_qos_update_request(&priv->clk_scaling.ddr_qos, val);
	else
		pm_qos_update_request_timeout(
			&priv->clk_scaling.ddr_qos, val, (2 * USEC_PER_SEC));
}

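/*
 * Throughput below is computed as bits / (ms * 1000), i.e. Mbit/s: for
 * example, 25,000,000 bytes moved in a 2000 ms window gives
 * 25e6 * 8 / (2000 * 1000) = 100 Mbps. DDR frequency is boosted when
 * either direction crosses its up-threshold and released once both
 * fall below their down-thresholds.
 */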
static void emac_ddr_clk_scaling(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	unsigned long rx_bytes, tx_bytes;
	unsigned long last_rx_bytes, last_tx_bytes;
	unsigned long total_time_ms = 0;
	unsigned int cur_rx_threshold, cur_tx_threshold;
	unsigned long polling_jiffies;
	int qos_val;

	polling_jiffies = msecs_to_jiffies(priv->clk_scaling.polling_delay_ms);
	if (time_is_after_jiffies(priv->clk_scaling.window_time +
				  polling_jiffies))
		return;

	total_time_ms = jiffies_to_msecs((long)jiffies -
					 (long)priv->clk_scaling.window_time);

	if (!ndev) {
		pr_err("%s: dev or net is not ready\n", __func__);
		return;
	}

	qos_val = priv->clk_scaling.qos_val;
	last_rx_bytes = priv->clk_scaling.rx_bytes;
	last_tx_bytes = priv->clk_scaling.tx_bytes;
	if (!last_rx_bytes && !last_tx_bytes)
		goto out;

	if (likely(ndev->stats.rx_bytes > last_rx_bytes))
		rx_bytes = ndev->stats.rx_bytes - last_rx_bytes;
	else
		rx_bytes = ULONG_MAX - last_rx_bytes + ndev->stats.rx_bytes + 1;

	if (likely(ndev->stats.tx_bytes > last_tx_bytes))
		tx_bytes = ndev->stats.tx_bytes - last_tx_bytes;
	else
		tx_bytes = ULONG_MAX - last_tx_bytes + ndev->stats.tx_bytes + 1;

	cur_tx_threshold = tx_bytes * 8 / (total_time_ms * 1000);
	pr_debug("%s: tx_rate=%uMbps, up_threshold=%uMbps\n",
		 __func__, cur_tx_threshold, priv->clk_scaling.tx_up_threshold);
	if (cur_tx_threshold >= priv->clk_scaling.tx_up_threshold) {
		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
		goto out;
	}

	cur_rx_threshold = rx_bytes * 8 / (total_time_ms * 1000);
	pr_debug("%s: rx_rate=%uMbps, up_threshold=%uMbps\n",
		 __func__, cur_rx_threshold, priv->clk_scaling.rx_up_threshold);
	if (cur_rx_threshold >= priv->clk_scaling.rx_up_threshold) {
		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
		goto out;
	}

	if (cur_tx_threshold < priv->clk_scaling.tx_down_threshold &&
	    cur_rx_threshold < priv->clk_scaling.rx_down_threshold)
		qos_val = PM_QOS_DEFAULT_VALUE;

out:
	priv->clk_scaling.rx_bytes = ndev->stats.rx_bytes;
	priv->clk_scaling.tx_bytes = ndev->stats.tx_bytes;
	priv->clk_scaling.window_time = jiffies;

	if (qos_val != priv->clk_scaling.qos_val) {
		priv->clk_scaling.qos_val = qos_val;
		schedule_work(&priv->qos_work);
	}
}
#endif

/* strings used by ethtool */
static const struct emac_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} emac_ethtool_stats[] = {
	EMAC_ETHTOOL_STAT(tx_ok_pkts),
	EMAC_ETHTOOL_STAT(tx_total_pkts),
	EMAC_ETHTOOL_STAT(tx_ok_bytes),
	EMAC_ETHTOOL_STAT(tx_err_pkts),
	EMAC_ETHTOOL_STAT(tx_singleclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_multiclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_lateclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_excessclsn_pkts),
	EMAC_ETHTOOL_STAT(tx_unicast_pkts),
	EMAC_ETHTOOL_STAT(tx_multicast_pkts),
	EMAC_ETHTOOL_STAT(tx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(tx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_pkts),
	EMAC_ETHTOOL_STAT(rx_total_pkts),
	EMAC_ETHTOOL_STAT(rx_crc_err_pkts),
	EMAC_ETHTOOL_STAT(rx_align_err_pkts),
	EMAC_ETHTOOL_STAT(rx_err_total_pkts),
	EMAC_ETHTOOL_STAT(rx_ok_bytes),
	EMAC_ETHTOOL_STAT(rx_total_bytes),
	EMAC_ETHTOOL_STAT(rx_unicast_pkts),
	EMAC_ETHTOOL_STAT(rx_multicast_pkts),
	EMAC_ETHTOOL_STAT(rx_broadcast_pkts),
	EMAC_ETHTOOL_STAT(rx_pause_pkts),
	EMAC_ETHTOOL_STAT(rx_len_err_pkts),
	EMAC_ETHTOOL_STAT(rx_len_undersize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_oversize_pkts),
	EMAC_ETHTOOL_STAT(rx_len_fragment_pkts),
	EMAC_ETHTOOL_STAT(rx_len_jabber_pkts),
	EMAC_ETHTOOL_STAT(rx_64_pkts),
	EMAC_ETHTOOL_STAT(rx_65_127_pkts),
	EMAC_ETHTOOL_STAT(rx_128_255_pkts),
	EMAC_ETHTOOL_STAT(rx_256_511_pkts),
	EMAC_ETHTOOL_STAT(rx_512_1023_pkts),
	EMAC_ETHTOOL_STAT(rx_1024_1518_pkts),
	EMAC_ETHTOOL_STAT(rx_1519_plus_pkts),
	EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts),
	EMAC_ETHTOOL_STAT(rx_dma_missed_frame_cnt),
	EMAC_ETHTOOL_STAT(tx_tso_pkts),
	EMAC_ETHTOOL_STAT(tx_tso_bytes),
};

static int emac_set_speed_duplex(struct emac_priv *priv)
{
	u32 ctrl;

	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
	if (priv->duplex)
		ctrl |= MREGBIT_FULL_DUPLEX_MODE;
	else
		ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;

	switch (priv->speed) {
	case SPEED_1000:
		ctrl |= MREGBIT_SPEED_1000M;
		break;
	case SPEED_100:
		ctrl |= MREGBIT_SPEED_100M;
		break;
	case SPEED_10:
		ctrl |= MREGBIT_SPEED_10M;
		break;
	default:
		pr_err("broken speed: %d\n", priv->speed);
		return 0;
	}
	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	pr_info("emac: force link speed:%dM duplex:%s\n",
		priv->speed, priv->duplex ? "Full" : "Half");

	return 0;
}

static int emac_set_fixed_link(struct device_node *np, struct emac_priv *priv)
{
	struct fixed_phy_status status = {};
	struct device_node *fixed_link_node;
	u32 fixed_link_prop[5];
	const char *managed;
	int interface;

	if (of_property_read_string(np, "managed", &managed) == 0 &&
	    strcmp(managed, "in-band-status") == 0) {
		/* status is zeroed, namely its .link member */
		goto fix_link;
	}

	/* New binding */
	fixed_link_node = of_get_child_by_name(np, "fixed-link");
	if (fixed_link_node) {
		status.link = 1;
		status.duplex = of_property_read_bool(fixed_link_node,
						      "full-duplex");
		if (of_property_read_u32(fixed_link_node, "speed",
					 &status.speed)) {
			of_node_put(fixed_link_node);
			return -EINVAL;
		}
		status.pause = of_property_read_bool(fixed_link_node, "pause");
		status.asym_pause = of_property_read_bool(fixed_link_node,
							  "asym-pause");
		interface = of_get_phy_mode(fixed_link_node);
		if (interface < 0) {
			priv->interface = PHY_INTERFACE_MODE_RGMII;
			pr_info("no interface for fixed-link, using RGMII\n");
		} else {
			priv->interface = interface;
		}

		of_node_put(fixed_link_node);
		goto fix_link;
	}

	/* Old binding */
	if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
				       ARRAY_SIZE(fixed_link_prop)) == 0) {
		status.link = 1;
		status.duplex = fixed_link_prop[1];
		status.speed = fixed_link_prop[2];
		status.pause = fixed_link_prop[3];
		status.asym_pause = fixed_link_prop[4];
		goto fix_link;
	}

	return -ENODEV;

fix_link:
	priv->speed = status.speed;
	priv->duplex = status.duplex;

	return emac_set_speed_duplex(priv);
}

void register_dump(struct emac_priv *priv)
{
	int i;
	void __iomem *base = priv->iobase;

	for (i = 0; i < 16; i++) {
		pr_info("DMA:0x%x:0x%x\n",
			DMA_CONFIGURATION + i * 4,
			readl(base + DMA_CONFIGURATION + i * 4));
	}
	for (i = 0; i < 60; i++) {
		pr_info("MAC:0x%x:0x%x\n",
			MAC_GLOBAL_CONTROL + i * 4,
			readl(base + MAC_GLOBAL_CONTROL + i * 4));
	}

	for (i = 0; i < 4; i++) {
		pr_info("1588:0x%x:0x%x\n",
			PTP_1588_CTRL + i * 4,
			readl(base + PTP_1588_CTRL + i * 4));
	}

	for (i = 0; i < 6; i++) {
		pr_info("1588:0x%x:0x%x\n",
			SYS_TIME_GET_LOW + i * 4,
			readl(base + SYS_TIME_GET_LOW + i * 4));
	}
	for (i = 0; i < 5; i++) {
		pr_info("1588:0x%x:0x%x\n",
			RX_TIMESTAMP_LOW + i * 4,
			readl(base + RX_TIMESTAMP_LOW + i * 4));
	}
	for (i = 0; i < 2; i++) {
		pr_info("1588:0x%x:0x%x\n",
			PTP_1588_IRQ_STS + i * 4,
			readl(base + PTP_1588_IRQ_STS + i * 4));
	}

	if (priv->tso) {
		for (i = 0; i < 18; i++) {
			pr_info("TSO:0x%x:0x%x\n", i * 4,
				emac_rd_tso(priv, i * 4));
		}
	}
}

void print_pkt(unsigned char *buf, int len)
{
	int i = 0;

	pr_debug("data len = %d byte, buf addr: 0x%x\n",
		 len, (unsigned int)buf);
	for (i = 0; i < len; i = i + 8) {
		pr_debug("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
			 *(buf + i),
			 *(buf + i + 1),
			 *(buf + i + 2),
			 *(buf + i + 3),
			 *(buf + i + 4),
			 *(buf + i + 5),
			 *(buf + i + 6),
			 *(buf + i + 7));
	}
}

#ifdef EMAC_DEBUG
void print_desc(unsigned char *buf, int len)
{
	int i;

	pr_info("descriptor len = %d byte, buf addr: 0x%x\n",
		len, (unsigned int)buf);
	for (i = 0; i < len; i = i + 4) {
		pr_info("0x%02x%02x%02x%02x\n",
			*(buf + i + 3),
			*(buf + i + 2),
			*(buf + i + 1),
			*(buf + i));
	}
}
#else
void print_desc(unsigned char *buf, int len)
{
}
#endif

/* Name emac_reset_hw
 * Arguments priv : pointer to hardware data structure
 * Return Status: 0 - Success; non-zero - Fail
 * Description Masks all interrupts, stops the transmit, receive and
 * DMA units, and resets the MAC and statistic counters.
 */
int emac_reset_hw(struct emac_priv *priv)
{
	mutex_lock(&priv->mii_mutex);
	/* disable all the interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);

	/* disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* stop the DMA */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	/* reset mac, statistic counters */
	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018);

	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000);

	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
	mutex_unlock(&priv->mii_mutex);
	return 0;
}

/* Name emac_init_hw
 * Arguments priv : pointer to hardware data structure
 * Return Status: 0 - Success; non-zero - Fail
 * Description Assumes that the controller has previously been reset
 * and is in a post-reset uninitialized state.
 * Initializes the receive address registers,
 * multicast table, and VLAN filter table.
 * Calls routines to setup link
 * configuration and flow control settings.
 * Clears all on-chip counters. Leaves
 * the transmit and receive units disabled and uninitialized.
 */
int emac_init_hw(struct emac_priv *priv)
{
	u32 val = 0, threshold;

	mutex_lock(&priv->mii_mutex);
	/* MAC Init
	 * disable transmit and receive units
	 */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);

	/* enable mac address 1 filtering */
	//emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0001);
	emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0100);

	/* zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0000);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0000);

	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, EMAC_TX_FIFO_DWORDS - 8);

	if (priv->speed == SPEED_1000)
		threshold = 1024;
	else if (priv->speed == SPEED_100)
		threshold = 256;
	else
		threshold = TX_STORE_FORWARD_MODE;
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, threshold);

	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, 0xc);

	/* reset dma */
	emac_wr(priv, DMA_CONTROL, 0x0000);

	emac_wr(priv, DMA_CONFIGURATION, 0x01);
	mdelay(10);
	emac_wr(priv, DMA_CONFIGURATION, 0x00);
	mdelay(10);

	val |= MREGBIT_WAIT_FOR_DONE;
	val |= MREGBIT_STRICT_BURST;
	val |= MREGBIT_DMA_64BIT_MODE;
	val |= MREGBIT_BURST_16WORD;	/* MREGBIT_BURST_1WORD */

	emac_wr(priv, DMA_CONFIGURATION, val);

	/* MDC Clock Division: AXI-312M/96 = 3.25M */
	emac_wr(priv, MAC_MDIO_CLK_DIV,
		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);

	mutex_unlock(&priv->mii_mutex);

	printk("MDIO clock div: 0x%x\n", emac_rd(priv, MAC_MDIO_CLK_DIV));
	return 0;
}

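/*
 * The MAC address registers each hold 16 bits, low byte first: for
 * address 00:11:22:33:44:55, HIGH = 0x1100, MED = 0x3322, LOW = 0x5544.
 */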
int emac_set_mac_addr(struct emac_priv *priv, unsigned char *addr)
{
	emac_wr(priv, MAC_ADDRESS1_HIGH, (addr[1] << 8 | addr[0]));
	emac_wr(priv, MAC_ADDRESS1_MED, (addr[3] << 8 | addr[2]));
	emac_wr(priv, MAC_ADDRESS1_LOW, (addr[5] << 8 | addr[4]));

	return 0;
}

void emac_set_fc_source_addr(struct emac_priv *priv, unsigned char *addr)
{
	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_HIGH, (addr[1] << 8 | addr[0]));
	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_MED, (addr[3] << 8 | addr[2]));
	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_LOW, (addr[5] << 8 | addr[4]));
}

static inline void emac_dma_start_transmit(struct emac_priv *priv)
{
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF);
}

static inline void emac_dma_start_receive(struct emac_priv *priv)
{
	emac_wr(priv, DMA_RECEIVE_POLL_DEMAND, 0xFF);
}

#ifdef CONFIG_ASR_EMAC_NAPI
void emac_enable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
		       MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
				    TSO_AP_RX_INTR_ENA_CSUM_DONE |
				    TSO_AP_RX_INTR_ENA_CSUM_ERR);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}

void emac_disable_interrupt(struct emac_priv *priv, int tx)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);

	if (tx) {
		val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	} else {
		val &= ~(MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
			 MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
		if (priv->tso)
			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA, 0x0);
	}

	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}
#endif

bool emac_is_rmii_interface(struct emac_priv *priv)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("failed to ioremap APMU base\n");
		/* cannot read the interface select bit; assume RGMII */
		return false;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	val &= (0x1 << regdata->mac_intf_sel_shift);
	iounmap(apmu);

	return !val;
}

void emac_config_phy_interrupt(struct emac_priv *priv, int enable)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("failed to ioremap APMU base\n");
		return;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	if (enable)
		val |= 0x1 << regdata->phy_intr_enable_shift;
	else
		val &= ~(0x1 << regdata->phy_intr_enable_shift);
	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
	iounmap(apmu);
}

void emac_phy_interface_config(struct emac_priv *priv, int phy_interface)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *apmu;
	u32 val;

	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
	if (apmu == NULL) {
		pr_err("failed to ioremap APMU base\n");
		return;
	}

	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
	if (phy_interface == PHY_INTERFACE_MODE_RMII) {
		val &= ~(0x1 << regdata->mac_intf_sel_shift);
		printk("===> set emac interface: rmii\n");
	} else {
		val |= 0x1 << regdata->mac_intf_sel_shift;
		printk("===> set emac interface: rgmii\n");
	}
	val |= 0x1 << regdata->axi_mst_single_id_shift;
	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);

	iounmap(apmu);
	priv->interface = phy_interface;
}

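/*
 * AIB registers are write-protected: every access must be preceded by
 * writing the AKEY_ASFAR/AKEY_ASSAR key pair to the APBC_ASFAR register
 * pair, which is why that sequence repeats before each read or write of
 * the GMAC IO voltage-select register below.
 */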
static void emac_set_aib_power_domain(struct emac_priv *priv)
{
	const struct emac_regdata *regdata = priv->regdata;
	void __iomem *aib_emac_io;
	void __iomem *apbc_asfar;
	u32 tmp;

	if (!regdata->support_dual_vol_power)
		return;

	aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
	apbc_asfar = ioremap(APBC_ASFAR, 8);

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);

	/* 0 = power down; only set power down when vol = 0 */
	if (priv->power_domain) {
		tmp &= ~(0x1 << 2); /* 3.3v */
		printk("===> emac set io to 3.3v\n");
	} else {
		tmp |= 0x1 << 2; /* 1.8v */
		printk("===> emac set io to 1.8v\n");
	}

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	writel(tmp, aib_emac_io);

	writel(AKEY_ASFAR, apbc_asfar);
	writel(AKEY_ASSAR, apbc_asfar + 4);
	tmp = readl(aib_emac_io);
	printk("===> emac AIB read back: 0x%x\n", tmp);

	iounmap(apbc_asfar);
	iounmap(aib_emac_io);
}

static void emac_pause_generate_work_fuc(struct work_struct *work)
{
	struct emac_priv *priv = container_of(work, struct emac_priv,
					      emac_pause_work.work);
	int time_nxt;

	/*
	 * A pause time of 0xFFFF stops the link partner from transmitting
	 * for about 336 ms at 100M or 34 ms at 1000M. Repeated testing
	 * shows that re-sending the pause frame every 20 ms (1000M) or
	 * 300 ms (100M) is enough to keep the neighbor paused.
	 */
	time_nxt = (priv->speed == SPEED_1000) ? 20 : 300;
	if (!priv->pause.pause_time_max) {
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0xffff);
		priv->pause.pause_time_max = 1;
	}

	emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
	schedule_delayed_work(&priv->emac_pause_work, msecs_to_jiffies(time_nxt));
}

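/*
 * Software flow control: when the RX ring fills to within high_water
 * descriptors of the cleanup pointer (the descriptor there is no longer
 * hardware-owned), start sending pause frames periodically via the work
 * above; once the ring drains past low_water, send a zero-time pause
 * frame so the link partner resumes immediately.
 */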
static inline void emac_check_ring_and_send_pause(struct emac_priv *priv)
{
	int pos;
	int high_water;
	int low_water;
	struct emac_rx_desc *rx_desc;
	struct emac_desc_ring *rx_ring;

	rx_ring = &priv->rx_ring;
	pos = rx_ring->nxt_clean;
	high_water = (pos + priv->pause.high_water) % priv->rx_ring.total_cnt;
	low_water = (pos + priv->pause.low_water) % priv->rx_ring.total_cnt;

	rx_desc = emac_get_rx_desc(priv, high_water);
	if (priv->pause.pause_sending == 0 && rx_desc->OWN == 0) {
		schedule_delayed_work(&priv->emac_pause_work, 0);
		priv->pause.pause_sending = 1;
	}

	rx_desc = emac_get_rx_desc(priv, low_water);
	if (rx_desc->OWN && priv->pause.pause_sending) {
		cancel_delayed_work_sync(&priv->emac_pause_work);
		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0);
		emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
		priv->pause.pause_time_max = 0;
		priv->pause.pause_sending = 0;
	}
}

/* Name emac_sw_init
 * Arguments priv : pointer to driver private data structure
 * Return Status: 0 - Success; non-zero - Fail
 * Description Initializes the driver private data fields with their
 * default values.
 */
static int emac_sw_init(struct emac_priv *priv)
{
	priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;

	mutex_init(&priv->mii_mutex);
	spin_lock_init(&priv->spStatsLock);
	spin_lock_init(&priv->spTxLock);
	spin_lock_init(&priv->intr_lock);

	return 0;
}

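/*
 * PTP event packets arrive either directly over Ethernet (ethertype
 * ETH_P_1588) or as UDP to port 319 (PTP_EVENT_PORT). The first header
 * byte, tsmt, carries transportSpecific in the high nibble and
 * messageType in the low nibble; the helper below uses it to pick which
 * message type the hardware should timestamp next.
 */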
static int emac_check_ptp_packet(struct emac_priv *priv,
				 struct sk_buff *skb, int txrx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct ptp_header *ptph = NULL;
	struct iphdr *iph;
	struct udphdr *udph;
	int msg_type, msg_id;
	int ts;

	if (eth->h_proto == htons(ETH_P_1588)) {
		netdev_dbg(priv->ndev, "get PTP packet over ETH\n");
		ptph = (struct ptp_header *)((u8 *)eth + sizeof(struct ethhdr));
	} else if (eth->h_proto == htons(ETH_P_IP)) {
		iph = (struct iphdr *)((u8 *)eth + sizeof(struct ethhdr));
		if (iph->protocol != IPPROTO_UDP)
			return -1;

		udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
		if (htons(udph->dest) != PTP_EVENT_PORT ||
		    htons(udph->source) != PTP_EVENT_PORT)
			return -1;

		netdev_dbg(priv->ndev, "get PTP packet over UDP\n");
		ptph = (struct ptp_header *)((u8 *)udph + sizeof(struct udphdr));
	} else {
		return -1;
	}

	msg_id = -1;
	ts = ptph->tsmt & 0xF0;
	msg_type = (ptph->tsmt) & 0x0F;
	if (txrx) {
		if (msg_type == MSG_SYNC) {
			if (ts)
				msg_id = MSG_PDELAY_REQ;
			else
				msg_id = MSG_DELAY_REQ;
		} else if (msg_type == MSG_DELAY_REQ) {
			msg_id = MSG_SYNC;
		} else if (msg_type == MSG_PDELAY_REQ) {
			msg_id = MSG_PDELAY_RESP;
			memcpy(&priv->sourcePortIdentity,
			       &ptph->sourcePortIdentity,
			       sizeof(struct PortIdentity));
		} else if (msg_type == MSG_PDELAY_RESP) {
			msg_id = MSG_PDELAY_REQ;
		}
	} else {
		netdev_dbg(priv->ndev, "RX timestamp for message type %d\n",
			   ptph->tsmt);

		if (msg_type == MSG_PDELAY_RESP) {
			struct pdelay_resp_msg *presp = (struct pdelay_resp_msg *)ptph;

			/*
			 * Change to monitor SYNC packets if a pdelay
			 * response was received for the same clock identity.
			 */
			if (!memcmp(&presp->requestingPortIdentity.clockIdentity,
				    &priv->sourcePortIdentity.clockIdentity,
				    sizeof(struct ClockIdentity))) {
				msg_id = MSG_SYNC;
			}
		}
	}

	/*
	 * Some platforms cannot timestamp two or more message types at
	 * once, so adjust the hardware filter here.
	 */
	if (msg_id >= 0) {
		if (priv->regdata->ptp_rx_ts_all_events) {
			msg_id = ALL_EVENTS;
			msg_id |= ts | ts << 8 | ts << 16 | ts << 24;
		} else {
			msg_id |= ts;
		}

		priv->hwptp->config_hw_tstamping(priv, 1, PTP_V2_L2_L4, msg_id);
	}

	return ptph->tsmt;
}

/* emac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the register and passes it to
 * the stack, performing some sanity checks along the way.
 */
static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	emac_check_ptp_packet(priv, skb, 1);

	/* get the valid tstamp */
	ns = priv->hwptp->get_tx_timestamp(priv);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);

	wmb();
	netdev_dbg(priv->ndev, "get valid TX hw timestamp %llu\n", ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);
}

/* emac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p,
				 struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (p->ptp_pkt && p->rx_timestamp) {
		emac_check_ptp_packet(priv, skb, 0);
		ns = priv->hwptp->get_rx_timestamp(priv);
		netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n");
	}
}

/**
 * emac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int emac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 ns_ptp;
	u32 ptp_event_msg_id = 0;
	u32 rx_ptp_type = 0;

	if (!priv->ptp_support) {
		netdev_alert(priv->ndev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* time stamp no incoming packet at all */
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* PTP v1, UDP, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V1_L4_ONLY;
		break;

	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* PTP v1, UDP, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V1_L4_ONLY;
		break;

	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		/* PTP v2, UDP, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		/* PTP v2, UDP, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		/* PTP v2/802.AS1 any layer, any kind of event packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		/*
		 * If ALL_EVENTS is not supported, timestamp SYNC packets
		 * by default; this is changed to MSG_DELAY_REQ
		 * automatically when needed.
		 */
		if (priv->regdata->ptp_rx_ts_all_events)
			ptp_event_msg_id = ALL_EVENTS;
		else
			ptp_event_msg_id = MSG_SYNC;

		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		/* PTP v2/802.AS1, any layer, Sync packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		/* take time stamp for SYNC messages only */
		ptp_event_msg_id = MSG_SYNC;
		rx_ptp_type = PTP_V2_L2_L4;
		break;

	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* PTP v2/802.AS1, any layer, Delay_req packet */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ptp_event_msg_id = MSG_DELAY_REQ;
		rx_ptp_type = PTP_V2_L2_L4;
		break;
	default:
		return -ERANGE;
	}

	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hwptp->config_hw_tstamping(priv, 0, 0, 0);
	else {
		priv->hwptp->config_hw_tstamping(priv, 1,
						 rx_ptp_type, ptp_event_msg_id);

		/* initialize system time */
		ktime_get_real_ts64(&now);
		priv->hwptp->init_systime(priv, timespec64_to_ns(&now));

		/* program Increment reg */
		priv->hwptp->config_systime_increment(priv);

		ns_ptp = priv->hwptp->get_phc_time(priv);
		ktime_get_real_ts64(&now);
		/* check the diff between ptp timer and system time */
		if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000)
			priv->hwptp->init_systime(priv,
						  timespec64_to_ns(&now));
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * emac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int emac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct emac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!priv->ptp_support)
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/* Name emac_ioctl
 * Arguments ndev : pointer to net_device structure
 * rq : pointer to the interface request structure used
 * cmd : IOCTL command number
 * Return Status: 0 - Success; non-zero - Fail
 * Description Called by the upper layer to handle the various
 * IOCTL commands.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!ndev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(ndev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = emac_hwtstamp_set(ndev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = emac_hwtstamp_get(ndev, rq);
		break;
	default:
		break;
	}

	return ret;
}

static irqreturn_t emac_wakeup_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	emac_set_axi_bus_clock(priv, 1);
	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
	if (!(ctrl & (MREGBIT_UNICAST_WAKEUP_MODE |
		      MREGBIT_MAGIC_PACKET_WAKEUP_MODE)))
		return IRQ_NONE;

	ctrl &= ~(MREGBIT_UNICAST_WAKEUP_MODE |
		  MREGBIT_MAGIC_PACKET_WAKEUP_MODE);
	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	return IRQ_HANDLED;
}

static irqreturn_t emac_irq_tso(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;

	/* handle rx */
	status = emac_rd_tso(priv, TSO_AP_RX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_RX_INTR_STS=0x%x\n", status);

		if (status & TSO_AP_RX_INTR_ENA_CSUM_DONE) {
#ifdef CONFIG_ASR_EMAC_NAPI
			if (likely(napi_schedule_prep(&priv->rx_napi))) {
				unsigned long flags;

				spin_lock_irqsave(&priv->intr_lock, flags);
				emac_disable_interrupt(priv, 0);
				spin_unlock_irqrestore(&priv->intr_lock, flags);
				__napi_schedule(&priv->rx_napi);
			}
#else
			emac_rx_clean_desc(priv);
#endif
		}

#ifdef EMAC_DEBUG
		if (status & TSO_AP_RX_INTR_ENA_CSUM_ERR)
			pr_err("rx checksum err irq\n");
#endif
		/* clear rx status */
		emac_wr_tso(priv, TSO_AP_RX_INTR_STS, status);
	}

	/* handle tx */
	status = emac_rd_tso(priv, TSO_AP_TX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_TX_INTR_STS=0x%x\n", status);
		if (status & TSO_AP_TX_INTR_TSO_DONE) {
			emac_print("TX TSO done\n");
			emac_dma_start_transmit(priv);
		}

		if (status & TSO_AP_TX_INTR_CSUM_DONE) {
			emac_print("TX checksum done\n");
			emac_dma_start_transmit(priv);
		}

		/* clear tx status */
		emac_wr_tso(priv, TSO_AP_TX_INTR_STS, status);
	}

	/* handle err */
	status = emac_rd_tso(priv, TSO_ERR_INTR_STS);
	if (status) {
		pr_err("TSO: TX/RX ERR, status=0x%x\n", status);
		emac_wr_tso(priv, TSO_ERR_INTR_STS, status);
	}

	return IRQ_HANDLED;
}

/* Name emac_interrupt_handler
 * Arguments irq : irq number for which the interrupt is fired
 * dev_id : pointer that was passed to request_irq and is handed
 * back to the handler
 * Return irqreturn_t : integer value
 * Description Interrupt handler for DMA/MAC interrupts, including RX
 * packet indications.
 */
static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;
	u32 clr = 0;

	/* read the status register for IRQ received */
	status = emac_rd(priv, DMA_STATUS_IRQ);

	/* Check if emac is up */
	if (test_bit(EMAC_DOWN, &priv->state)) {
		emac_wr(priv, DMA_STATUS_IRQ, status & 0x1F7);
		return IRQ_HANDLED;
	}

	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
#ifdef CONFIG_ASR_EMAC_NAPI
		if (likely(napi_schedule_prep(&priv->tx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 1);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->tx_napi);
		}
#else
		emac_tx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;

	if (status & (MREGBIT_RECEIVE_TRANSFER_DONE_IRQ |
		      MREGBIT_RECEIVE_MISSED_FRAME_IRQ)) {
		if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ)
			clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;

		if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
			clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;

		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);

#ifdef CONFIG_ASR_EMAC_NAPI
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			unsigned long flags;

			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 0);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->rx_napi);
		}
#else
		emac_rx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;

	emac_wr(priv, DMA_STATUS_IRQ, clr);

	return IRQ_HANDLED;
}

/* Name emac_command_options
 * Arguments priv : pointer to driver private data structure
 * Return none
 * Description Chooses the RX/TX descriptor ring sizes, using a smaller
 * RX ring on systems with limited memory.
 */
void emac_command_options(struct emac_priv *priv)
{
	int pages = totalram_pages();

	if (pages <= (EMAC_SMALL_RING_MEM_LIMIT >> PAGE_SHIFT))
		priv->rx_ring.total_cnt = EMAC_SMALL_RX_RING_SIZE;
	else
		priv->rx_ring.total_cnt = EMAC_RX_RING_SIZE;
	priv->tx_ring.total_cnt = EMAC_TX_RING_SIZE;

	pr_info("emac: rx_ring=%d, tx_ring=%d, pages=%d\n",
		priv->rx_ring.total_cnt, priv->tx_ring.total_cnt, pages);
}

/* Name emac_configure_tx
 * Arguments priv : pointer to driver private data structure
 * Return none
 * Description Configures the transmit unit of the device
 */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/* set the transmit base address */
	val = (u32)(priv->tx_ring.desc_dma_addr);

	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Tx Inter Packet Gap value and enable the transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= (~MREGBIT_IFG_LEN);
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00);

	/* start tx dma */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}

/* Name emac_configure_rx
 * Arguments priv : pointer to driver private data structure
 * Return none
 * Description Configures the receive unit of the device
 */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* set the receive base address */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* enable the receive */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	val |= MREGBIT_ACOOUNT_VLAN;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* start rx dma */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}

/* Name emac_clean_tx_desc_ring
 * Arguments priv : pointer to driver private data structure
 * Return none
 * Description Frees the TX resources allocated earlier.
 */
static void emac_clean_tx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc_buffer *tx_buf;
	u32 i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->total_cnt; i++) {
		tx_buf = &tx_ring->desc_buf[i];

		if (tx_buf->dma_addr) {
			dma_unmap_page(&priv->pdev->dev,
				       tx_buf->dma_addr,
				       tx_buf->dma_len,
				       DMA_TO_DEVICE);
			tx_buf->dma_addr = 0;
		}

		if (tx_buf->skb) {
			dev_kfree_skb_any(tx_buf->skb);
			tx_buf->skb = NULL;
		}
	}

	tx_ring->nxt_use = 0;
	tx_ring->nxt_clean = 0;
}

/* Name emac_clean_rx_desc_ring
 * Arguments priv : pointer to driver private data structure
 * Return none
 * Description Frees the RX resources allocated earlier.
 */
static void emac_clean_rx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring;
	struct emac_desc_buffer *rx_buf;
	u32 i;

	rx_ring = &priv->rx_ring;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->total_cnt; i++) {
		rx_buf = &rx_ring->desc_buf[i];
		if (rx_buf->skb) {
			emac_unmap_single(&priv->pdev->dev,
					  rx_buf->dma_addr,
					  rx_buf->dma_len,
					  DMA_FROM_DEVICE);
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (rx_buf->buff_addr) {
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
			kfree(rx_buf->buff_addr);
#endif
			rx_buf->buff_addr = NULL;
		}
	}

	rx_ring->nxt_clean = 0;
	rx_ring->nxt_use = 0;
}

void emac_ptp_init(struct emac_priv *priv)
{
	int ret;

	if (priv->ptp_support) {
		ret = clk_prepare_enable(priv->ptp_clk);
		if (ret < 0) {
			pr_warn("ptp clock failed to enable\n");
			priv->ptp_clk = NULL;
		}

		emac_ptp_register(priv);

		if (IS_ERR_OR_NULL(priv->ptp_clock)) {
			priv->ptp_support = 0;
			pr_warn("disable PTP due to clock not enabled\n");
		}
	}
}

void emac_ptp_deinit(struct emac_priv *priv)
{
	if (priv->ptp_support) {
		if (priv->ptp_clk)
			clk_disable_unprepare(priv->ptp_clk);

		emac_ptp_unregister(priv);
	}
}

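/*
 * Interrupt mitigation timers: emac_tx_timer_arm() delays TX completion
 * cleanup by EMAC_TX_COAL_TIMER_US (1000 us) so several completions are
 * handled per NAPI pass, and emac_rx_timer_arm() re-polls the RX ring
 * after EMAC_RX_FILL_TIMER_US (0 disables it). When they fire, they
 * normally mask the corresponding DMA interrupt and schedule NAPI.
 */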
static void emac_rx_timer_arm(struct emac_priv *priv)
{
	u32 rx_fill_timer = EMAC_RX_FILL_TIMER_US;

	if (!rx_fill_timer)
		return;

	if (hrtimer_is_queued(&priv->rx_timer))
		return;

	hrtimer_start(&priv->rx_timer,
		      ns_to_ktime(rx_fill_timer * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}

static enum hrtimer_restart emac_rx_timer(struct hrtimer *t)
{
	struct emac_priv *priv = container_of(t, struct emac_priv, rx_timer);
	struct napi_struct *napi = &priv->rx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&priv->intr_lock, flags);
		emac_disable_interrupt(priv, 0);
		spin_unlock_irqrestore(&priv->intr_lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

static void emac_tx_timer_arm(struct emac_priv *priv)
{
	u32 tx_coal_timer = EMAC_TX_COAL_TIMER_US;

	if (!tx_coal_timer)
		return;

	if (hrtimer_is_queued(&priv->tx_timer))
		return;

	hrtimer_start(&priv->tx_timer,
		      ns_to_ktime(tx_coal_timer * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}

static enum hrtimer_restart emac_tx_timer(struct hrtimer *t)
{
	struct emac_priv *priv = container_of(t, struct emac_priv, tx_timer);
	struct napi_struct *napi = &priv->tx_napi;

	if (priv->tso) {
		emac_dma_start_transmit(priv);
		return HRTIMER_NORESTART;
	}

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&priv->intr_lock, flags);
		emac_disable_interrupt(priv, 1);
		spin_unlock_irqrestore(&priv->intr_lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

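/*
 * TSO engine setup sketch: the offload engine shares the TX/RX
 * descriptor rings with the MAC DMA, so both ring base addresses are
 * handed over (shifted right by one, apparently the format this block
 * expects), and a coherent header buffer of 0x80 bytes per TX
 * descriptor is allocated for the rebuilt packet headers.
 */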
static int emac_tso_config(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	u32 val = 0;

	/* reset */
	emac_wr_tso(priv, TSO_CONFIG, TSO_CONFIG_RST);
	mdelay(1);
	emac_wr_tso(priv, TSO_CONFIG, 0x0);

	emac_wr_tso(priv, TSO_DMA_CONFIG, 0x2 << 8);

	/* rx: set the receive descriptor base address */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_RX_DESC_BA, val >> 1);
	emac_wr_tso(priv, TSO_RX_AUTO_POLL_CNT, 0x0);

	/* tx */
	val = (u32)(priv->tx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_TX_DESC_BA, val >> 1);

	priv->tso_hdr = dma_alloc_coherent(&priv->pdev->dev,
					   tx_ring->total_cnt * 0x80,
					   &priv->tso_hdr_addr,
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->tso_hdr) {
		pr_err("Memory allocation failed for tso_hdr\n");
		return -ENOMEM;
	}

	val = (u32)(priv->tso_hdr_addr);
	emac_wr_tso(priv, TSO_TX_HDR_BA, val >> 1);
	emac_wr_tso(priv, TSO_TX_HDR_CTR, tx_ring->total_cnt);
	emac_wr_tso(priv, TSO_TX_AUTO_POLL_CNT, 0x0);

	/* enable tx/rx tso/coe */
	emac_wr_tso(priv, TSO_CONFIG,
		    TSO_CONFIG_RX_EN | TSO_CONFIG_TX_EN | TSO_CONFIG_RX_CSUM_EN);

	/* enable tx/rx/err interrupt */
	emac_wr_tso(priv, TSO_ERR_INTR_ENA, 0xF0007);
	emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
		    TSO_AP_RX_INTR_ENA_CSUM_DONE | TSO_AP_RX_INTR_ENA_CSUM_ERR);
#if 1
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA,
		    TSO_AP_TX_INTR_ENA_TSO_DONE | TSO_AP_TX_INTR_ENA_CSUM_DONE);
#else
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA, 0x0);
#endif
	return 0;
}

/* Name emac_up
 * Arguments priv : pointer to driver private data structure
 * Return Status: 0 - Success; non-zero - Fail
 * Description This function is called from emac_open and
 * performs the work needed when the net interface comes up.
 * It configures the Tx and Rx units of the device and
 * registers the interrupt handlers.
 * It also starts one watchdog timer to monitor
 * the net interface link status.
 */
int emac_up(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int ret, val;
#ifdef WAN_LAN_AUTO_ADAPT
	u32 phy_id;
#endif

	priv->hw_stats->tx_tso_pkts = 0;
	priv->hw_stats->tx_tso_bytes = 0;

	ret = emac_phy_connect(ndev);
	if (ret) {
		pr_err("%s phy_connect failed\n", __func__);
		return ret;
	}

	if (!priv->en_suspend)
		pm_stay_awake(&priv->pdev->dev);
	pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);

	clk_phase_set(priv, TX_PHASE);
	clk_phase_set(priv, RX_PHASE);

	/* init hardware */
	emac_init_hw(priv);

	emac_ptp_init(priv);

	emac_set_mac_addr(priv, ndev->dev_addr);

	emac_set_fc_source_addr(priv, ndev->dev_addr);

	/* configure transmit unit */
	emac_configure_tx(priv);
	/* configure rx unit */
	emac_configure_rx(priv);

	/* allocate buffers for receive descriptors */
	emac_alloc_rx_desc_buffers(priv);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	/* allocates interrupt resources and
	 * enables the interrupt line and IRQ handling
	 */
	ret = request_irq(priv->irq, emac_interrupt_handler,
			  IRQF_SHARED, ndev->name, ndev);
	if (ret) {
		pr_err("request_irq failed, ret=%d\n", ret);
		goto request_irq_failed;
	}

	if (priv->irq_wakeup) {
		ret = request_irq(priv->irq_wakeup, emac_wakeup_handler,
				  IRQF_SHARED, ndev->name, ndev);
		if (ret) {
			pr_err("request wakeup_irq failed, ret=%d\n", ret);
			goto request_wakeup_irq_failed;
		}
	}

	if (priv->irq_tso) {
		ret = request_irq(priv->irq_tso, emac_irq_tso,
				  IRQF_SHARED, "emac_tso", ndev);
		if (ret) {
			pr_err("request tso irq failed, ret=%d\n", ret);
			goto request_tso_irq_failed;
		}
	}

	if (priv->fix_link)
		emac_set_speed_duplex(priv);

	clear_bit(EMAC_DOWN, &priv->state);

	/* mask all mac interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);

	/* both rx tx */
	val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
	      MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
	      MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
#if 0
	val |= MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
	       MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
	       MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE;
#endif
	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);

#ifdef CONFIG_ASR_EMAC_NAPI
	napi_enable(&priv->rx_napi);
	napi_enable(&priv->tx_napi);
#endif

	if (priv->fix_link && !netif_carrier_ok(ndev))
		netif_carrier_on(ndev);

#ifdef WAN_LAN_AUTO_ADAPT
	phy_id = ndev->phydev->phy_id;
	if (phy_id == IP175D_PHY_ID)
		emac_sig_workq(CARRIER_UP_IP175D, 0);
	else
		emac_sig_workq(CARRIER_UP, 0);
#endif

	hrtimer_init(&priv->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->tx_timer.function = emac_tx_timer;
	hrtimer_init(&priv->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->rx_timer.function = emac_rx_timer;

	if (priv->tso)
		emac_tso_config(priv);

	netif_tx_start_all_queues(ndev);
	return 0;

request_tso_irq_failed:
	if (priv->irq_wakeup)
		free_irq(priv->irq_wakeup, ndev);

request_wakeup_irq_failed:
	free_irq(priv->irq, ndev);

request_irq_failed:
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	return ret;
}

/* Name emac_down
 * Arguments priv : pointer to driver private data structure
 * Return Status: 0 - Success; non-zero - Fail
 * Description This function is called from emac_close and
 * performs the teardown when the net interface goes down.
 * It frees the irqs and cancels the timers,
 * takes the net interface offline,
 * resets the hardware, and cleans the Tx and Rx
 * descriptor rings.
 */
int emac_down(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
#ifdef WAN_LAN_AUTO_ADAPT
	u32 phy_id;

	priv->dhcp = 0;
	priv->vlan_port = -1;
	priv->link = 0;
	phy_id = ndev->phydev->phy_id;
	if (priv->dhcp_delaywork) {
		cancel_delayed_work(&priv->dhcp_work);
		priv->dhcp_delaywork = 0;
	}
#endif
	set_bit(EMAC_DOWN, &priv->state);

	netif_tx_disable(ndev);

	hrtimer_cancel(&priv->tx_timer);
	hrtimer_cancel(&priv->rx_timer);
	/* Stop and disconnect the PHY */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	if (!priv->fix_link) {
		priv->duplex = DUPLEX_UNKNOWN;
		priv->speed = SPEED_UNKNOWN;
	}

#ifdef CONFIG_ASR_EMAC_NAPI
	napi_disable(&priv->rx_napi);
	napi_disable(&priv->tx_napi);
#endif
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);

	free_irq(priv->irq, ndev);
	if (priv->irq_wakeup)
		free_irq(priv->irq_wakeup, ndev);

	emac_ptp_deinit(priv);

	emac_reset_hw(priv);
	netif_carrier_off(ndev);

#ifdef WAN_LAN_AUTO_ADAPT
	if (phy_id == IP175D_PHY_ID)
		emac_sig_workq(CARRIER_DOWN_IP175D, 0);
	else
		emac_sig_workq(CARRIER_DOWN, 0);
#endif

#ifdef CONFIG_ASR_EMAC_DDR_QOS
	flush_work(&priv->qos_work);
	pm_qos_update_request(&priv->clk_scaling.ddr_qos, PM_QOS_DEFAULT_VALUE);
#endif
	pm_qos_update_request(&priv->pm_qos_req,
			      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

	if (!priv->en_suspend)
		pm_relax(&priv->pdev->dev);

	if (priv->tso) {
		dma_free_coherent(&priv->pdev->dev,
				  priv->tx_ring.total_cnt * 0x80,
				  priv->tso_hdr,
				  priv->tso_hdr_addr);
	}

	return 0;
}

1971/* Name emac_alloc_tx_resources
1972 * Arguments priv : pointer to driver private data structure
1973 * Return Status: 0 - Success; non-zero - Fail
1974 * Description Allocates TX descriptor resources and records their virtual and DMA addresses.
1975 */
1976int emac_alloc_tx_resources(struct emac_priv *priv)
1977{
1978 struct emac_desc_ring *tx_ring = &priv->tx_ring;
1979 struct platform_device *pdev = priv->pdev;
1980 u32 size;
1981
1982 size = sizeof(struct emac_desc_buffer) * tx_ring->total_cnt;
1983
1984 /* allocate memory */
1985 tx_ring->desc_buf = kzalloc(size, GFP_KERNEL);
1986 if (!tx_ring->desc_buf) {
1987 pr_err("Memory allocation failed for the Transmit descriptor buffer\n");
1988 return -ENOMEM;
1989 }
1990
1991 memset(tx_ring->desc_buf, 0, size);
1992
1993 tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc);
1994
1995 EMAC_ROUNDUP(tx_ring->total_size, 1024);
1996
1997 if (priv->sram_pool) {
1998 tx_ring->desc_addr =
1999 (void *)gen_pool_dma_alloc(
2000 priv->sram_pool, tx_ring->total_size,
2001 &tx_ring->desc_dma_addr);
2002 tx_ring->in_sram = true;
2003 }
2004
2005 if (!tx_ring->desc_addr) {
2006 tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2007 tx_ring->total_size,
2008 &tx_ring->desc_dma_addr,
2009 GFP_KERNEL | __GFP_ZERO);
2010 if (!tx_ring->desc_addr) {
2011 pr_err("Memory allocation failed for the Transmit descriptor ring\n");
2012 kfree(tx_ring->desc_buf);
2013 return -ENOMEM;
2014 }
2015
2016 if (priv->sram_pool) {
2017 pr_err("sram pool left size not enough, tx fallback\n");
2018 tx_ring->in_sram = false;
2019 }
2020 }
2021
2022 memset(tx_ring->desc_addr, 0, tx_ring->total_size);
2023
2024 tx_ring->nxt_use = 0;
2025 tx_ring->nxt_clean = 0;
2026
2027 return 0;
2028}
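/*
 * Sizing note (illustrative, not the driver defaults): with a
 * 128-entry ring and 16-byte descriptors, total_size = 128 * 16 =
 * 2048 bytes and EMAC_ROUNDUP() leaves it unchanged, while a
 * 100-entry ring (1600 bytes) would be padded up to 2048. This
 * assumes EMAC_ROUNDUP(size, n) rounds size up to the next
 * multiple of n.
 */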
2029
2030/* Name emac_alloc_rx_resources
2031 * Arguments priv : pointer to driver private data structure
2032 * Return Status: 0 - Success; non-zero - Fail
2033 * Description Allocates RX descriptor resources and records their virtual and DMA addresses.
2034 */
2035int emac_alloc_rx_resources(struct emac_priv *priv)
2036{
2037 struct emac_desc_ring *rx_ring = &priv->rx_ring;
2038 struct platform_device *pdev = priv->pdev;
2039 u32 buf_len;
2040
2041 buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt;
2042
2043 rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL);
2044 if (!rx_ring->desc_buf) {
2045 pr_err("Memory allocation failed for the Receive descriptor buffer\n");
2046 return -ENOMEM;
2047 }
2048
2049 memset(rx_ring->desc_buf, 0, buf_len);
2050
2051 /* round up to nearest 1K */
2052 rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc);
2053
2054 EMAC_ROUNDUP(rx_ring->total_size, 1024);
2055
2056 if (priv->sram_pool) {
2057 rx_ring->desc_addr =
2058 (void *)gen_pool_dma_alloc(
2059 priv->sram_pool, rx_ring->total_size,
2060 &rx_ring->desc_dma_addr);
2061 rx_ring->in_sram = true;
2062 }
2063
2064 if (!rx_ring->desc_addr) {
2065 rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
2066 rx_ring->total_size,
2067 &rx_ring->desc_dma_addr,
2068 GFP_KERNEL | __GFP_ZERO);
2069 if (!rx_ring->desc_addr) {
2070 pr_err("Memory allocation failed for the Receive descriptor ring\n");
2071 kfree(rx_ring->desc_buf);
2072 return -ENOMEM;
2073 }
2074
2075 if (priv->sram_pool) {
2076 pr_err("sram pool left size not enough, rx fallback\n");
2077 rx_ring->in_sram = false;
2078 }
2079 }
2080
2081 memset(rx_ring->desc_addr, 0, rx_ring->total_size);
2082
2083 rx_ring->nxt_use = 0;
2084 rx_ring->nxt_clean = 0;
2085
2086 return 0;
2087}
2088
2089/* Name emac_free_tx_resources
2090 * Arguments priv : pointer to driver private data structure
2091 * Return none
2092 * Description Frees the Tx resources allocated
2093 */
2094void emac_free_tx_resources(struct emac_priv *priv)
2095{
2096 emac_clean_tx_desc_ring(priv);
2097 kfree(priv->tx_ring.desc_buf);
2098 priv->tx_ring.desc_buf = NULL;
2099 if (priv->tx_ring.in_sram)
2100 gen_pool_free(priv->sram_pool,
2101 (unsigned long) priv->tx_ring.desc_addr,
2102 priv->tx_ring.total_size);
2103 else
2104 dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size,
2105 priv->tx_ring.desc_addr,
2106 priv->tx_ring.desc_dma_addr);
2107 priv->tx_ring.desc_addr = NULL;
2108}
2109
2110/* Name emac_free_rx_resources
2111 * Arguments priv : pointer to driver private data structure
2112 * Return none
2113 * Description Frees the Rx resources allocated
2114 */
2115void emac_free_rx_resources(struct emac_priv *priv)
2116{
2117 emac_clean_rx_desc_ring(priv);
2118 kfree(priv->rx_ring.desc_buf);
2119 priv->rx_ring.desc_buf = NULL;
2120 if (priv->rx_ring.in_sram)
2121 gen_pool_free(priv->sram_pool,
2122 (unsigned long) priv->rx_ring.desc_addr,
2123 priv->rx_ring.total_size);
2124 else
2125 dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size,
2126 priv->rx_ring.desc_addr,
2127 priv->rx_ring.desc_dma_addr);
2128 priv->rx_ring.desc_addr = NULL;
2129}
2130
2131/* Name emac_open
2132 * Arguments ndev : pointer to net_device structure
2133 * Return Status: 0 - Success; non-zero - Fail
2134 * Description This function is called when the net interface is brought up.
2135 * It sets up the Tx and Rx
2136 * resources and brings the interface up.
2137 */
2138static int emac_open(struct net_device *ndev)
2139{
2140 struct emac_priv *priv = netdev_priv(ndev);
2141 int ret;
2142
2143 ret = emac_alloc_tx_resources(priv);
2144 if (ret) {
2145 pr_err("Error in setting up the Tx resources\n");
2146 goto emac_alloc_tx_resource_fail;
2147 }
2148
2149 ret = emac_alloc_rx_resources(priv);
2150 if (ret) {
2151 pr_err("Error in setting up the Rx resources\n");
2152 goto emac_alloc_rx_resource_fail;
2153 }
2154
2155 ret = emac_up(priv);
2156 if (ret) {
2157 pr_err("Error in making the net intrface up\n");
2158 goto emac_up_fail;
2159 }
2160 return 0;
2161
2162emac_up_fail:
2163 emac_free_rx_resources(priv);
2164emac_alloc_rx_resource_fail:
2165 emac_free_tx_resources(priv);
2166emac_alloc_tx_resource_fail:
2167 emac_reset_hw(priv);
2168 return ret;
2169}
2170
2171/* Name emac_close
2172 * Arguments ndev : pointer to net_device structure
2173 * Return Status: 0 - Success; non-zero - Fail
2174 * Description This function is called when the net interface is brought down.
2175 * It calls the appropriate functions to
2176 * free the Tx and Rx resources.
2177 */
2178static int emac_close(struct net_device *ndev)
2179{
2180 struct emac_priv *priv = netdev_priv(ndev);
2181
2182 emac_down(priv);
2183 emac_free_tx_resources(priv);
2184 emac_free_rx_resources(priv);
2185
2186 return 0;
2187}
2188
2189/* Name emac_tx_clean_desc
2190 * Arguments priv : pointer to driver private data structure
2191 * Return Number of descriptors cleaned
2192 * Description Reclaims completed Tx descriptors, unmapping their buffers and freeing the skbs.
2193 */
2194#ifdef CONFIG_ASR_EMAC_NAPI
2195static int emac_tx_clean_desc(struct emac_priv *priv, int budget)
2196#else
2197static int emac_tx_clean_desc(struct emac_priv *priv)
2198#endif
2199{
2200 struct emac_desc_ring *tx_ring;
2201 struct emac_tx_desc *tx_desc, *end_desc;
2202 struct emac_desc_buffer *tx_buf;
2203 struct net_device *ndev = priv->ndev;
2204 u32 i, u32LastIndex;
2205 u8 u8Cleaned;
2206 unsigned int count = 0;
2207
2208 tx_ring = &priv->tx_ring;
2209 i = tx_ring->nxt_clean;
2210 do {
2211 if (i == tx_ring->nxt_use)
2212 break;
2213
2214 u32LastIndex = tx_ring->desc_buf[i].nxt_watch;
2215 end_desc = emac_get_tx_desc(priv, u32LastIndex);
2216 if (end_desc->OWN == 1 ||
2217 (priv->tso && (end_desc->tso || end_desc->coe)))
2218 break;
2219
2220 u8Cleaned = false;
2221 for ( ; !u8Cleaned; count++) {
2222 tx_desc = emac_get_tx_desc(priv, i);
2223 tx_buf = &tx_ring->desc_buf[i];
2224
2225 emac_get_tx_hwtstamp(priv, tx_buf->skb);
2226
2227 /* own bit will be reset to 0 by dma
2228 * once packet is transmitted
2229 */
2230 if (tx_buf->dma_addr) {
2231 dma_unmap_page(&priv->pdev->dev,
2232 tx_buf->dma_addr,
2233 tx_buf->dma_len,
2234 DMA_TO_DEVICE);
2235 tx_buf->dma_addr = 0;
2236 }
2237 if (tx_buf->skb) {
2238 dev_kfree_skb_any(tx_buf->skb);
2239 tx_buf->skb = NULL;
2240 }
2241 if (tx_buf->buff_addr)
2242 tx_buf->buff_addr = NULL;
2243
2244 memset(tx_desc, 0, sizeof(struct emac_tx_desc));
2245 u8Cleaned = (i == u32LastIndex);
2246 if (++i == tx_ring->total_cnt)
2247 i = 0;
2248 }
2249
2250#ifdef CONFIG_ASR_EMAC_NAPI
2251 if (count >= budget) {
2252 count = budget;
2253 break;
2254 }
2255#endif
2256 } while (1);
2257 tx_ring->nxt_clean = i;
2258
2259#ifndef CONFIG_ASR_EMAC_NAPI
2260 spin_lock(&priv->spTxLock);
2261#endif
2262 if (unlikely(count && netif_queue_stopped(ndev) &&
2263 netif_carrier_ok(ndev) &&
2264 EMAC_DESC_UNUSED(tx_ring) >= EMAC_TX_WAKE_THRESHOLD))
2265 netif_wake_queue(ndev);
2266#ifndef CONFIG_ASR_EMAC_NAPI
2267 spin_unlock(&priv->spTxLock);
2268#endif
2269 return count;
2270}
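/*
 * Completion-tracking note: when a packet is queued, the desc_buf
 * entry of its first descriptor records in nxt_watch the index of the
 * packet's last descriptor. emac_tx_clean_desc() therefore only has
 * to test the OWN bit of that final descriptor; once the DMA engine
 * clears it, every descriptor of the packet is reclaimed in one pass
 * of the inner loop. Illustration (indices are examples only):
 *
 *   first desc at index 5, nxt_watch = 7
 *   -> check OWN of desc 7; if clear, unmap/free descs 5, 6 and 7
 */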
2271
2272static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc)
2273{
2274 /* if the last-descriptor bit isn't set, drop the frame */
2275 if (!dsc->LastDescriptor) {
2276 netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n");
2277 return frame_discard;
2278 }
2279
2280 /*
2281 * A frame that is less than 64 bytes (from DA through the FCS field)
2282 * is considered as Runt Frame.
2283 * Most of the Runt Frames happen because of collisions.
2284 */
2285 if (dsc->ApplicationStatus & EMAC_RX_FRAME_RUNT) {
2286 netdev_dbg(priv->ndev, "rx frame less than 64.\n");
2287 return frame_discard;
2288 }
2289
2290 /*
2291 * When the frame fails the CRC check,
2292 * the frame is assumed to have the CRC error
2293 */
2294 if (dsc->ApplicationStatus & EMAC_RX_FRAME_CRC_ERR) {
2295 netdev_dbg(priv->ndev, "rx frame crc error\n");
2296 return frame_discard;
2297 }
2298
2299 if (priv->tso && dsc->csum_res == EMAC_CSUM_FAIL) {
2300 netdev_dbg(priv->ndev, "COE: rx frame checksum error\n");
2301 return frame_discard;
2302 }
2303
2304 /*
2305 * When the length of the frame exceeds
2306 * the Programmed Max Frame Length
2307 */
2308 if (dsc->ApplicationStatus & EMAC_RX_FRAME_MAX_LEN_ERR) {
2309 netdev_dbg(priv->ndev, "rx frame too long\n");
2310 return frame_discard;
2311 }
2312
2313 /*
2314 * frame reception is truncated at that point and
2315 * frame is considered to have Jabber Error
2316 */
2317 if (dsc->ApplicationStatus & EMAC_RX_FRAME_JABBER_ERR) {
2318 netdev_dbg(priv->ndev, "rx frame has been truncated\n");
2319 return frame_discard;
2320 }
2321
2322 /* this bit is only for 802.3 Type Frames */
2323 if (dsc->ApplicationStatus & EMAC_RX_FRAME_LENGTH_ERR) {
2324 netdev_dbg(priv->ndev, "rx frame length err for 802.3\n");
2325 return frame_discard;
2326 }
2327
2328 if (dsc->FramePacketLength <= ETHERNET_FCS_SIZE ||
2329 dsc->FramePacketLength > EMAC_RX_BUFFER_2048) {
2330 netdev_dbg(priv->ndev, "rx frame len too small or too long\n");
2331 return frame_discard;
2332 }
2333 return frame_ok;
2334}
2335
2336/* Name emac_rx_clean_desc
2337 * Arguments priv : pointer to driver private data structure
2338 * Return Number of packets passed up the stack
2339 * Description Processes completed Rx descriptors, validates the frames and hands them to the network stack.
2340 */
2341#ifdef CONFIG_ASR_EMAC_NAPI
2342static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
2343#else
2344static int emac_rx_clean_desc(struct emac_priv *priv)
2345#endif
2346{
2347 struct emac_desc_ring *rx_ring;
2348 struct emac_desc_buffer *rx_buf;
2349 struct net_device *ndev = priv->ndev;
2350 struct emac_rx_desc *rx_desc;
2351 struct sk_buff *skb = NULL;
2352 int status;
2353#ifdef CONFIG_ASR_EMAC_NAPI
2354 u32 receive_packet = 0;
2355#endif
2356 u32 i;
2357 u32 u32Len;
2358 u32 u32Size;
2359 u8 *pu8Data;
2360#ifdef WAN_LAN_AUTO_ADAPT
2361 int port = -1, vlan = -1;
2362 struct vlan_hdr *vhdr;
2363 struct iphdr *iph = NULL;
2364 struct udphdr *udph = NULL;
2365#endif
2366
2367 rx_ring = &priv->rx_ring;
2368 i = rx_ring->nxt_clean;
2369 rx_desc = emac_get_rx_desc(priv, i);
2370 u32Size = 0;
2371
2372 if (priv->pause.tx_pause && !priv->pause.fc_auto)
2373 emac_check_ring_and_send_pause(priv);
2374
2375 while (rx_desc->OWN == 0) {
2376 if (priv->tso && !rx_desc->csum_done)
2377 break;
2378
2379 if (skb_queue_len(&priv->rx_skb) > priv->rx_ring.total_cnt)
2380 break;
2381
2382 rx_buf = &rx_ring->desc_buf[i];
2383 if (!rx_buf->skb)
2384 break;
2385
2386 emac_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
2387 rx_buf->dma_len, DMA_FROM_DEVICE);
2388 status = emac_rx_frame_status(priv, rx_desc);
2389 if (unlikely(status == frame_discard)) {
2390 ndev->stats.rx_dropped++;
2391 dev_kfree_skb_irq(rx_buf->skb);
2392 rx_buf->skb = NULL;
2393 } else {
2394 skb = rx_buf->skb;
2395 u32Len = rx_desc->FramePacketLength - ETHERNET_FCS_SIZE;
2396
2397 pu8Data = skb_put(skb, u32Len);
2398#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
2399 memcpy(pu8Data, (u8 *)rx_buf->buff_addr, u32Len);
2400#endif
2401 skb->dev = ndev;
2402 ndev->hard_header_len = ETH_HLEN;
2403
2404 emac_get_rx_hwtstamp(priv, rx_desc, skb);
2405
2406 skb->protocol = eth_type_trans(skb, ndev);
2407 if (priv->tso)
2408 skb->ip_summed = CHECKSUM_UNNECESSARY;
2409 else
2410 skb->ip_summed = CHECKSUM_NONE;
2411
2412#ifdef WAN_LAN_AUTO_ADAPT
2413 {/* Special tag format: DA-SA-0x81-xx-data.
2414 Bit 7-3 Packet Information
2415 - bit 4: Reserved
2416 - bit 3: Reserved
2417 - bit 2: Miss address table
2418 - bit 1: Security violation
2419 - bit 0: VLAN violation
2420 Bit 2-0 Ingress Port number
2421 - b000: Disabled
2422 - b001: Port 0
2423 - b010: Port 1
2424 - b011: Port 2
2425 - b100: Port 3
2426 - b101: Port 4
2427 - Other: Reserved */
2428 if(ntohs(skb->protocol)>>8 == 0x81) {
2429 port = ntohs(skb->protocol) & 0x7;
2430 if(port > 0 && port <= 0x5) {
2431 skb->protocol = htons(ETH_P_8021Q);
2432 port = port - 1;
2433 }
2434 }
2435 if (skb->protocol == htons(ETH_P_8021Q)) {
2436 vhdr = (struct vlan_hdr *) skb->data;
2437 vlan = ntohs(vhdr->h_vlan_TCI);
2438 iph = (struct iphdr *)(skb->data + VLAN_HLEN);
2439 } else if (skb->protocol == htons(ETH_P_IP))
2440 iph = (struct iphdr *)skb->data;
2441
2442 if (iph && iph->protocol == IPPROTO_UDP) {
2443 udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
2444 if ((htons(udph->dest) == 68 && htons(udph->source) == 67)) {
2445 u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
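					/*
					 * Offset 242 = 236-byte BOOTP header + 4-byte
					 * magic cookie + option code/length bytes, i.e.
					 * the value of the first DHCP option, assumed
					 * here to be option 53 (DHCP message type).
					 */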
2446 u8 dhcp_type = *(udp_data + 242);
2447 if ((DHCP_ACK == dhcp_type || DHCP_OFFER == dhcp_type)
2448 && (DHCP_SEND_REQ == priv->dhcp)) {
2449 priv->dhcp = DHCP_REC_RESP;
2450 if (ndev->phydev->phy_id == IP175D_PHY_ID)
2451 priv->vlan_port = port;
2452 else
2453 priv->vlan_port = -1;
2454 }
2455 }
2456 }
2457 }
2458#endif
2459 skb_queue_tail(&priv->rx_skb, skb);
2460 rx_buf->skb = NULL;
2461 }
2462
2463 if (++i == rx_ring->total_cnt)
2464 i = 0;
2465
2466 rx_desc = emac_get_rx_desc(priv, i);
2467
2468 /* restart RX COE */
2469 if (priv->tso)
2470 emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
2471 }
2472
2473 rx_ring->nxt_clean = i;
2474
2475 emac_alloc_rx_desc_buffers(priv);
2476
2477 /*
2478 * Since netif_rx may consume a lot of time, do this after
2479 * emac_alloc_rx_desc_buffers so that the RX DMA descriptors are
2480 * refilled as soon as possible, reducing the chance of packet loss.
2481 */
2482 while ((skb = skb_dequeue(&priv->rx_skb))) {
2483 ndev->stats.rx_packets++;
2484 ndev->stats.rx_bytes += skb->len;
2485#ifdef CONFIG_ASR_EMAC_NAPI
2486 napi_gro_receive(&priv->rx_napi, skb);
2487#else
2488 netif_rx(skb);
2489#endif
2490
2491#ifdef CONFIG_ASR_EMAC_NAPI
2492 receive_packet++;
2493 if (receive_packet >= budget)
2494 break;
2495#endif
2496 }
2497
2498#ifdef CONFIG_ASR_EMAC_DDR_QOS
2499 emac_ddr_clk_scaling(priv);
2500#endif
2501
2502#ifdef CONFIG_ASR_EMAC_NAPI
2503 return receive_packet;
2504#else
2505 return 0;
2506#endif
2507}
2508
2509/* Name emac_alloc_rx_desc_buffers
2510 * Arguments priv : pointer to driver private data structure
2511 * Return none
2512 * Description Allocates and maps fresh receive buffers and re-arms the free Rx descriptors.
2513 */
2514static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
2515{
2516 struct net_device *ndev = priv->ndev;
2517 struct emac_desc_ring *rx_ring = &priv->rx_ring;
2518 struct emac_desc_buffer *rx_buf;
2519 struct sk_buff *skb;
2520 struct emac_rx_desc *rx_desc;
2521 u32 i;
2522#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
2523 void *buff;
2524#endif
2525 u32 buff_len;
2526 int fail_cnt = 0;
2527
2528 i = rx_ring->nxt_use;
2529 rx_buf = &rx_ring->desc_buf[i];
2530
2531 buff_len = priv->u32RxBufferLen;
2532
2533 while (!rx_buf->skb) {
2534 skb = emac_skbrb_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
2535 if (!skb) {
2536 if (priv->rx_ring.total_cnt == EMAC_RX_RING_SIZE)
2537 skb = dev_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
2538 if (!skb) {
2539 fail_cnt++;
2540 pr_warn_ratelimited("emac sk_buff allocation failed\n");
2541 break;
2542 }
2543 }
2544
2545 /* reserve headroom so the payload is properly aligned */
2546 skb_reserve(skb, NET_IP_ALIGN + EMAC_EXTRA_ROOM);
2547 skb->dev = ndev;
2548
2549#ifdef CONFIG_ASR_EMAC_RX_NO_COPY
2550 rx_buf->buff_addr = skb->data;
2551#else
2552 if (!rx_buf->buff_addr) {
2553 buff = kmalloc(buff_len, GFP_ATOMIC | GFP_DMA);
2554 if (!buff) {
2555 pr_err("kmalloc failed\n");
2556 dev_kfree_skb(skb);
2557 break;
2558 }
2559 rx_buf->buff_addr = buff;
2560 }
2561#endif
2562 rx_buf->skb = skb;
2563 rx_buf->dma_len = buff_len;
2564 rx_buf->dma_addr = emac_map_single(&priv->pdev->dev,
2565 rx_buf->buff_addr,
2566 buff_len,
2567 DMA_FROM_DEVICE);
2568
2569 rx_desc = emac_get_rx_desc(priv, i);
2570 rx_desc->BufferAddr1 = rx_buf->dma_addr;
2571 rx_desc->BufferSize1 = rx_buf->dma_len;
2572 rx_desc->rx_timestamp = 0;
2573 rx_desc->ptp_pkt = 0;
2574 rx_desc->FirstDescriptor = 0;
2575 rx_desc->LastDescriptor = 0;
2576 rx_desc->FramePacketLength = 0;
2577 rx_desc->ApplicationStatus = 0;
2578 if (++i == rx_ring->total_cnt) {
2579 rx_desc->EndRing = 1;
2580 i = 0;
2581 }
2582
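		/*
		 * Publish all descriptor fields before setting the OWN bit;
		 * once OWN is set the DMA engine may consume the descriptor.
		 */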
2583 wmb();
2584 rx_desc->OWN = 1;
2585 if (priv->tso)
2586 rx_desc->csum_done = 0;
2587
2588 rx_buf = &rx_ring->desc_buf[i];
2589 }
2590 rx_ring->nxt_use = i;
2591
2592 if (fail_cnt)
2593 priv->refill = 1;
2594 else
2595 priv->refill = 0;
2596 emac_dma_start_receive(priv);
2597}
2598
2599#ifdef CONFIG_ASR_EMAC_NAPI
2600static int emac_rx_poll(struct napi_struct *napi, int budget)
2601{
2602 struct emac_priv *priv = container_of(napi, struct emac_priv, rx_napi);
2603 int work_done;
2604
2605 work_done = emac_rx_clean_desc(priv, budget);
2606 if (work_done < budget && napi_complete_done(napi, work_done)) {
2607 unsigned long flags;
2608
2609 spin_lock_irqsave(&priv->intr_lock, flags);
2610 emac_enable_interrupt(priv, 0);
2611 spin_unlock_irqrestore(&priv->intr_lock, flags);
2612
2613 if (priv->refill)
2614 emac_rx_timer_arm(priv);
2615 }
2616
2617 return work_done;
2618}
2619
2620static int emac_tx_poll(struct napi_struct *napi, int budget)
2621{
2622 struct emac_priv *priv = container_of(napi, struct emac_priv, tx_napi);
2623 int work_done;
2624
2625 work_done = emac_tx_clean_desc(priv, budget);
2626 if (work_done < budget && napi_complete_done(napi, work_done)) {
2627 unsigned long flags;
2628
2629 spin_lock_irqsave(&priv->intr_lock, flags);
2630 emac_enable_interrupt(priv, 1);
2631 spin_unlock_irqrestore(&priv->intr_lock, flags);
2632 }
2633
2634 return work_done;
2635}
2636#endif
2637
2638/* Name emac_tx_mem_map
2639 * Arguments priv : pointer to driver private data structure
2640 * pstSkb : pointer to sk_buff structure passed by upper layer
2641 * max_tx_len : max data len per descriptor
2642 * frag_num : number of fragments in the packet
2643 * Return number of descriptors needed for transmitting packet
2644 * Description
2645 */
2646static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb,
2647 u32 max_tx_len, u32 frag_num, int ioc)
2648{
2649 struct emac_desc_ring *tx_ring;
2650 struct emac_desc_buffer *tx_buf;
2651 struct emac_tx_desc *tx_desc, *first_desc;
2652 u32 skb_len;
2653 u32 u32Offset, u32Size, i;
2654 u32 use_desc_cnt;
2655 u32 f;
2656 void *pvPtr;
2657 u32 cur_desc_addr;
2658 u32 cur_desc_idx;
2659 u8 do_tx_timestamp = 0;
2660 bool use_buf2 = 0;
2661
2662 u32Offset = 0;
2663 use_desc_cnt = 0;
2664
2665 skb_tx_timestamp(skb);
2666 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2667 priv->hwts_tx_en)) {
2668 /* declare that device is doing timestamping */
2669 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2670 do_tx_timestamp = 1;
2671 }
2672
2673 tx_ring = &priv->tx_ring;
2674 skb_len = skb->len - skb->data_len;
2675 i = cur_desc_idx = tx_ring->nxt_use;
2676 cur_desc_addr = emac_rd(priv, DMA_TRANSMIT_BASE_ADDRESS);
2677 while (skb_len > 0) {
2678 u32Size = min(skb_len, max_tx_len);
2679 skb_len -= u32Size;
2680
2681 tx_buf = &tx_ring->desc_buf[i];
2682 tx_buf->dma_len = u32Size;
2683 pvPtr = skb->data + u32Offset;
2684 tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pvPtr,
2685 u32Size, DMA_TO_DEVICE);
2686 tx_buf->buff_addr = pvPtr;
2687 tx_buf->ulTimeStamp = jiffies;
2688
2689 tx_desc = emac_get_tx_desc(priv, i);
2690
2691 if (use_buf2) {
2692 tx_desc->BufferAddr2 = tx_buf->dma_addr;
2693 tx_desc->BufferSize2 = tx_buf->dma_len;
2694 i++;
2695 use_buf2 = 0;
2696 } else {
2697 memset(tx_desc, 0, sizeof(struct emac_tx_desc));
2698 tx_desc->BufferAddr1 = tx_buf->dma_addr;
2699 tx_desc->BufferSize1 = tx_buf->dma_len;
2700 use_buf2 = 1;
2701 }
2702
2703 if (use_desc_cnt == 0) {
2704 first_desc = tx_desc;
2705 tx_desc->FirstSegment = 1;
2706 if (do_tx_timestamp)
2707 tx_desc->tx_timestamp = 1;
2708 }
2709
2710 if (skb_len == 0 && frag_num == 0) {
2711 tx_desc->LastSegment = 1;
2712 tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
2713 }
2714
2715 if (!use_buf2 && i == tx_ring->total_cnt) {
2716 tx_desc->EndRing = 1;
2717 i = 0;
2718 }
2719
2720 /* trigger first desc OWN bit later */
2721 use_desc_cnt++;
2722 if (use_desc_cnt > 2)
2723 tx_desc->OWN = 1;
2724
2725 u32Offset += u32Size;
2726 }
2727
2728 /* if the data is fragmented */
2729 for (f = 0; f < frag_num; f++) {
2730 skb_frag_t *frag;
2731
2732 frag = &(skb_shinfo(skb)->frags[f]);
2733 skb_len = skb_frag_size(frag);
2734 u32Offset = skb_frag_off(frag);
2735
2736 while (skb_len) {
2737 u32Size = min(skb_len, max_tx_len);
2738 skb_len -= u32Size;
2739
2740 tx_buf = &tx_ring->desc_buf[i];
2741 tx_buf->dma_len = u32Size;
2742 tx_buf->dma_addr =
2743 dma_map_page(&priv->pdev->dev,
2744 skb_frag_page(frag),
2745 u32Offset,
2746 u32Size,
2747 DMA_TO_DEVICE);
2748 tx_buf->ulTimeStamp = jiffies;
2749
2750 tx_desc = emac_get_tx_desc(priv, i);
2751 if (use_buf2) {
2752 tx_desc->BufferAddr2 = tx_buf->dma_addr;
2753 tx_desc->BufferSize2 = tx_buf->dma_len;
2754 i++;
2755 use_buf2 = 0;
2756 } else {
2757 memset(tx_desc, 0, sizeof(struct emac_tx_desc));
2758 tx_desc->BufferAddr1 = tx_buf->dma_addr;
2759 tx_desc->BufferSize1 = tx_buf->dma_len;
2760 use_buf2 = 1;
2761 }
2762
2763 if (skb_len == 0 && f == (frag_num - 1)) {
2764 tx_desc->LastSegment = 1;
2765 tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
2766 }
2767
2768 if (!use_buf2 && i == tx_ring->total_cnt) {
2769 tx_desc->EndRing = 1;
2770 i = 0;
2771 }
2772
2773 /* trigger first desc OWN bit later */
2774 use_desc_cnt++;
2775 if (use_desc_cnt > 2)
2776 tx_desc->OWN = 1;
2777
2778 u32Offset += u32Size;
2779 }
2780 }
2781
2782 if (use_buf2 && ++i == tx_ring->total_cnt) {
2783 tx_desc->EndRing = 1;
2784 i = 0;
2785 }
2786
2787 tx_ring->desc_buf[cur_desc_idx].skb = skb;
2788 tx_ring->desc_buf[cur_desc_idx].nxt_watch =
2789 (i == 0 ? tx_ring->total_cnt : 0) + i - 1;
2790
2791 wmb();
2792
2793 first_desc->OWN = 1;
2794
2795 emac_dma_start_transmit(priv);
2796
2797 tx_ring->nxt_use = i;
2798 return use_desc_cnt;
2799}
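/*
 * Descriptor-packing note: each Tx descriptor carries two buffer
 * slots, so consecutive data chunks are packed pairwise (use_buf2
 * toggles between BufferAddr1/Size1 and BufferAddr2/Size2) and the
 * ring index only advances after the second slot is filled. The OWN
 * bit of the packet's first descriptor is set last, after the wmb(),
 * so the DMA engine never sees a partially built chain.
 */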
2800
2801static int emac_prepare_tso_desc(struct emac_priv *priv, int idx,
2802 bool tso, bool coe,
2803 u32 addr, int payload, u8 hlen, int mss,
2804 bool fst, bool last, bool ioc, bool ts,
2805 u32 *cnt)
2806{
2807 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2808 struct emac_tx_desc *pdesc;
2809
2810 pdesc = emac_get_tx_desc(priv, idx);
2811 if (tso) {
2812 if (fst && hlen) {
2813 emac_set_buf1_addr_len(pdesc, addr, 0);
2814 payload -= hlen;
2815 addr += hlen;
2816 }
2817 emac_set_buf2_addr_len(pdesc, addr, payload);
2818 } else {
2819 emac_set_buf1_addr_len(pdesc, addr, payload);
2820 }
2821
2822 if (fst) {
2823 emac_tx_desc_set_fd(pdesc);
2824 } else {
2825 if (tso)
2826 emac_tx_desc_set_offload(pdesc, 1, 1, 1);
2827 else if (coe)
2828 emac_tx_desc_set_offload(pdesc, 0, 1, 0);
2829 else
2830 emac_tx_desc_set_offload(pdesc, 1, 0, 0);
2831 }
2832
2833 if (ts)
2834 emac_tx_desc_set_ts(pdesc);
2835
2836 if (last) {
2837 /* last segment */
2838 emac_tx_desc_set_ld(pdesc);
2839 if (ioc)
2840 emac_tx_desc_set_ioc(pdesc);
2841 }
2842
2843 print_desc((void *)pdesc, 16);
2844 if (payload <= 0)
2845 return idx;
2846
2847 do {
2848 (*cnt)++;
2849
2850 if (++idx == tx_ring->total_cnt) {
2851 emac_tx_desc_set_ring_end(pdesc);
2852 idx = 0;
2853 }
2854
2855 if (!tso)
2856 break;
2857
2858 payload -= mss;
2859 if (payload <= 0)
2860 break;
2861
2862 pdesc = emac_get_tx_desc(priv, idx);
2863 emac_tx_desc_set_offload(pdesc, 1, 1, 0);
2864
2865 print_desc((void *)pdesc, 16);
2866 } while (1);
2867
2868 return idx;
2869}
2870
2871static int emac_tso_xmit(struct sk_buff *skb, struct net_device *ndev,
2872 bool tso, bool coe)
2873{
2874 struct emac_priv *priv = netdev_priv(ndev);
2875 struct emac_desc_ring *tx_ring = &priv->tx_ring;
2876 struct emac_desc_buffer *tx_buf;
2877 struct emac_tx_desc *pdesc;
2878 skb_frag_t *frag;
2879 u32 desc_cnt, frag_num, f, mss, fst;
2880 u32 offset, i;
2881 u8 hlen;
2882 int skb_len, payload;
2883 void *pbuf;
2884 int ioc;
2885 u8 timestamp = 0;
#ifndef CONFIG_ASR_EMAC_NAPI
 unsigned long ulFlags;
#endif
2886
2887 frag_num = skb_shinfo(skb)->nr_frags;
2888 skb_len = skb->len - skb->data_len;
2889 if (tso) {
2890 hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
2891 mss = skb_shinfo(skb)->gso_size;
2892 desc_cnt = (skb_len / mss) + 1;
2893 for (f = 0; f < frag_num; f++) {
2894 frag = &skb_shinfo(skb)->frags[f];
2895 desc_cnt += (skb_frag_size(frag) / mss) + 1;
2896 }
2897 } else {
2898 hlen = 0;
2899 mss = 0;
2900 desc_cnt = EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
2901 for (i = 0; i < frag_num; i++) {
2902 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2903 desc_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
2904 MAX_DATA_PWR_TX_DES);
2905 }
2906 }
2907
2908 emac_print("%s: skb=0x%x, skb->len=%d skb_len=%d mss=%d frag_num=%d hlen=%d\n",
2909 __func__, (unsigned)skb, skb->len, skb_len, mss, frag_num, hlen);
2910
2911#ifdef EMAC_DEBUG
2912 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, skb->data, skb_len, 0);
2913#endif
2914 /* disable hard interrupt on local CPUs */
2915#ifndef CONFIG_ASR_EMAC_NAPI
2916 local_irq_save(ulFlags);
2917#endif
2918 if (!spin_trylock(&priv->spTxLock)) {
2919 pr_err("Collision detected\n");
2920#ifndef CONFIG_ASR_EMAC_NAPI
2921 local_irq_restore(ulFlags);
2922#endif
2923 return NETDEV_TX_BUSY;
2924 }
2925
2926 /* check whether sufficient free descriptors are there */
2927 if (EMAC_DESC_UNUSED(tx_ring) < (desc_cnt + 2)) {
2928 pr_err_ratelimited("TSO Descriptors are not free\n");
2929 netif_stop_queue(ndev);
2930#ifndef CONFIG_ASR_EMAC_NAPI
2931 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
2932#else
2933 spin_unlock(&priv->spTxLock);
2934#endif
2935 return NETDEV_TX_BUSY;
2936 }
2937
2938 priv->tx_count_frames += desc_cnt;
2939 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2940 priv->hwts_tx_en))
2941 ioc = 1;
2942 else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
2943 ioc = 1;
2944 else
2945 ioc = 0;
2946
2947 if (ioc)
2948 priv->tx_count_frames = 0;
2949
2950 skb_tx_timestamp(skb);
2951 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2952 priv->hwts_tx_en)) {
2953 /* declare that device is doing timestamping */
2954 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2955 timestamp = 1;
2956 }
2957
2958 offset = 0;
2959 desc_cnt = 0;
2960 i = fst = tx_ring->nxt_use;
2961 do {
2962 payload = min(skb_len, TSO_MAX_SEG_SIZE);
2963
2964 tx_buf = &tx_ring->desc_buf[i];
2965 tx_buf->dma_len = payload;
2966 pbuf = skb->data + offset;
2967 tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pbuf,
2968 payload, DMA_TO_DEVICE);
2969 tx_buf->buff_addr = pbuf;
2970 tx_buf->ulTimeStamp = jiffies;
2971
2972 skb_len -= payload;
2973 offset += payload;
2974
2975 i = emac_prepare_tso_desc(priv, i, tso, coe,
2976 tx_buf->dma_addr, payload, hlen, mss,
2977 (i == fst), (skb_len == 0 && frag_num == 0),
2978 ioc, timestamp, &desc_cnt);
2979 } while (skb_len > 0);
2980
2981 /* if the data is fragmented */
2982 for (f = 0; f < frag_num; f++) {
2983 frag = &(skb_shinfo(skb)->frags[f]);
2984 skb_len = skb_frag_size(frag);
2985 offset = skb_frag_off(frag);
2986
2987 emac_print("%s: frag %d len=%d\n", __func__, f, skb_len);
2988#ifdef EMAC_DEBUG
2989 {
2990 u8 *vaddr;
2991
2992 vaddr = kmap_atomic(skb_frag_page(frag));
2993 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
2994 32, 1, vaddr + offset, skb_len, 0);
2995 kunmap_atomic(vaddr);
2996 }
2997#endif
2998 do {
2999 payload = min(skb_len, TSO_MAX_SEG_SIZE);
3000
3001 tx_buf = &tx_ring->desc_buf[i];
3002 tx_buf->dma_len = payload;
3003 //pbuf = skb->data + offset;
3004 tx_buf->dma_addr = dma_map_page(&priv->pdev->dev,
3005 skb_frag_page(frag),
3006 offset, payload,
3007 DMA_TO_DEVICE);
3008 tx_buf->ulTimeStamp = jiffies;
3009
3010 skb_len -= payload;
3011 offset += payload;
3012
3013 i = emac_prepare_tso_desc(priv, i, tso, coe,
3014 tx_buf->dma_addr, payload, 0, mss,
3015 (i == fst),
3016 (skb_len == 0 && f == (frag_num - 1)),
3017 ioc, timestamp, &desc_cnt);
3018 } while (skb_len > 0);
3019 }
3020
3021 tx_ring->desc_buf[fst].skb = skb;
3022 tx_ring->desc_buf[fst].nxt_watch =
3023 (i == 0 ? tx_ring->total_cnt : 0) + i - 1;
3024
3025 wmb();
3026
3027 /* set first descriptor for this packet */
3028 pdesc = emac_get_tx_desc(priv, fst);
3029 emac_tx_update_fst_desc(pdesc, hlen, mss, tso, coe);
3030 print_desc((void *)pdesc, 16);
3031
3032 tx_ring->nxt_use = i;
3033
3034 ndev->stats.tx_packets++;
3035 ndev->stats.tx_bytes += skb->len;
3036 if (tso) {
3037 priv->hw_stats->tx_tso_pkts++;
3038 priv->hw_stats->tx_tso_bytes += skb->len;
3039 }
3040
3041 emac_wr_tso(priv, TSO_TX_POLL_DEMAND, 0xFF);
3042 /* Make sure there is space in the ring for the next send. */
3043 if (EMAC_DESC_UNUSED(tx_ring) < (MAX_SKB_FRAGS + 2)) {
3044 pr_debug_ratelimited("TSO Descriptors not enough, stop\n");
3045 netif_stop_queue(ndev);
3046 }
3047
3048#ifndef CONFIG_ASR_EMAC_NAPI
3049 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3050#else
3051 spin_unlock(&priv->spTxLock);
3052#endif
3053#ifdef CONFIG_ASR_EMAC_DDR_QOS
3054 emac_ddr_clk_scaling(priv);
3055#endif
3056
3057 if (!tso && !coe)
3058 emac_tx_timer_arm(priv);
3059
3060 return NETDEV_TX_OK;
3061}
3062
3063/* Name emac_start_xmit
3064 * Arguments skb : pointer to sk_buff structure passed by upper layer
3065 * ndev : pointer to net_device structure
3066 * Return Status: 0 - Success; non-zero - Fail
3067 * Description This function is called by the upper layer to
3068 * hand over a Tx packet to the driver,
3069 * which maps it to DMA descriptors and
3070 * queues it to the device for transmission.
3072 */
3073static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
3074{
3075 struct emac_priv *priv = netdev_priv(ndev);
3076 int ioc;
3077 u32 frag_num;
3078 u32 skb_len;
3079 u32 tx_des_cnt = 0;
3080 u32 i;
3081#ifndef CONFIG_ASR_EMAC_NAPI
3082 unsigned long ulFlags;
3083#endif
3084#ifdef WAN_LAN_AUTO_ADAPT
3085 int vlan = 0;
3086 struct iphdr *iph = NULL;
3087 struct udphdr *udph = NULL;
3088 struct vlan_hdr *vhdr;
3089
3090 { struct ethhdr *myeth = (struct ethhdr *)skb->data;
3091 if (myeth->h_proto == htons(ETH_P_8021Q)) {
3092 vhdr = (struct vlan_hdr *)((u8 *)myeth + sizeof(struct ethhdr));
3093 vlan = ntohs(vhdr->h_vlan_TCI);
3094 iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr) + VLAN_HLEN);
3095 }
3096 else if (myeth->h_proto == htons(ETH_P_IP))
3097 iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr));
3098
3099 if (iph && iph->protocol == IPPROTO_UDP) {
3100 udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
3101 if ((htons(udph->dest) == 67 && htons(udph->source) == 68)) {
3102 u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
3103 u8 dhcp_type = *(udp_data + 242);
3104 if ((DHCP_DISCOVER == dhcp_type || DHCP_REQUEST == dhcp_type)
3105 && (0 == priv->dhcp)) {
3106 priv->dhcp = DHCP_SEND_REQ;
3107 if (ndev->phydev->phy_id == IP175D_PHY_ID)
3108 priv->vlan_port = vlan;
3109 else
3110 priv->vlan_port = -1;
3111 }
3112 }
3113 }
3114 }
3115#endif
3116
3117 /* skb->len: the full length of the data in the packet
3118 * skb->data_len: the number of bytes in skb fragments
3119 * skb_len: length of the linear (first) part
3120 */
3121 skb_len = skb->len - skb->data_len;
3122
3123 if (skb->len <= 0) {
3124 pr_err("Packet length is zero\n");
3125 dev_kfree_skb_any(skb);
3126 return NETDEV_TX_OK;
3127 }
3128
3129 if (priv->tso) {
3130 bool tso = false, coe = false;
3131
3132 if (skb_is_gso(skb) &&
3133 (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3134 tso = true;
3135 coe = true;
3136 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
3137 coe = true;
3138 }
3139
3140 /* Workaround: COE needs skb->data to be 2-byte aligned */
3141 if (coe && !IS_ALIGNED((unsigned long)skb->data, 2))
3142 pskb_expand_head(skb, 1, 0, GFP_ATOMIC);
3143
3144 return emac_tso_xmit(skb, ndev, tso, coe);
3145 }
3146
3147 /* increment the count if len exceeds MAX_DATA_LEN_TX_DES */
3148 tx_des_cnt += EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
3149
3150 frag_num = skb_shinfo(skb)->nr_frags;
3151
3152 for (i = 0; i < frag_num; i++) {
3153 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3154 tx_des_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
3155 MAX_DATA_PWR_TX_DES);
3156 }
3157
3158 /* disable hard interrupt on local CPUs */
3159#ifndef CONFIG_ASR_EMAC_NAPI
3160 local_irq_save(ulFlags);
3161#endif
3162 if (!spin_trylock(&priv->spTxLock)) {
3163 pr_err("Collision detected\n");
3164#ifndef CONFIG_ASR_EMAC_NAPI
3165 local_irq_restore(ulFlags);
3166#endif
3167 return NETDEV_TX_BUSY;
3168 }
3169
3170 /* check whether sufficient free descriptors are there */
3171 if (EMAC_DESC_UNUSED(&priv->tx_ring) < (tx_des_cnt + 2)) {
3172 pr_err_ratelimited("Descriptors are not free\n");
3173 netif_stop_queue(ndev);
3174#ifndef CONFIG_ASR_EMAC_NAPI
3175 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3176#else
3177 spin_unlock(&priv->spTxLock);
3178#endif
3179 return NETDEV_TX_BUSY;
3180 }
3181
3182 priv->tx_count_frames += frag_num + 1;
3183 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3184 priv->hwts_tx_en))
3185 ioc = 1;
3186 else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
3187 ioc = 1;
3188 else
3189 ioc = 0;
3190
3191 if (ioc)
3192 priv->tx_count_frames = 0;
3193
3194 tx_des_cnt = emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, frag_num, ioc);
3195 if (tx_des_cnt == 0) {
3196 pr_err("Could not acquire memory from pool\n");
3197 netif_stop_queue(ndev);
3198#ifndef CONFIG_ASR_EMAC_NAPI
3199 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3200#else
3201 spin_unlock(&priv->spTxLock);
3202#endif
3203 return NETDEV_TX_BUSY;
3204 }
3205 ndev->stats.tx_packets++;
3206 ndev->stats.tx_bytes += skb->len;
3207
3208 /* Make sure there is space in the ring for the next send. */
3209 if (EMAC_DESC_UNUSED(&priv->tx_ring) < (MAX_SKB_FRAGS + 2))
3210 netif_stop_queue(ndev);
3211
3212#ifndef CONFIG_ASR_EMAC_NAPI
3213 spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
3214#else
3215 spin_unlock(&priv->spTxLock);
3216#endif
3217#ifdef CONFIG_ASR_EMAC_DDR_QOS
3218 emac_ddr_clk_scaling(priv);
3219#endif
3220 emac_tx_timer_arm(priv);
3221 return NETDEV_TX_OK;
3222}
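/*
 * Interrupt-coalescing note for the xmit paths above: a completion
 * interrupt (IOC) is only requested when the skb wants a hardware
 * timestamp or once every EMAC_TX_FRAMES queued descriptors; in
 * between, the hrtimer armed by emac_tx_timer_arm() acts as a
 * backstop so completed descriptors are still reclaimed under light
 * traffic.
 */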
3223
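/*
 * ReadTxStatCounters()/ReadRxStatCounters() use an indirect access
 * scheme: writing 0x8000 | counter-index to the control register
 * starts the read, the busy bit (0x8000) is polled until the MAC
 * clears it, and the 32-bit value is then assembled from the 16-bit
 * DATA_HIGH and DATA_LOW registers. Note the poll loop has no
 * timeout, so it relies on the MAC always completing the access.
 */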
3224u32 ReadTxStatCounters(struct emac_priv *priv, u8 cnt)
3225{
3226 u32 val, tmp;
3227
3228 val = 0x8000 | cnt;
3229 emac_wr(priv, MAC_TX_STATCTR_CONTROL, val);
3230 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3231
3232 while (val & 0x8000)
3233 val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
3234
3235 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH);
3236 val = tmp << 16;
3237 tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW);
3238 val |= tmp;
3239
3240 return val;
3241}
3242
3243u32 ReadRxStatCounters(struct emac_priv *priv, u8 cnt)
3244{
3245 u32 val, tmp;
3246
3247 val = 0x8000 | cnt;
3248 emac_wr(priv, MAC_RX_STATCTR_CONTROL, val);
3249 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3250
3251 while (val & 0x8000)
3252 val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
3253
3254 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH);
3255 val = tmp << 16;
3256 tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW);
3257 val |= tmp;
3258 return val;
3259}
3260
3261/* Name emac_set_mac_address
3262 * Arguments ndev : pointer to net_device structure
3263 * addr : pointer to the new MAC address
3264 * Return Status: 0 - Success; non-zero - Fail
3265 * Description It is called by upper layer to set the mac address.
3266 */
3267static int emac_set_mac_address(struct net_device *ndev, void *addr)
3268{
3269 struct sockaddr *sa = addr;
3270 struct emac_priv *priv = netdev_priv(ndev);
3271
3272 if (!is_valid_ether_addr(sa->sa_data))
3273 return -EADDRNOTAVAIL;
3274
3275 memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
3276
3277 emac_set_mac_addr(priv, ndev->dev_addr);
3278
3279 emac_set_fc_source_addr(priv, ndev->dev_addr);
3280
3281 return 0;
3282}
3283
3284/* Name emac_change_mtu
3285 * Arguments ndev : pointer to net_device structure
3286 * mtu : maximum transmission unit value
3287 * Return Status: 0 - Success; non-zero - Fail
3288 * Description It is called by upper layer to set the MTU value.
3289 */
3290static int emac_change_mtu(struct net_device *ndev, int mtu)
3291{
3292 struct emac_priv *priv = netdev_priv(ndev);
3293 u32 frame_len;
3294
3295 if (netif_running(ndev)) {
3296 pr_err("must be stopped to change its MTU\n");
3297 return -EBUSY;
3298 }
3299
3300 frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3301
3302 if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE ||
3303 frame_len > EMAC_SKBRB_MAX_PAYLOAD) {
3304 pr_err("Invalid MTU setting\n");
3305 return -EINVAL;
3306 }
3307
3308 if (frame_len <= EMAC_RX_BUFFER_1024)
3309 priv->u32RxBufferLen = EMAC_RX_BUFFER_1024;
3310 else
3311 priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
3312
3313 ndev->mtu = mtu;
3314
3315 return 0;
3316}
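/*
 * Worked example: with ETHERNET_HEADER_SIZE = 14 and
 * ETHERNET_FCS_SIZE = 4 (the usual Ethernet values), an MTU of 1500
 * gives frame_len = 1518, which selects the large Rx buffer, while
 * any MTU that pushes frame_len past EMAC_SKBRB_MAX_PAYLOAD is
 * rejected with -EINVAL.
 */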
3317
3318static void emac_reset(struct emac_priv *priv)
3319{
3320 if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state))
3321 return;
3322 if (test_bit(EMAC_DOWN, &priv->state))
3323 return;
3324
3325 netdev_dbg(priv->ndev, "Reset controller.\n");
3326
3327 rtnl_lock();
3328 //netif_trans_update(priv->ndev);
3329 while (test_and_set_bit(EMAC_RESETING, &priv->state))
3330 usleep_range(1000, 2000);
3331
3332 dev_close(priv->ndev);
3333 dev_open(priv->ndev, NULL);
3334 clear_bit(EMAC_RESETING, &priv->state);
3335 rtnl_unlock();
3336}
3337
3338static void emac_tx_timeout_task(struct work_struct *work)
3339{
3340 struct emac_priv *priv = container_of(work,
3341 struct emac_priv, tx_timeout_task);
3342 emac_reset(priv);
3343 clear_bit(EMAC_TASK_SCHED, &priv->state);
3344}
3345
3346/* Name emac_tx_timeout
3347 * Arguments ndev : pointer to net_device structure
3348 * Return none
3349 * Description Called by the network stack when a
3350 * packet transmit timeout occurs.
3351 */
3352static void emac_tx_timeout(struct net_device *ndev)
3353{
3354 struct emac_priv *priv = netdev_priv(ndev);
3355
3356 netdev_info(ndev, "TX timeout\n");
3357 register_dump(priv);
3358
3359 netif_carrier_off(priv->ndev);
3360 set_bit(EMAC_RESET_REQUESTED, &priv->state);
3361
3362 if (!test_bit(EMAC_DOWN, &priv->state) &&
3363 !test_and_set_bit(EMAC_TASK_SCHED, &priv->state))
3364 schedule_work(&priv->tx_timeout_task);
3365}
3366
3367static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable)
3368{
3369 const struct emac_regdata *regdata = priv->regdata;
3370 void __iomem* apmu;
3371 u32 val;
3372
3373 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3374 if (apmu == NULL) {
3375 pr_err("error to ioremap APMU base\n");
3376 return -ENOMEM;
3377 }
3378
3379 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3380 if (enable) {
3381 val |= 0x1;
3382 } else {
3383 val &= ~0x1;
3384 }
3385 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3386 iounmap(apmu);
3387 return 0;
3388}
3389
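/*
 * Layout of priv->tx_clk_config / priv->rx_clk_config as consumed by
 * the helpers below (inferred from the field extraction; the exact
 * code/step widths come from the per-SoC masks in emac_regdata):
 *   bit 24     - RMII reference clock source (1 - from SoC, 0 - from PHY)
 *   bit 16     - clock source/phase select (RGMII) or clock select (RMII)
 *   bits 15..8 - RGMII delay-line code
 *   bits 7..0  - RGMII delay-line step
 */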
3390static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx)
3391{
3392 const struct emac_regdata *regdata = priv->regdata;
3393 void __iomem* apmu;
3394 u32 val, dline;
3395 u8 phase = 0, tmp;
3396
3397 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3398 if (apmu == NULL) {
3399 pr_err("error to ioremap APMU base\n");
3400 return -ENOMEM;
3401 }
3402
3403 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3404 if (is_tx) {
3405 if (regdata->rgmii_tx_clk_src_sel_shift > 0) {
3406 phase = (priv->tx_clk_config >> 16) & 0x1;
3407 val &= ~(0x1 << regdata->rgmii_tx_clk_src_sel_shift);
3408 val |= phase << regdata->rgmii_tx_clk_src_sel_shift;
3409 }
3410
3411 if (regdata->rgmii_tx_dline_reg_offset > 0) {
3412 /* Set RGMII TX DLINE */
3413 dline = readl(apmu + regdata->rgmii_tx_dline_reg_offset);
3414
3415 /* delay code */
3416 tmp = (priv->tx_clk_config >> 8) &
3417 regdata->rgmii_tx_delay_code_mask;
3418 dline &= ~(regdata->rgmii_tx_delay_code_mask <<
3419 regdata->rgmii_tx_delay_code_shift);
3420 dline |= tmp << regdata->rgmii_tx_delay_code_shift;
3421
3422 /* delay step */
3423 tmp = priv->tx_clk_config &
3424 regdata->rgmii_tx_delay_step_mask;
3425 dline &= ~(regdata->rgmii_tx_delay_step_mask <<
3426 regdata->rgmii_tx_delay_step_shift);
3427 dline |= tmp << regdata->rgmii_tx_delay_step_shift;
3428
3429 /* delay line enable */
3430 dline |= 1 << regdata->rgmii_tx_delay_enable_shift;
3431 writel(dline, apmu + regdata->rgmii_tx_dline_reg_offset);
3432 pr_info("===> emac set tx dline 0x%x 0x%x", dline,
3433 readl(apmu + regdata->rgmii_tx_dline_reg_offset));
3434 }
3435 } else {
3436 if (regdata->rgmii_rx_clk_src_sel_shift > 0) {
3437 phase = (priv->rx_clk_config >> 16) & 0x1;
3438 val &= ~(0x1 << regdata->rgmii_rx_clk_src_sel_shift);
3439 val |= phase << regdata->rgmii_rx_clk_src_sel_shift;
3440 }
3441
3442 /* Set RGMII RX DLINE */
3443 if (regdata->rgmii_rx_dline_reg_offset > 0) {
3444 dline = readl(apmu + regdata->rgmii_rx_dline_reg_offset);
3445
3446 /* delay code */
3447 tmp = (priv->rx_clk_config >> 8) &
3448 regdata->rgmii_rx_delay_code_mask;
3449 dline &= ~(regdata->rgmii_rx_delay_code_mask <<
3450 regdata->rgmii_rx_delay_code_shift);
3451 dline |= tmp << regdata->rgmii_rx_delay_code_shift;
3452
3453 /* delay step */
3454 tmp = priv->rx_clk_config &
3455 regdata->rgmii_rx_delay_step_mask;
3456 dline &= ~(regdata->rgmii_rx_delay_step_mask <<
3457 regdata->rgmii_rx_delay_step_shift);
3458 dline |= tmp << regdata->rgmii_rx_delay_step_shift;
3459
3460 /* delay line enable */
3461 dline |= 1 << regdata->rgmii_rx_delay_enable_shift;
3462 writel(dline, apmu + regdata->rgmii_rx_dline_reg_offset);
3463 pr_info("===> emac set rx dline 0x%x 0x%x", dline,
3464 readl(apmu + regdata->rgmii_rx_dline_reg_offset));
3465 }
3466 }
3467 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3468 pr_info("%s phase:%d direction:%s 0x%x 0x%x\n", __func__, phase,
3469 is_tx ? "tx": "rx", val,
3470 readl(apmu + regdata->clk_rst_ctrl_reg_offset));
3471
3472 iounmap(apmu);
3473 return 0;
3474}
3475
3476static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx)
3477{
3478 const struct emac_regdata *regdata = priv->regdata;
3479 void __iomem* apmu;
3480 u32 val;
3481 u8 tmp;
3482
3483 apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
3484 if (apmu == NULL) {
3485 pr_err("error to ioremap APMU base\n");
3486 return -ENOMEM;
3487 }
3488
3489 val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
3490 if (is_tx) {
3491 /* rmii tx clock select */
3492 if (regdata->rmii_tx_clk_sel_shift > 0) {
3493 tmp = (priv->tx_clk_config >> 16) & 0x1;
3494 val &= ~(0x1 << regdata->rmii_tx_clk_sel_shift);
3495 val |= tmp << regdata->rmii_tx_clk_sel_shift;
3496 }
3497
3498 /* rmii ref clock select, 1 - from soc, 0 - from phy */
3499 if (regdata->rmii_rx_clk_sel_shift) {
3500 tmp = (priv->tx_clk_config >> 24) & 0x1;
3501 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3502 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3503 }
3504 } else {
3505 /* rmii rx clock select */
3506 if (regdata->rmii_rx_clk_sel_shift > 0) {
3507 tmp = (priv->rx_clk_config >> 16) & 0x1;
3508 val &= ~(0x1 << regdata->rmii_rx_clk_sel_shift);
3509 val |= tmp << regdata->rmii_rx_clk_sel_shift;
3510 }
3511
3512 /* rmii ref clock select, 1 - from soc, 0 - from phy */
3513 if (regdata->rmii_rx_clk_sel_shift) {
3514 tmp = (priv->tx_clk_config >> 24) & 0x1;
3515 val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
3516 val |= tmp << regdata->rmii_ref_clk_sel_shift;
3517 }
3518 }
3519
3520 writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
3521 pr_debug("%s phase:%d direction:%s\n", __func__, phase,
3522 is_tx ? "tx": "rx");
3523
3524 iounmap(apmu);
3525 return 0;
3526}
3527
3528static int clk_phase_set(struct emac_priv *priv, bool is_tx)
3529{
3530 if (emac_is_rmii_interface(priv)) {
3531 clk_phase_rmii_set(priv, is_tx);
3532 } else {
3533 clk_phase_rgmii_set(priv, is_tx);
3534 }
3535
3536 return 0;
3537}
3538
3539#ifdef CONFIG_DEBUG_FS
3540static int clk_phase_show(struct seq_file *s, void *data)
3541{
3542 struct emac_priv *priv = s->private;
3543 bool rmii_intf;
3544 rmii_intf = emac_is_rmii_interface(priv);
3545
3546 seq_printf(s, "Emac MII Interface : %s\n", rmii_intf ? "RMII" : "RGMII");
3547 seq_printf(s, "Current rx clk config : %d\n", priv->rx_clk_config);
3548 seq_printf(s, "Current tx clk config : %d\n", priv->tx_clk_config);
3549 return 0;
3550}
3551
3552static ssize_t clk_tuning_write(struct file *file,
3553 const char __user *user_buf,
3554 size_t count, loff_t *ppos)
3555{
3556 struct emac_priv *priv =
3557 ((struct seq_file *)(file->private_data))->private;
3558 int err;
3559 int clk_phase;
3560 char buff[TUNING_CMD_LEN] = { 0 };
3561 char mode_str[20];
3562
3563 if (count >= TUNING_CMD_LEN) {
3564 pr_err("command must be shorter than %d bytes.\n", TUNING_CMD_LEN);
3565 return count;
3566 }
3567 err = copy_from_user(buff, user_buf, count);
3568 if (err)
3569 return err;
3570
3571 err = sscanf(buff, "%19s %d", mode_str, &clk_phase);
3572 if (err != 2) {
3573 pr_err("debugfs para count error\n");
3574 return count;
3575 }
3576 pr_info("input:%s %d\n", mode_str, clk_phase);
3577
3578 if (strcmp(mode_str, "tx") == 0) {
3579 priv->tx_clk_config = clk_phase;
3580 clk_phase_set(priv, TX_PHASE);
3581 } else if (strcmp(mode_str, "rx") == 0) {
3582 priv->rx_clk_config = clk_phase;
3583 clk_phase_set(priv, RX_PHASE);
3584 } else {
3585 pr_err("command error\n");
3586 pr_err("eg: echo rx 1 > clk_tuning\n");
3587 return count;
3588 }
3589
3590 return count;
3591}
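/*
 * Example (value illustrative): "echo tx 65792 > clk_tuning" stores
 * 0x10100 in tx_clk_config, i.e. TX clock source select 1 and delay
 * code 1 with step 0 per the config-word layout above. The handler
 * parses with %d, so the value is given in decimal.
 */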
3592
3593static int clk_tuning_open(struct inode *inode, struct file *file)
3594{
3595 return single_open(file, clk_phase_show, inode->i_private);
3596}
3597
3598const struct file_operations clk_tuning_fops = {
3599 .open = clk_tuning_open,
3600 .write = clk_tuning_write,
3601 .read = seq_read,
3602 .llseek = seq_lseek,
3603 .release = single_release,
3604};
3605
3606#endif
3607
3608static int emac_power_down(struct emac_priv *priv)
3609{
3610 if (priv->rst_gpio >= 0)
3611 gpio_direction_output(priv->rst_gpio,
3612 priv->low_active_rst ? 0 : 1);
3613
3614 if (priv->ldo_gpio >= 0)
3615 gpio_direction_output(priv->ldo_gpio,
3616 priv->low_active_ldo ? 0 : 1);
3617
3618 return 0;
3619}
3620
3621static int emac_power_up(struct emac_priv *priv)
3622{
3623 u32 *delays_ldo = priv->delays_ldo;
3624 u32 *delays_rst = priv->delays_rst;
3625 int rst_gpio = priv->rst_gpio;
3626 int low_active_rst = priv->low_active_rst;
3627 int ldo_gpio = priv->ldo_gpio;
3628 int low_active_ldo = priv->low_active_ldo;
3629
3630 if (rst_gpio >= 0) {
3631 gpio_direction_output(rst_gpio, low_active_rst ? 0 : 1);
3632 }
3633
3634 if (ldo_gpio >= 0) {
3635 gpio_direction_output(ldo_gpio, low_active_ldo ? 0 : 1);
3636 if (delays_ldo[0]) {
3637 gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
3638 msleep(DIV_ROUND_UP(delays_ldo[0], 1000));
3639 }
3640
3641 gpio_set_value(ldo_gpio, low_active_ldo ? 0 : 1);
3642 if (delays_ldo[1])
3643 msleep(DIV_ROUND_UP(delays_ldo[1], 1000));
3644
3645 gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
3646 if (delays_ldo[2])
3647 msleep(DIV_ROUND_UP(delays_ldo[2], 1000));
3648 }
3649
3650 if (rst_gpio >= 0) {
3651 if (delays_rst[0]) {
3652 gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
3653 msleep(DIV_ROUND_UP(delays_rst[0], 1000));
3654 }
3655
3656 gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
3657 if (delays_rst[1])
3658 msleep(DIV_ROUND_UP(delays_rst[1], 1000));
3659
3660 gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
3661 if (delays_rst[2])
3662 msleep(DIV_ROUND_UP(delays_rst[2], 1000));
3663 }
3664
3665 return 0;
3666}
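/*
 * Power-up sequencing (inferred from the code above): delays_*[0] is
 * the time spent with the line deasserted before the pulse,
 * delays_*[1] is the width of the asserted reset/LDO pulse, and
 * delays_*[2] is the settle time after the final deassertion. All
 * three come from the optional "reset-delays-us"/"ldo-delays-us" DT
 * properties read in emac_mii_reset().
 */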
3667
3668static int emac_mii_reset(struct mii_bus *bus)
3669{
3670 struct emac_priv *priv = bus->priv;
3671 struct device *dev = &priv->pdev->dev;
3672 struct device_node *np = dev->of_node;
3673 int rst_gpio, ldo_gpio;
3674 int low_active_ldo, low_active_rst;
3675 u32 *delays_ldo = priv->delays_ldo;
3676 u32 *delays_rst = priv->delays_rst;
3677
3678 priv->rst_gpio = -1;
3679 priv->ldo_gpio = -1;
3680
3681 if (!np)
3682 return 0;
3683
3684 rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
3685 if (rst_gpio >= 0) {
3686 low_active_rst = of_property_read_bool(np, "reset-active-low");
3687 of_property_read_u32_array(np, "reset-delays-us", delays_rst, 3);
3688
3689 if (gpio_request(rst_gpio, "mdio-reset")) {
3690 printk("emac: reset-gpio=%d request failed\n",
3691 rst_gpio);
3692 return 0;
3693 }
3694 priv->rst_gpio = rst_gpio;
3695 priv->low_active_rst = low_active_rst;
3696 }
3697
3698 ldo_gpio = of_get_named_gpio(np, "ldo-gpio", 0);
3699 if (ldo_gpio >= 0) {
3700 low_active_ldo = of_property_read_bool(np, "ldo-active-low");
3701 of_property_read_u32_array(np, "ldo-delays-us", delays_ldo, 3);
3702
3703 if (gpio_request(ldo_gpio, "mdio-ldo"))
3704 return 0;
3705
3706 priv->ldo_gpio = ldo_gpio;
3707 priv->low_active_ldo = low_active_ldo;
3708 }
3709
3710 /*
3711 * Some devices do not allow MDC/MDIO operation during power on/reset,
3712 * so disable the AXI clock to shut down the MDIO clock.
3713 */
3714 clk_disable_unprepare(priv->clk);
3715
3716 emac_power_up(priv);
3717
3718 clk_prepare_enable(priv->clk);
3719
3720 emac_reset_hw(priv);
3721
3722 return 0;
3723}
3724
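/*
 * MDIO command word, as assembled in the accessors below:
 * bits [4:0] hold the PHY address and bits [9:5] the register
 * number; MREGBIT_START_MDIO_TRANS kicks off the transaction and
 * MREGBIT_MDIO_READ_WRITE selects a read. Completion is detected by
 * polling the start bit until the MAC clears it.
 */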
3725static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
3726{
3727 struct emac_priv *priv = bus->priv;
3728 u32 cmd = 0;
3729 u32 val;
3730
3731 if (!__clk_is_enabled(priv->clk))
3732 return -EBUSY;
3733
3734 mutex_lock(&priv->mii_mutex);
3735 cmd |= phy_addr & 0x1F;
3736 cmd |= (regnum & 0x1F) << 5;
3737 cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
3738
3739 /*
3740 * The MDC/MDIO clock is derived from AXI; add a QoS request so the
3741 * MDC frequency does not change during an MDIO read/write.
3742 */
3743#ifdef CONFIG_DDR_DEVFREQ
3744 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3745#endif
3746 emac_wr(priv, MAC_MDIO_DATA, 0x0);
3747 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3748
3749 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3750 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000)) {
#ifdef CONFIG_DDR_DEVFREQ
 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
#endif
 mutex_unlock(&priv->mii_mutex);
 return -EBUSY;
 }
3752
3753 val = emac_rd(priv, MAC_MDIO_DATA);
3754
3755#ifdef CONFIG_DDR_DEVFREQ
3756 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3757#endif
3758 mutex_unlock(&priv->mii_mutex);
3759 return val;
3760}
3761
3762static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
3763 u16 value)
3764{
3765 struct emac_priv *priv = bus->priv;
3766 u32 cmd = 0;
3767 u32 val;
3768
3769 if (!__clk_is_enabled(priv->clk))
3770 return -EBUSY;
3771
3772 mutex_lock(&priv->mii_mutex);
3773 emac_wr(priv, MAC_MDIO_DATA, value);
3774
3775 cmd |= phy_addr & 0x1F;
3776 cmd |= (regnum & 0x1F) << 5;
3777 cmd |= MREGBIT_START_MDIO_TRANS;
3778
3779 /*
3780 * The MDC/MDIO clock is derived from AXI; add a QoS request so the
3781 * MDC frequency does not change during an MDIO read/write.
3782 */
3783#ifdef CONFIG_DDR_DEVFREQ
3784 pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
3785#endif
3786 emac_wr(priv, MAC_MDIO_CONTROL, cmd);
3787
3788 if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
3789 !(val & MREGBIT_START_MDIO_TRANS), 100, 100000)) {
#ifdef CONFIG_DDR_DEVFREQ
 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
#endif
 mutex_unlock(&priv->mii_mutex);
 return -EBUSY;
 }
3791
3792#ifdef CONFIG_DDR_DEVFREQ
3793 pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
3794#endif
3795
3796 mutex_unlock(&priv->mii_mutex);
3797 return 0;
3798}
3799
3800static void emac_adjust_link(struct net_device *dev)
3801{
3802 struct phy_device *phydev = dev->phydev;
3803 struct emac_priv *priv = netdev_priv(dev);
3804 u32 ctrl;
3805#ifdef WAN_LAN_AUTO_ADAPT
3806 int status_change = 0;
3807 int addr = 0;
3808 int i = 0;
3809#endif
3810 if (!phydev || priv->fix_link)
3811 return;
3812
3813 if (phydev->link) {
3814 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
3815
3816 /* Use full-duplex mode if the PHY negotiated it;
3817 * otherwise operate in half-duplex mode.
3818 */
3819 if (phydev->duplex != priv->duplex) {
3820 if (!phydev->duplex)
3821 ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
3822 else
3823 ctrl |= MREGBIT_FULL_DUPLEX_MODE;
3824 priv->duplex = phydev->duplex;
3825 }
3826
3827 if (phydev->speed != priv->speed) {
3828 ctrl &= ~MREGBIT_SPEED;
3829
3830 switch (phydev->speed) {
3831 case SPEED_1000:
3832 ctrl |= MREGBIT_SPEED_1000M;
3833 break;
3834 case SPEED_100:
3835 ctrl |= MREGBIT_SPEED_100M;
3836 break;
3837 case SPEED_10:
3838 ctrl |= MREGBIT_SPEED_10M;
3839 break;
3840 default:
3841 pr_err("broken speed: %d\n", phydev->speed);
3842 phydev->speed = SPEED_UNKNOWN;
3843 break;
3844 }
3845 if (phydev->speed != SPEED_UNKNOWN) {
3846 priv->speed = phydev->speed;
3847 }
3848 }
3849 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
3850 pr_info("%s link:%d speed:%dM duplex:%s\n", __func__,
3851 phydev->link, phydev->speed,
3852 phydev->duplex ? "Full": "Half");
3853 }
3854
3855#ifdef WAN_LAN_AUTO_ADAPT
3856 if(phydev->phy_id == IP175D_PHY_ID) {
3857 if (phydev->link != priv->link) {
3858 for (i=0; i<16; i++) {
3859 if((priv->link & (1<<i)) != (phydev->link & (1<<i))) {
3860 addr = i;
3861 if (phydev->link & (1<<i)) {
3862 /* link up */
3863 printk("eth0 port%d link up\n", addr);
3864 priv->dhcp = 0;
3865 emac_sig_workq(CARRIER_UP_IP175D, addr);
3866 if(priv->dhcp_delaywork)
3867 cancel_delayed_work(&priv->dhcp_work);
3868 priv->dhcp_delaywork = 1;
3869 schedule_delayed_work(&priv->dhcp_work, 25*HZ);
3870 } else {
3871 /* link down */
3872 printk("eth0 port%d link down\n", addr);
3873 priv->dhcp = 0;
3874 if(priv->dhcp_delaywork)
3875 cancel_delayed_work(&priv->dhcp_work);
3876 priv->dhcp_delaywork = 0;
3877 emac_sig_workq(CARRIER_DOWN_IP175D, addr);
3878 }
3879 }
3880 }
3881 priv->link = phydev->link;
3882 }
3883 } else {
3884 if (phydev->link != priv->link) {
3885 priv->link = phydev->link;
3886 status_change = 1;
3887 }
3888
3889 if (status_change) {
3890 if (phydev->link) {
3891 /* link up */
3892 priv->dhcp = 0;
3893 emac_sig_workq(CARRIER_UP, 0);
3894 if(priv->dhcp_delaywork)
3895 cancel_delayed_work(&priv->dhcp_work);
3896 priv->dhcp_delaywork = 1;
3897 schedule_delayed_work(&priv->dhcp_work, 25*HZ);
3898
3899 } else {
3900 /* link down */
3901 priv->dhcp = 0;
3902 if(priv->dhcp_delaywork)
3903 cancel_delayed_work(&priv->dhcp_work);
3904 priv->dhcp_delaywork = 0;
3905 emac_sig_workq(CARRIER_DOWN, 0);
3906 }
3907 }
3908 }
3909#endif
3910}
3911
3912static int emac_phy_connect(struct net_device *dev)
3913{
3914 struct phy_device *phydev;
3915 int phy_interface;
3916 struct device_node *np;
3917 struct emac_priv *priv = netdev_priv(dev);
3918
3919 np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0);
3920 if (!np) {
3921 if (priv->fix_link) {
3922 emac_phy_interface_config(priv, priv->interface);
3923 if (priv->interface == PHY_INTERFACE_MODE_RGMII)
3924 pinctrl_select_state(priv->pinctrl,
3925 priv->rgmii_pins);
3926 emac_config_phy_interrupt(priv, 0);
3927 return 0;
3928 }
3929 return -ENODEV;
3930 }
3931
3932 printk("%s: %s\n",__func__, np->full_name);
3933 phy_interface = of_get_phy_mode(np);
3934 emac_phy_interface_config(priv, phy_interface);
3935 if (phy_interface != PHY_INTERFACE_MODE_RMII)
3936 pinctrl_select_state(priv->pinctrl, priv->rgmii_pins);
3937
3938 phydev = of_phy_connect(dev, np,
3939 &emac_adjust_link, 0, phy_interface);
3940 if (IS_ERR_OR_NULL(phydev)) {
3941 pr_err("Could not attach to PHY\n");
3942 emac_power_down(priv);
3943 if (!phydev)
3944 return -ENODEV;
3945 return PTR_ERR(phydev);
3946 }
3947
3948 if (!phydev->phy_id || phydev->phy_id == 0xffffffff) {
3949 pr_err("Not valid phy_id=0x%x\n", phydev->phy_id);
3950 emac_power_down(priv);
3951 return -ENODEV;
3952 }
3953
	if (phy_interrupt_is_valid(phydev))
		emac_config_phy_interrupt(priv, 1);
	else
		emac_config_phy_interrupt(priv, 0);
3958
3959 //phydev->supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
	pr_info("%s: %s: attached to PHY (UID 0x%x) Link = %d irq=%d\n",
		__func__, dev->name, phydev->phy_id, phydev->link, phydev->irq);
3963 dev->phydev = phydev;
3964
3965#ifdef WAN_LAN_AUTO_ADAPT
	if (phydev->phy_id == IP175D_PHY_ID)
3967 emac_sig_workq(PHY_IP175D_CONNECT, 0);
3968#endif
3969
3970 return 0;
3971}
3972
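/*
 * emac_mdio_init - register the MII bus described by the "mdio-bus" child
 * node.  The bus is allocated with mdiobus_alloc() (not devm), so failure
 * and teardown paths must free it explicitly.  phy_mask is left all-ones,
 * which means only PHYs listed in the device tree are probed.
 */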
3973static int emac_mdio_init(struct emac_priv *priv)
3974{
3975 struct device_node *mii_np;
3976 struct device *dev = &priv->pdev->dev;
3977 int ret;
3978
3979 mii_np = of_get_child_by_name(dev->of_node, "mdio-bus");
3980 if (!mii_np) {
3981 dev_err(dev, "no %s child node found", "mdio-bus");
3982 return -ENODEV;
3983 }
3984
3985 if (!of_device_is_available(mii_np)) {
3986 ret = -ENODEV;
3987 goto err_put_node;
3988 }
3989
	priv->mii = mdiobus_alloc();
	if (!priv->mii) {
		ret = -ENOMEM;
		goto err_put_node;
	}
	priv->mii->priv = priv;
	priv->mii->name = "emac mii";
	priv->mii->reset = emac_mii_reset;
	priv->mii->read = emac_mii_read;
	priv->mii->write = emac_mii_write;
	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	priv->mii->parent = dev;
	priv->mii->phy_mask = 0xffffffff;
	ret = of_mdiobus_register(priv->mii, mii_np);
	if (ret) {
		mdiobus_free(priv->mii);
		priv->mii = NULL;
	}

err_put_node:
	of_node_put(mii_np);
	return ret;
4010}
4011
static int emac_mdio_deinit(struct emac_priv *priv)
{
	if (!priv->mii)
		return 0;

	mdiobus_unregister(priv->mii);
	mdiobus_free(priv->mii);
	priv->mii = NULL;
	return 0;
}
4020
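/*
 * ethtool timestamping capabilities: with PTP support the hardware can
 * timestamp both directions and filter the usual PTPv1/v2 L4 event
 * classes; otherwise fall back to the software-only defaults.
 */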
4021static int emac_get_ts_info(struct net_device *dev,
4022 struct ethtool_ts_info *info)
4023{
4024 struct emac_priv *priv = netdev_priv(dev);
4025
	if (priv->ptp_support) {
4028 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4029 SOF_TIMESTAMPING_TX_HARDWARE |
4030 SOF_TIMESTAMPING_RX_SOFTWARE |
4031 SOF_TIMESTAMPING_RX_HARDWARE |
4032 SOF_TIMESTAMPING_SOFTWARE |
4033 SOF_TIMESTAMPING_RAW_HARDWARE;
4034
4035 if (priv->ptp_clock)
4036 info->phc_index = ptp_clock_index(priv->ptp_clock);
4037
4038 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
4039 info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
4040 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
4041 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
4042 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
4043 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
4044 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
4045 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
4046 (1 << HWTSTAMP_FILTER_ALL));
4047 if (priv->regdata->ptp_rx_ts_all_events) {
4048 info->rx_filters |=
4049 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
4050 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
4051 }
4052
4053 return 0;
	}

	return ethtool_op_get_ts_info(dev, info);
4056}
4057
4058static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4059{
4060 int i;
4061
4062 switch (stringset) {
4063 case ETH_SS_STATS:
4064 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4065 memcpy(data, emac_ethtool_stats[i].str, ETH_GSTRING_LEN);
4066 data += ETH_GSTRING_LEN;
4067 }
4068 break;
4069 }
4070}
4071
4072static int emac_get_sset_count(struct net_device *dev, int sset)
4073{
4074 switch (sset) {
4075 case ETH_SS_STATS:
4076 return ARRAY_SIZE(emac_ethtool_stats);
4077 default:
4078 return -EOPNOTSUPP;
4079 }
4080}
4081
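/*
 * emac_stats_update - snapshot the hardware MIB counters into hw_stats.
 * The struct is filled positionally (tx counters, then rx counters, then
 * the DMA missed-frame counter and software TSO counters), so its field
 * order must match the counter indices.  Callers hold stats_lock.
 */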
4082static void emac_stats_update(struct emac_priv *priv)
4083{
4084 struct emac_hw_stats *hwstats = priv->hw_stats;
4085 int i;
4086 u32 *p;
4087
4088 p = (u32 *)(hwstats);
4089
4090 for (i = 0; i < MAX_TX_STATS_NUM; i++)
4091 *(p + i) = ReadTxStatCounters(priv, i);
4092
4093 p = (u32 *)hwstats + MAX_TX_STATS_NUM;
4094
4095 for (i = 0; i < MAX_RX_STATS_NUM; i++)
4096 *(p + i) = ReadRxStatCounters(priv, i);
4097
4098 *(p + i++) = emac_rd(priv, DMA_MISSED_FRAME_COUNTER);
4099
4100 *(p + i++) = hwstats->tx_tso_pkts;
4101 *(p + i++) = hwstats->tx_tso_bytes;
4102}
4103
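/*
 * spin_trylock_bh() below is deliberate: if another reader is already
 * refreshing the counters we serve the previous snapshot instead of
 * blocking the ethtool call.
 */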
4104static void emac_get_ethtool_stats(struct net_device *dev,
4105 struct ethtool_stats *stats, u64 *data)
4106{
4107 struct emac_priv *priv = netdev_priv(dev);
4108 struct emac_hw_stats *hwstats = priv->hw_stats;
4109 u32 *data_src;
4110 u64 *data_dst;
4111 int i;
4112
4113 if (netif_running(dev) && netif_device_present(dev)) {
4114 if (spin_trylock_bh(&hwstats->stats_lock)) {
4115 emac_stats_update(priv);
4116 spin_unlock_bh(&hwstats->stats_lock);
4117 }
4118 }
4119
4120 data_dst = data;
4121
4122 for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
4123 data_src = (u32 *)hwstats + emac_ethtool_stats[i].offset;
4124 *data_dst++ = (u64)(*data_src);
4125 }
4126}
4127
4128static int emac_ethtool_get_regs_len(struct net_device *dev)
4129{
4130 return EMAC_REG_SPACE_SIZE;
4131}
4132
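/*
 * Register dump layout matches EMAC_REG_SPACE_SIZE: DMA registers first,
 * MAC registers at their native offset, and the unimplemented hole
 * between the two blocks left zeroed.
 */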
4133static void emac_ethtool_get_regs(struct net_device *dev,
4134 struct ethtool_regs *regs, void *space)
4135{
4136 struct emac_priv *priv = netdev_priv(dev);
4137 u32 *reg_space = (u32 *) space;
4138 void __iomem *base = priv->iobase;
4139 int i;
4140
4141 regs->version = 1;
4142
4143 memset(reg_space, 0x0, EMAC_REG_SPACE_SIZE);
4144
4145 for (i = 0; i < EMAC_DMA_REG_CNT; i++)
4146 reg_space[i] = readl(base + DMA_CONFIGURATION + i * 4);
4147
4148 for (i = 0; i < EMAC_MAC_REG_CNT; i++)
4149 reg_space[i + MAC_GLOBAL_CONTROL / 4] = readl(base + MAC_GLOBAL_CONTROL + i * 4);
4150}
4151
4152static int emac_get_link_ksettings(struct net_device *ndev,
4153 struct ethtool_link_ksettings *cmd)
4154{
4155 if (!ndev->phydev)
4156 return -ENODEV;
4157
4158 phy_ethtool_ksettings_get(ndev->phydev, cmd);
4159 return 0;
4160}
4161
4162static int emac_set_link_ksettings(struct net_device *ndev,
4163 const struct ethtool_link_ksettings *cmd)
4164{
4165 if (!ndev->phydev)
4166 return -ENODEV;
4167
4168 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
4169}
4170
4171static void emac_get_drvinfo(struct net_device *dev,
4172 struct ethtool_drvinfo *info)
4173{
4174 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
4175 info->n_stats = ARRAY_SIZE(emac_ethtool_stats);
4176}
4177
4178static void emac_get_pauseparam(struct net_device *ndev,
4179 struct ethtool_pauseparam *param)
4180{
4181 struct emac_priv *priv = netdev_priv(ndev);
4182 int val = emac_mii_read(priv->mii, 0, 0);
4183
	param->autoneg = (val & BMCR_ANENABLE) ? 1 : 0;
4185 param->rx_pause = priv->pause.rx_pause;
4186 param->tx_pause = priv->pause.tx_pause;
4189}
4190
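/*
 * emac_set_pauseparam - program flow control on first use.
 *
 * The DT property "flow-control-threshold" holds low/high watermarks as
 * percentages (defaults 60/90).  On asr18xx/asr1903-z1 they are scaled to
 * RX descriptor counts; on newer parts they index a 1024-entry FIFO with
 * automatic pause generation, e.g. 60% -> 614 and 90% -> 921 entries.
 * init_flag is function-static, so this one-time setup assumes a single
 * EMAC instance.
 */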
4191static int emac_set_pauseparam(struct net_device *ndev,
4192 struct ethtool_pauseparam *param)
4193{
4194 struct emac_priv *priv = netdev_priv(ndev);
4195 struct device *dev = &priv->pdev->dev;
4196 struct device_node *np = dev->of_node;
4197 int val;
4198 int phyval;
4199 u32 threshold[2];
4200 static int init_flag = 1;
4201
4202 val = readl(priv->iobase + MAC_FC_CONTROL);
4203 phyval = emac_mii_read(priv->mii, 0, 0);
4204
4205 if (param->rx_pause)
4206 val |= MREGBIT_FC_DECODE_ENABLE;
4207 else
4208 val &= ~MREGBIT_FC_DECODE_ENABLE;
4209
4210 if (param->tx_pause)
4211 val |= MREGBIT_FC_GENERATION_ENABLE;
4212 else
4213 val &= ~MREGBIT_FC_GENERATION_ENABLE;
4214
	if (init_flag && (param->rx_pause || param->tx_pause)) {
		val |= MREGBIT_MULTICAST_MODE;
		priv->pause.pause_time_max = 0;
		if (of_property_read_u32_array(np, "flow-control-threshold",
					       threshold, 2)) {
			threshold[0] = 60;
			threshold[1] = 90;
		}
4222 threshold[0] = clamp(threshold[0], 0U, 99U);
4223 threshold[1] = clamp(threshold[1], 1U, 100U);
4224
4225 if (cpu_is_asr18xx() || cpu_is_asr1903_z1()) {
4226 priv->pause.low_water = priv->rx_ring.total_cnt * threshold[0] / 100;
4227 priv->pause.high_water = priv->rx_ring.total_cnt * threshold[1] / 100 - 1;
4228 priv->pause.fc_auto = 0;
4229 } else {
4230 priv->pause.low_water = 0;
4231 priv->pause.high_water = 0;
4232 priv->pause.fc_auto = 1;
4233 val |= MREGBIT_AUTO_FC_GENERATION_ENABLE;
4234 threshold[0] = 1024 * threshold[0] / 100;
4235 threshold[1] = 1024 * threshold[1] / 100;
4236 emac_wr(priv, MAC_FC_AUTO_HIGH_THRESHOLD, threshold[1]);
4237 emac_wr(priv, MAC_FC_AUTO_LOW_THRESHOLD, threshold[0]);
4238 emac_wr(priv, MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE, 0xffff);
4239 emac_wr(priv, MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE, 0);
4240 }
4241 init_flag = 0;
4242 }
4243 emac_wr(priv, MAC_FC_CONTROL, val);
4244
	if (param->autoneg)
		phyval |= BMCR_ANENABLE;
	else
		phyval &= ~BMCR_ANENABLE;
4249
4250 (void)emac_mii_write(priv->mii, 0, 0, (u16)phyval);
4251
4252 priv->pause.rx_pause = param->rx_pause;
4253 priv->pause.tx_pause = param->tx_pause;
4254 return 0;
4255}
4256
4257static void emac_get_wol(struct net_device *dev,
4258 struct ethtool_wolinfo *wol)
4259{
4260 struct emac_priv *priv = netdev_priv(dev);
4261 struct device *device = &priv->pdev->dev;
4262
4263 if (device_can_wakeup(device)) {
4264 wol->supported = WAKE_MAGIC | WAKE_UCAST;
4265 wol->wolopts = priv->wolopts;
4266 }
4267}
4268
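/*
 * Wake-on-LAN: only magic-packet and unicast wake are supported, and only
 * when the platform declared a wakeup IRQ and suspend is enabled; the
 * wakeup IRQ is armed or disarmed to match the requested options.
 */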
4269static int emac_set_wol(struct net_device *dev,
4270 struct ethtool_wolinfo *wol)
4271{
4272 struct emac_priv *priv = netdev_priv(dev);
4273 struct device *device = &priv->pdev->dev;
4274 u32 support = WAKE_MAGIC | WAKE_UCAST;
4275
4276 if (!device_can_wakeup(device) || !priv->en_suspend)
		return -EOPNOTSUPP;
4278
4279 if (wol->wolopts & ~support)
4280 return -EINVAL;
4281
4282 priv->wolopts = wol->wolopts;
4283
4284 if (wol->wolopts) {
4285 device_set_wakeup_enable(device, 1);
4286 enable_irq_wake(priv->irq_wakeup);
4287 } else {
4288 device_set_wakeup_enable(device, 0);
4289 disable_irq_wake(priv->irq_wakeup);
4290 }
4291
4292 return 0;
4293}
4294
4295static const struct ethtool_ops emac_ethtool_ops = {
4296 .get_link_ksettings = emac_get_link_ksettings,
4297 .set_link_ksettings = emac_set_link_ksettings,
4298 .get_drvinfo = emac_get_drvinfo,
4299 .nway_reset = phy_ethtool_nway_reset,
4300 .get_link = ethtool_op_get_link,
4301 .get_pauseparam = emac_get_pauseparam,
4302 .set_pauseparam = emac_set_pauseparam,
4303 .get_strings = emac_get_strings,
4304 .get_sset_count = emac_get_sset_count,
4305 .get_ethtool_stats = emac_get_ethtool_stats,
4306 .get_regs = emac_ethtool_get_regs,
4307 .get_regs_len = emac_ethtool_get_regs_len,
4308 .get_ts_info = emac_get_ts_info,
4309 .get_wol = emac_get_wol,
4310 .set_wol = emac_set_wol,
4311};
4312
4313static const struct net_device_ops emac_netdev_ops = {
4314 .ndo_open = emac_open,
4315 .ndo_stop = emac_close,
4316 .ndo_start_xmit = emac_start_xmit,
4317 .ndo_set_mac_address = emac_set_mac_address,
4318 .ndo_do_ioctl = emac_ioctl,
4319 .ndo_change_mtu = emac_change_mtu,
4320 .ndo_tx_timeout = emac_tx_timeout,
4321};
4322
4323#ifdef WAN_LAN_AUTO_ADAPT
4324#define EMAC_SKB_SIZE 2048
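/*
 * Link and DHCP state changes are reported to userspace as broadcast
 * uevents built by hand on a netlink skb, mirroring the "KEY=value"
 * environment layout of kobject uevents.
 */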
4325static int emac_event_add_var(struct emac_event *event, int argv,
4326 const char *format, ...)
4327{
	char buf[128];	/* on-stack: work items may run concurrently */
	char *s;
	va_list args;
	int len;

	if (argv)
		return 0;

	va_start(args, format);
	len = vsnprintf(buf, sizeof(buf), format, args);
	va_end(args);

	if (len >= sizeof(buf)) {
		pr_err("uevent variable too long for buffer\n");
		WARN_ON(1);
		return -ENOMEM;
	}
4345
4346 s = skb_put(event->skb, len + 1);
4347 strcpy(s, buf);
4348
4349 return 0;
4350}
4351
4352static int emac_hotplug_fill_event(struct emac_event *event)
4353{
4354 int ret;
4355
4356 ret = emac_event_add_var(event, 0, "HOME=%s", "/");
4357 if (ret)
4358 return ret;
4359
4360 ret = emac_event_add_var(event, 0, "PATH=%s",
4361 "/sbin:/bin:/usr/sbin:/usr/bin");
4362 if (ret)
4363 return ret;
4364
4365 ret = emac_event_add_var(event, 0, "SUBSYSTEM=%s", "ethernet");
4366 if (ret)
4367 return ret;
4368
4369 ret = emac_event_add_var(event, 0, "ACTION=%s", event->action);
4370 if (ret)
4371 return ret;
4372
4373 ret = emac_event_add_var(event, 0, "ETH=%s", event->name);
4374 if (ret)
4375 return ret;
4376
4377 ret = emac_event_add_var(event, 0, "PORT=%d", event->port);
4378 if (ret)
4379 return ret;
4380
4381 ret = emac_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());
4382
4383 return ret;
4384}
4385
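/*
 * A delivered event is consumed like any other hotplug uevent; for
 * example, a link-up on port 0 carries roughly this environment
 * (SEQNUM varies):
 *
 *   LINKUP@
 *   HOME=/
 *   PATH=/sbin:/bin:/usr/sbin:/usr/bin
 *   SUBSYSTEM=ethernet
 *   ACTION=LINKUP
 *   ETH=asr_emac
 *   PORT=0
 *   SEQNUM=42
 */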
4386static void emac_hotplug_work(struct work_struct *work)
4387{
4388 struct emac_event *event = container_of(work, struct emac_event, work);
4389 int ret = 0;
4390
4391 event->skb = alloc_skb(EMAC_SKB_SIZE, GFP_KERNEL);
4392 if (!event->skb)
4393 goto out_free_event;
4394
4395 ret = emac_event_add_var(event, 0, "%s@", event->action);
4396 if (ret)
4397 goto out_free_skb;
4398
4399 ret = emac_hotplug_fill_event(event);
4400 if (ret)
4401 goto out_free_skb;
4402
4403 NETLINK_CB(event->skb).dst_group = 1;
4404 broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);
4405
4406 out_free_skb:
4407 if (ret) {
4408 printk("work error %d\n", ret);
4409 kfree_skb(event->skb);
4410 }
4411 out_free_event:
4412 kfree(event);
4413}
4414
4415static int emac_sig_workq(int event, int port)
4416{
4417 struct emac_event *u_event = NULL;
4418
4419 u_event = kzalloc(sizeof(*u_event), GFP_KERNEL);
4420 if (!u_event)
4421 return -ENOMEM;
4422
	u_event->name = DRIVER_NAME;
	switch (event) {
	case CARRIER_UP:
		u_event->action = "LINKUP";
		break;
	case CARRIER_DOWN:
		u_event->action = "LINKDW";
		break;
	case CARRIER_DOWN_IP175D:
		u_event->action = "IP175D_LINKDW";
		break;
	case CARRIER_UP_IP175D:
		u_event->action = "IP175D_LINKUP";
		break;
	case DHCP_EVENT_CLIENT:
		u_event->action = "DHCPCLIENT";
		break;
	case DHCP_EVENT_SERVER:
		u_event->action = "DHCPSERVER";
		break;
	case PHY_IP175D_CONNECT:
		u_event->action = "PHY_CONNECT";
		break;
	default:
		kfree(u_event);
		return -EINVAL;
	}
4438
4439 u_event->port = port;
	INIT_WORK(&u_event->work, emac_hotplug_work);
4441 schedule_work(&u_event->work);
4442
4443 return 0;
4444}
4445
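/*
 * Delayed-work verdict, 25s after link-up: if a DHCP response was seen we
 * look like a client on an existing network (DHCP_EVENT_CLIENT); if we
 * only sent requests, or saw no DHCP traffic at all, we should act as the
 * server (DHCP_EVENT_SERVER).
 */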
4446static inline void __emac_dhcp_work_func(struct emac_priv *priv)
4447{
4448 if (priv->dhcp == DHCP_REC_RESP) {
4449 emac_sig_workq(DHCP_EVENT_CLIENT, priv->vlan_port);
4450 } else if (priv->dhcp == DHCP_SEND_REQ || priv->dhcp == 0) {
4451 emac_sig_workq(DHCP_EVENT_SERVER, priv->vlan_port);
4452 }
4453
4454 priv->dhcp = 0;
	if (priv->dhcp_delaywork) {
		cancel_delayed_work(&priv->dhcp_work);
		priv->dhcp_delaywork = 0;
	}
4459}
4460
4461static void emac_dhcp_work_func_t(struct work_struct *work)
4462{
4463 struct emac_priv *priv = container_of(work, struct emac_priv, dhcp_work.work);
4464
4465 __emac_dhcp_work_func(priv);
4466}
4467#endif
4468
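/*
 * emac_probe - bring the device up from its DT description.
 *
 * An illustrative (not exhaustive) node for this driver; the property
 * names below are exactly the ones parsed here, the values are made up:
 *
 *   ethernet@d4200000 {
 *           compatible = <one of emac_of_match>;
 *           interrupts = <...>;      // 0: mac, 1: wakeup, 2: pps, 3: tso
 *           lpm-qos = <...>;         // required, probe fails without it
 *           mdio-clk-div = <0xfe>;
 *           tso-support;
 *           ptp-support;
 *           ptp-clk-rate = <20000000>;
 *           clk-tuning-enable;
 *           mdio-bus { ... };
 *   };
 */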
4469static int emac_probe(struct platform_device *pdev)
4470{
4471 struct emac_priv *priv;
4472 struct net_device *ndev = NULL;
4473 struct resource *res;
4474 struct device_node *np = pdev->dev.of_node;
4475 struct device *dev = &pdev->dev;
4476 const unsigned char *mac_addr = NULL;
4477 const struct of_device_id *match;
4478#ifdef CONFIG_DEBUG_FS
4479 struct dentry *emac_fs_dir = NULL;
4480 struct dentry *emac_clk_tuning;
4481#endif
4482 int ret;
4483
	ndev = alloc_etherdev(sizeof(struct emac_priv));
	if (!ndev)
		return -ENOMEM;
4489 priv = netdev_priv(ndev);
4490 priv->ndev = ndev;
4491 priv->pdev = pdev;
4492#ifdef WAN_LAN_AUTO_ADAPT
4493 priv->dhcp = -1;
4494 priv->vlan_port = -1;
4495 priv->dhcp_delaywork = 0;
4496#endif
4497 platform_set_drvdata(pdev, priv);
4498
	match = of_match_device(of_match_ptr(emac_of_match), &pdev->dev);
	if (match)
		priv->regdata = match->data;
	else
		dev_warn(&pdev->dev, "no matching OF device data found\n");
4505
4506 emac_command_options(priv);
4507 emac_skbrb_init(EMAC_SKBRB_SLOT_SIZE, priv->rx_ring.total_cnt * 2);
4508
4509 priv->hw_stats = devm_kzalloc(&pdev->dev,
4510 sizeof(*priv->hw_stats),
4511 GFP_KERNEL);
4512 if (!priv->hw_stats) {
4513 dev_err(&pdev->dev, "failed to allocate counter memory\n");
4514 ret = -ENOMEM;
4515 goto err_netdev;
4516 }
4517
4518 spin_lock_init(&priv->hw_stats->stats_lock);
4519
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->iobase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->iobase)) {
		ret = PTR_ERR(priv->iobase);
		goto err_netdev;
	}
4526
4527 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4528 priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
	priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
	if (!IS_ERR(priv->tso_base))
		dev_info(&pdev->dev, "tso base=%p\n", priv->tso_base);
4532
4533 priv->irq = irq_of_parse_and_map(np, 0);
4534 if (!priv->irq) {
4535 ret = -ENXIO;
4536 goto err_netdev;
4537 }
4538 priv->irq_wakeup = irq_of_parse_and_map(np, 1);
4539 if (!priv->irq_wakeup)
4540 dev_err(&pdev->dev, "wake_up irq not found\n");
4541
4542 priv->tso = of_property_read_bool(np, "tso-support");
4543 if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
4544 priv->tso = false;
4545 if (priv->tso) {
4546 priv->irq_tso = irq_of_parse_and_map(np, 3);
4547 if (!priv->irq_tso) {
4548 dev_err(&pdev->dev, "tso irq not found\n");
4549 priv->tso = false;
4550 }
4551 }
4552
4553 priv->sram_pool = of_gen_pool_get(dev->of_node, "eth,sram", 0);
4554 if (priv->sram_pool) {
4555 dev_notice(&pdev->dev, "use sram as tx desc\n");
4556 }
4557
	ret = of_property_read_u32(np, "lpm-qos", &priv->pm_qos);
	if (ret)
		goto err_netdev;
4561
4562 ret = of_property_read_u32(np, "3v3-enable", &priv->power_domain);
4563 if (ret)
4564 priv->power_domain = 0;
4565
4566 ret = of_property_read_u32(np, "mdio-clk-div", &priv->mdio_clk_div);
4567 if (ret)
4568 priv->mdio_clk_div = 0xfe;
4569
	priv->en_suspend = of_property_read_bool(np, "enable-suspend");
4574
4575 priv->wolopts = 0;
4576 if (of_property_read_bool(np, "magic-packet-wakeup"))
4577 priv->wolopts |= WAKE_MAGIC;
4578
4579 if (of_property_read_bool(np, "unicast-packet-wakeup"))
4580 priv->wolopts |= WAKE_UCAST;
4581
4582 priv->dev_flags = 0;
4583 if (of_property_read_bool(np, "suspend-not-keep-power")) {
4584 priv->dev_flags |= EMAC_SUSPEND_POWER_DOWN_PHY;
4585 priv->wolopts = 0;
4586 }
4587
4588 priv->pinctrl = devm_pinctrl_get(dev);
4589 if (IS_ERR(priv->pinctrl))
4590 dev_err(dev, "could not get pinctrl handle\n");
4591
4592 priv->rgmii_pins = pinctrl_lookup_state(priv->pinctrl, "rgmii-pins");
4593 if (IS_ERR(priv->rgmii_pins))
4594 dev_err(dev, "could not get rgmii-pins pinstate\n");
4595
4596 emac_set_aib_power_domain(priv);
4597
4598 device_init_wakeup(&pdev->dev, 1);
4599
4600 priv->pm_qos_req.name = pdev->name;
4601 pm_qos_add_request(&priv->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
4602 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
4603#ifdef CONFIG_DDR_DEVFREQ
4604 pm_qos_add_request(&priv->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
4605 PM_QOS_DEFAULT_VALUE);
4606
4607 priv->clk_scaling.polling_delay_ms = 1000; /* 1s window */
4608 priv->clk_scaling.tx_up_threshold = 120; /* 120Mbps */
4609 priv->clk_scaling.tx_down_threshold = 60;
4610 priv->clk_scaling.rx_up_threshold = 60; /* 60Mbps */
4611 priv->clk_scaling.rx_down_threshold = 20;
4612 priv->clk_scaling.window_time = jiffies;
4613 pm_qos_add_request(&priv->clk_scaling.ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
4614 PM_QOS_DEFAULT_VALUE);
4615 INIT_WORK(&priv->qos_work, emac_ddr_qos_work);
4616#endif
4617 skb_queue_head_init(&priv->rx_skb);
4618 ndev->watchdog_timeo = 5 * HZ;
4619 ndev->base_addr = (unsigned long)priv->iobase;
4620 ndev->irq = priv->irq;
4621 /* set hw features */
4622 ndev->features = NETIF_F_SG | NETIF_F_SOFT_FEATURES;
4623 if (priv->tso) {
4624 ndev->features |= NETIF_F_RXCSUM;
4625 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4626 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
4627 dev_info(&pdev->dev, "TSO feature enabled\n");
4628 }
4629 ndev->hw_features = ndev->features;
4630 ndev->vlan_features = ndev->features;
4631
4632 ndev->ethtool_ops = &emac_ethtool_ops;
4633 ndev->netdev_ops = &emac_netdev_ops;
4634 if (pdev->dev.of_node)
4635 mac_addr = of_get_mac_address(np);
4636
	if (!IS_ERR_OR_NULL(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
4640 if (!is_valid_ether_addr(ndev->dev_addr)) {
4641 dev_info(&pdev->dev, "Using random mac address\n");
4642 eth_hw_addr_random(ndev);
4643 }
4644 } else {
4645 dev_info(&pdev->dev, "Using random mac address\n");
4646 eth_hw_addr_random(ndev);
4647 }
4648
4649 priv->hw_adj = of_property_read_bool(np, "hw-increment");
4650 priv->ptp_support = of_property_read_bool(np, "ptp-support");
4651 if (priv->ptp_support) {
4652 pr_info("EMAC support IEEE1588 PTP Protocol\n");
		if (of_property_read_u32(np, "ptp-clk-rate",
					 &priv->ptp_clk_rate)) {
			priv->ptp_clk_rate = 20000000;
			pr_info("%s: ptp-clk-rate not set, using default %d; timestamps may be inaccurate\n",
				__func__, priv->ptp_clk_rate);
		}
4659
4660 priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk");
4661 if (IS_ERR(priv->ptp_clk)) {
4662 dev_err(&pdev->dev, "ptp clock not found.\n");
4663 ret = PTR_ERR(priv->ptp_clk);
4664 goto err_netdev;
4665 }
4666
4667 clk_set_rate(priv->ptp_clk, priv->ptp_clk_rate);
4668 }
4669
4670 priv->pps_info.enable_pps = 0;
4671#ifdef CONFIG_PPS
4672 ret = of_property_read_u32(np, "pps_source", &priv->pps_info.pps_source);
4673 if (!ret) {
4674 priv->irq_pps = irq_of_parse_and_map(np, 2);
4675
4676 if (priv->pps_info.pps_source < EMAC_PPS_MAX)
4677 priv->pps_info.enable_pps = 1;
4678 else
4679 dev_err(&pdev->dev, "wrong PPS source!\n");
4680 }
4681#endif
4682 priv->clk = devm_clk_get(&pdev->dev, "emac-clk");
4683 if (IS_ERR(priv->clk)) {
4684 dev_err(&pdev->dev, "emac clock not found.\n");
4685 ret = PTR_ERR(priv->clk);
4686 goto err_netdev;
4687 }
4688
	ret = clk_prepare_enable(priv->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable emac clock: %d\n", ret);
		goto err_netdev;
	}
4695
4696 emac_sw_init(priv);
4697 ret = emac_mdio_init(priv);
4698 if (ret)
4699 goto clk_disable;
4700
4701 INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
4702#ifdef WAN_LAN_AUTO_ADAPT
4703 INIT_DELAYED_WORK(&priv->dhcp_work, emac_dhcp_work_func_t);
4704#endif
4705 if (of_phy_is_fixed_link(np)) {
		if (emac_set_fixed_link(np, priv) < 0) {
4707 ret = -ENODEV;
4708 goto clk_disable;
4709 }
4710 dev_info(&pdev->dev, "find fixed link\n");
4711 priv->fix_link = 1;
4712 }
4713
4714 INIT_DELAYED_WORK(&priv->emac_pause_work, emac_pause_generate_work_fuc);
4715 SET_NETDEV_DEV(ndev, &pdev->dev);
4716 strcpy(ndev->name, "eth%d");
4717
4718 ret = register_netdev(ndev);
4719 if (ret) {
4720 pr_err("register_netdev failed\n");
4721 goto err_mdio_deinit;
4722 }
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		dev_warn(&pdev->dev, "failed to set 64-bit DMA mask\n");
4724#ifdef CONFIG_ASR_EMAC_NAPI
4725 netif_napi_add(ndev, &priv->rx_napi, emac_rx_poll, 32);
4726 netif_tx_napi_add(ndev, &priv->tx_napi, emac_tx_poll, 32);
4727#endif
4728 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
4729 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
4730 priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable");
4731
4732 if (priv->clk_tuning_enable) {
4733 ret = of_property_read_u32(np, "tx-clk-config",
4734 &priv->tx_clk_config);
4735 if (ret)
4736 priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
4737
4738 ret = of_property_read_u32(np, "rx-clk-config",
4739 &priv->rx_clk_config);
4740 if (ret)
4741 priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
4742#ifdef CONFIG_DEBUG_FS
		if (!emac_fs_dir) {
			emac_fs_dir = debugfs_create_dir(DRIVER_NAME, NULL);

			if (IS_ERR_OR_NULL(emac_fs_dir)) {
				pr_err("emac debugfs create directory failed\n");
			} else {
				emac_clk_tuning = debugfs_create_file("clk_tuning",
						0664, emac_fs_dir, priv,
						&clk_tuning_fops);
				if (!emac_clk_tuning)
					pr_err("emac debugfs create file failed\n");
			}
		}
4756#endif
4757 }
4758 return 0;
4759
4760err_mdio_deinit:
4761 emac_mdio_deinit(priv);
4762clk_disable:
4763 clk_disable_unprepare(priv->clk);
4764err_netdev:
4765 free_netdev(ndev);
4766 emac_skbrb_release();
4767 return ret;
4768}
4769
4770static int emac_remove(struct platform_device *pdev)
4771{
4772 struct emac_priv *priv = platform_get_drvdata(pdev);
4773
4774 device_init_wakeup(&pdev->dev, 0);
4775 unregister_netdev(priv->ndev);
4776 emac_reset_hw(priv);
4777 free_netdev(priv->ndev);
4778 emac_mdio_deinit(priv);
4779 clk_disable_unprepare(priv->clk);
4780 pm_qos_remove_request(&priv->pm_qos_req);
4781 cancel_delayed_work_sync(&priv->emac_pause_work);
4782#ifdef CONFIG_DDR_DEVFREQ
4783 pm_qos_remove_request(&priv->pm_ddr_qos);
4784 pm_qos_remove_request(&priv->clk_scaling.ddr_qos);
4785#endif
4786 emac_skbrb_release();
4787 return 0;
4788}
4789
4790static void emac_shutdown(struct platform_device *pdev)
4791{
4792}
4793
4794#ifdef CONFIG_PM_SLEEP
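/*
 * System sleep: with WoL options set the MAC stays powered, the matching
 * wake modes are programmed into MAC_GLOBAL_CONTROL and the wakeup IRQ is
 * armed; without WoL the interface is closed, the PHY optionally powered
 * down and the clock released, then reopened on resume.
 */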
4795static int emac_resume(struct device *dev)
4796{
4797 struct emac_priv *priv = dev_get_drvdata(dev);
4798 struct net_device *ndev = priv->ndev;
4799 u32 ctrl, wake_mode = 0;
4800
4801 if (!priv->en_suspend)
4802 return 0;
4803
4804 if (priv->wolopts) {
4805 if (netif_running(ndev)) {
4806 netif_device_attach(ndev);
4807#ifdef CONFIG_ASR_EMAC_NAPI
4808 napi_enable(&priv->rx_napi);
4809 napi_enable(&priv->tx_napi);
4810#endif
4811 }
4812
		if (priv->wolopts & WAKE_MAGIC)
			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
		if (priv->wolopts & WAKE_UCAST)
			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
4817
4818 disable_irq_wake(priv->irq_wakeup);
4819 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
4820 ctrl &= ~wake_mode;
4821 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
4822 } else {
4823 clk_prepare_enable(priv->clk);
4824
4825 if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
4826 emac_power_up(priv);
4827
4828 rtnl_lock();
4829 dev_open(ndev, NULL);
4830 rtnl_unlock();
4831 }
4832
4833 return 0;
4834}
4835
4836static int emac_suspend(struct device *dev)
4837{
4838 struct emac_priv *priv = dev_get_drvdata(dev);
4839 struct net_device *ndev = priv->ndev;
4840 u32 ctrl, wake_mode = 0;
4841
4842 if (!priv->en_suspend)
4843 return 0;
4844
4845 if (priv->wolopts) {
4846 if (netif_running(ndev)) {
4847 netif_device_detach(ndev);
4848#ifdef CONFIG_ASR_EMAC_NAPI
4849 napi_disable(&priv->rx_napi);
4850 napi_disable(&priv->tx_napi);
4851#endif
4852 }
4853
		if (priv->wolopts & WAKE_MAGIC)
			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
		if (priv->wolopts & WAKE_UCAST)
			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
4858
4859 ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
4860 ctrl |= wake_mode;
4861 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
4862 enable_irq_wake(priv->irq_wakeup);
4863 } else {
4864 rtnl_lock();
4865 dev_close(ndev);
4866 rtnl_unlock();
4867
4868 if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
4869 emac_power_down(priv);
4870
4871 clk_disable_unprepare(priv->clk);
4872 }
4873
4874 return 0;
4875}
4876
4877static int emac_suspend_noirq(struct device *dev)
4878{
4879 struct emac_priv *priv = dev_get_drvdata(dev);
4880 struct net_device *ndev = priv->ndev;
4881
4882 if (!ndev->phydev && !priv->fix_link)
4883 return 0;
4884
4885 pr_pm_debug("==> enter emac_suspend_noirq\n");
4886 pm_qos_update_request(&priv->pm_qos_req,
4887 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
4888 return 0;
4889}
4890
4891static int emac_resume_noirq(struct device *dev)
4892{
4893 struct emac_priv *priv = dev_get_drvdata(dev);
4894 struct net_device *ndev = priv->ndev;
4895
4896 if (!ndev->phydev && !priv->fix_link)
4897 return 0;
4898
4899 pr_pm_debug("==> enter emac_resume_noirq\n");
4900 pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
4901 return 0;
4902}
4903
4904static const struct dev_pm_ops emac_pm_ops = {
4905 .suspend = emac_suspend,
4906 .resume = emac_resume,
4907 .suspend_noirq = emac_suspend_noirq,
4908 .resume_noirq = emac_resume_noirq,
4909};
4910
4911#define ASR_EMAC_PM_OPS (&emac_pm_ops)
4912#else
4913#define ASR_EMAC_PM_OPS NULL
4914#endif
4915
4916static struct platform_driver emac_driver = {
4917 .probe = emac_probe,
4918 .remove = emac_remove,
4919 .shutdown = emac_shutdown,
4920 .driver = {
4921 .name = DRIVER_NAME,
4922 .of_match_table = of_match_ptr(emac_of_match),
4923 .pm = ASR_EMAC_PM_OPS,
4924 },
4925};
4926
4927module_platform_driver(emac_driver);
4928
4929MODULE_LICENSE("GPL");
4930MODULE_DESCRIPTION("Ethernet driver for ASR Emac");
4931MODULE_ALIAS("platform:asr_eth");