ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/net/ethernet/asr/emac_eth.c b/marvell/linux/drivers/net/ethernet/asr/emac_eth.c
new file mode 100644
index 0000000..5aed7ec
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/asr/emac_eth.c
@@ -0,0 +1,4931 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ASR EMAC driver
+ *
+ * Copyright (C) 2019 ASR Micro Limited
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <linux/phy_fixed.h>
+#include <linux/pm_qos.h>
+#include <asm/cacheflush.h>
+#include <linux/cputype.h>
+#include <linux/iopoll.h>
+#include <linux/genalloc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <asm/atomic.h>
+#include "emac_eth.h"
+#include <linux/skbrb.h>
+
+#ifdef WAN_LAN_AUTO_ADAPT
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/kobject.h>
+#endif
+
+#define DRIVER_NAME				"asr_emac"
+
+#define AXI_PHYS_BASE           0xd4200000
+
+#define AIB_GMAC_IO_REG			0xD401E804
+#define APBC_ASFAR			0xD4015050
+#define AKEY_ASFAR			0xbaba
+#define AKEY_ASSAR			0xeb10
+
+#define EMAC_DIRECT_MAP
+#define TUNING_CMD_LEN				50
+#define CLK_PHASE_CNT				8
+#define TXCLK_PHASE_DEFAULT			0
+#define RXCLK_PHASE_DEFAULT			0
+#define TX_PHASE				1
+#define RX_PHASE				0
+
+#define EMAC_DMA_REG_CNT			16
+#define EMAC_MAC_REG_CNT			61
+#define EMAC_EMPTY_FROM_DMA_TO_MAC  48
+#define EMAC_REG_SPACE_SIZE			((EMAC_DMA_REG_CNT + \
+         EMAC_MAC_REG_CNT + EMAC_EMPTY_FROM_DMA_TO_MAC) * 4)
+#define EMAC_ETHTOOL_STAT(x) { #x, \
+				offsetof(struct emac_hw_stats, x) / sizeof(u32) }
+
+#define EMAC_SKBRB_SLOT_SIZE 1600
+#define EMAC_EXTRA_ROOM 72
+#define EMAC_SKBRB_MAX_PAYLOAD (EMAC_SKBRB_SLOT_SIZE - EMAC_EXTRA_ROOM - NET_IP_ALIGN)
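+/*
+ * Worked example (NET_IP_ALIGN is typically 2): a 1600-byte slot minus
+ * 72 bytes of extra room and the IP alignment leaves 1600 - 72 - 2 = 1526
+ * bytes of payload, enough for a 1522-byte VLAN-tagged Ethernet frame.
+ */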
+
+#define EMAC_RX_FILL_TIMER_US	0
+#define EMAC_TX_COAL_TIMER_US	(1000)
+#define EMAC_TX_FRAMES		(64)
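+/*
+ * TX interrupt coalescing knobs: a 1 ms hrtimer plus a 64-frame threshold;
+ * an EMAC_RX_FILL_TIMER_US of 0 disables the RX refill timer (see
+ * emac_rx_timer_arm()).
+ */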
+
+#ifdef WAN_LAN_AUTO_ADAPT
+#define DHCP_DISCOVER 1
+#define DHCP_OFFER 2
+#define DHCP_REQUEST 3
+#define DHCP_ACK 5
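+/* DHCP message type codes per RFC 2132 option 53 (DECLINE=4, NAK=6 unused) */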
+#define IP175D_PHY_ID 0x02430d80
+
+enum emac_SIG {
+	CARRIER_DOWN = 0,
+	CARRIER_UP,
+	DHCP_EVENT_CLIENT,
+	DHCP_EVENT_SERVER,
+	PHY_IP175D_CONNECT,
+	CARRIER_DOWN_IP175D,
+	CARRIER_UP_IP175D,
+};
+
+enum emac_DHCP {
+	DHCP_SEND_REQ = 1,
+	DHCP_REC_RESP = 2,
+};
+
+struct emac_event {
+	const char		*name;
+	char			*action;
+	int				port;
+	struct sk_buff		*skb;
+	struct work_struct	work;
+};
+
+extern u64 uevent_next_seqnum(void);
+static int emac_sig_workq(int event, int port);
+#endif
+
+static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable);
+static int clk_phase_set(struct emac_priv *priv, bool is_tx);
+#ifdef CONFIG_ASR_EMAC_NAPI
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget);
+#else
+static int emac_rx_clean_desc(struct emac_priv *priv);
+#endif
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv);
+static int emac_phy_connect(struct net_device *dev);
+
+/* for falcon */
+struct emac_regdata asr_emac_regdata_v1 = {
+	.support_dual_vol_power = 1,
+	.ptp_rx_ts_all_events = 0,
+	.clk_rst_ctrl_reg_offset = 0x160,
+	.axi_mst_single_id_shift = 17,
+	.phy_intr_enable_shift = 16,
+	.int_clk_src_sel_shift = -1,
+	.rgmii_tx_clk_src_sel_shift = 5,
+	.rgmii_rx_clk_src_sel_shift = 4,
+	.rmii_rx_clk_sel_shift = 7,
+	.rmii_tx_clk_sel_shift = 6,
+	.rmii_ref_clk_sel_shift = -1,
+	.mac_intf_sel_shift = 2,
+	.rgmii_tx_dline_reg_offset = -1,
+	.rgmii_tx_delay_code_shift = -1,
+	.rgmii_tx_delay_code_mask = -1,
+	.rgmii_tx_delay_step_shift = -1,
+	.rgmii_tx_delay_step_mask = -1,
+	.rgmii_tx_delay_enable_shift = -1,
+	.rgmii_rx_dline_reg_offset = -1,
+	.rgmii_rx_delay_code_shift = -1,
+	.rgmii_rx_delay_code_mask = -1,
+	.rgmii_rx_delay_step_shift = -1,
+	.rgmii_rx_delay_step_mask = -1,
+	.rgmii_rx_delay_enable_shift = -1,
+};
+
+/* for kagu */
+struct emac_regdata asr_emac_regdata_v2 = {
+	.support_dual_vol_power = 0,
+	.ptp_rx_ts_all_events = 0,
+	.clk_rst_ctrl_reg_offset = 0x160,
+	.axi_mst_single_id_shift = 13,
+	.phy_intr_enable_shift = 12,
+	.int_clk_src_sel_shift = 9,
+	.rgmii_tx_clk_src_sel_shift = 8,
+	.rgmii_rx_clk_src_sel_shift = -1,
+	.rmii_rx_clk_sel_shift = 7,
+	.rmii_tx_clk_sel_shift = 6,
+	.rmii_ref_clk_sel_shift = 3,
+	.mac_intf_sel_shift = 2,
+	.rgmii_tx_dline_reg_offset = 0x178,
+	.rgmii_tx_delay_code_shift = 24,
+	.rgmii_tx_delay_code_mask = 0xff,
+	.rgmii_tx_delay_step_shift = 20,
+	.rgmii_tx_delay_step_mask = 0x3,
+	.rgmii_tx_delay_enable_shift = 16,
+	.rgmii_rx_dline_reg_offset = 0x178,
+	.rgmii_rx_delay_code_shift = 8,
+	.rgmii_rx_delay_code_mask = 0xff,
+	.rgmii_rx_delay_step_shift = 4,
+	.rgmii_rx_delay_step_mask = 0x3,
+	.rgmii_rx_delay_enable_shift = 0,
+};
+
+/* for lapwing */
+struct emac_regdata asr_emac_regdata_v3 = {
+	.support_dual_vol_power = 1,
+	.ptp_rx_ts_all_events = 1,
+	.clk_rst_ctrl_reg_offset = 0x164,
+	.axi_mst_single_id_shift = 13,
+	.phy_intr_enable_shift = 12,
+	.int_clk_src_sel_shift = 9,
+	.rgmii_tx_clk_src_sel_shift = 8,
+	.rgmii_rx_clk_src_sel_shift = -1,
+	.rmii_rx_clk_sel_shift = 7,
+	.rmii_tx_clk_sel_shift = 6,
+	.rmii_ref_clk_sel_shift = 3,
+	.mac_intf_sel_shift = 2,
+	.rgmii_tx_dline_reg_offset = 0x16c,
+	.rgmii_tx_delay_code_shift = 8,
+	.rgmii_tx_delay_code_mask = 0xff,
+	.rgmii_tx_delay_step_shift = 0,
+	.rgmii_tx_delay_step_mask = 0x3,
+	.rgmii_tx_delay_enable_shift = 31,
+	.rgmii_rx_dline_reg_offset = 0x168,
+	.rgmii_rx_delay_code_shift = 8,
+	.rgmii_rx_delay_code_mask = 0xff,
+	.rgmii_rx_delay_step_shift = 0,
+	.rgmii_rx_delay_step_mask = 0x3,
+	.rgmii_rx_delay_enable_shift = 31,
+};
+
+static const struct of_device_id emac_of_match[] = {
+	{
+		.compatible = "asr,asr-eth",
+		.data = (void *)&asr_emac_regdata_v1,
+	},
+	{
+		.compatible = "asr,asr-eth-v2",
+		.data = (void *)&asr_emac_regdata_v2,
+	},
+	{
+		.compatible = "asr,asr-eth-v3",
+		.data = (void *)&asr_emac_regdata_v3,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+#ifdef EMAC_DIRECT_MAP
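+/*
+ * Direct-map TX path: translate the kernel virtual address to a physical
+ * one and flush the D-cache by hand instead of using dma_map_single().
+ * The flush span is widened to whole 32-byte cache lines: the start is
+ * rounded down (buf & ~31) and the length rounded up to cover the tail,
+ * e.g. buf = 0x104e, len = 60 flushes 0x1040 for 96 bytes.
+ */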
+static inline dma_addr_t emac_map_direct(unsigned buf, unsigned len)
+{
+	unsigned ret;
+	ret = mv_cp_virtual_to_physical(buf);
+	BUG_ON(ret == buf);
+	__cpuc_flush_dcache_area((void *)(buf & ~31),
+				((len + (buf & 31) + 31) & ~31));
+	return (dma_addr_t)ret;
+}
+#endif
+
+static inline void emac_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+#ifdef EMAC_DIRECT_MAP
+	if (dir == DMA_TO_DEVICE)
+		return;
+#endif
+	dma_unmap_single(dev, handle, size, dir);
+}
+
+static inline dma_addr_t emac_map_single(struct device *dev, void *ptr,
+					size_t size,enum dma_data_direction dir)
+{
+	if (dir == DMA_FROM_DEVICE)
+		return dma_map_single(dev, ptr, size, dir);
+#ifndef EMAC_DIRECT_MAP
+	return dma_map_single(dev, ptr, size, dir);
+#else
+	return emac_map_direct((unsigned)ptr, (unsigned)size);
+#endif
+}
+
+#ifdef CONFIG_DDR_DEVFREQ
+static void emac_ddr_qos_work(struct work_struct *work)
+{
+	struct emac_priv *priv;
+	int val;
+
+	priv = container_of(work, struct emac_priv, qos_work);
+	val = priv->clk_scaling.qos_val;
+
+	if (val == PM_QOS_DEFAULT_VALUE)
+		pm_qos_update_request(&priv->clk_scaling.ddr_qos, val);
+	else
+		pm_qos_update_request_timeout(
+			&priv->clk_scaling.ddr_qos, val, (2 * USEC_PER_SEC));
+}
+
+static void emac_ddr_clk_scaling(struct emac_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	unsigned long rx_bytes, tx_bytes;
+	unsigned long last_rx_bytes, last_tx_bytes;
+	unsigned long total_time_ms = 0;
+	unsigned int cur_rx_threshold, cur_tx_threshold;
+	unsigned long polling_jiffies;
+	int qos_val;
+
+	polling_jiffies = msecs_to_jiffies(priv->clk_scaling.polling_delay_ms);
+	if (time_is_after_jiffies(priv->clk_scaling.window_time +
+				polling_jiffies))
+		return;
+
+	total_time_ms = jiffies_to_msecs((long)jiffies -
+			(long)priv->clk_scaling.window_time);
+
+	if (!ndev) {
+		pr_err("%s: dev or net is not ready\n", __func__);
+		return;
+	}
+
+	qos_val = priv->clk_scaling.qos_val;
+	last_rx_bytes = priv->clk_scaling.rx_bytes;
+	last_tx_bytes = priv->clk_scaling.tx_bytes;
+	if (!last_rx_bytes && !last_tx_bytes)
+		goto out;
+
+	if (likely(ndev->stats.rx_bytes > last_rx_bytes))
+		rx_bytes = ndev->stats.rx_bytes - last_rx_bytes;
+	else
+		rx_bytes = ULONG_MAX - last_rx_bytes + ndev->stats.rx_bytes + 1;
+
+	if (likely(ndev->stats.tx_bytes > last_tx_bytes))
+		tx_bytes = ndev->stats.tx_bytes - last_tx_bytes;
+	else
+		tx_bytes = ULONG_MAX - last_tx_bytes + ndev->stats.tx_bytes + 1;
+
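+	/*
+	 * Rate in Mbps = bits / (ms * 1000): e.g. 125 MB moved in 1000 ms
+	 * gives 125e6 * 8 / (1000 * 1000) = 1000 Mbps.
+	 */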
+	cur_tx_threshold = tx_bytes * 8 / (total_time_ms * 1000);
+	pr_debug("%s: tx_rate=%dMbps, up_threshold=%dMbps\n",
+		__func__, cur_tx_threshold, priv->clk_scaling.tx_up_threshold);
+	if (cur_tx_threshold >= priv->clk_scaling.tx_up_threshold) {
+		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
+		goto out;
+	}
+
+	cur_rx_threshold = rx_bytes * 8 / (total_time_ms * 1000);
+	pr_debug("%s: rx_rate=%dMbps, up_threshold=%dMbps\n",
+		__func__, cur_rx_threshold, priv->clk_scaling.rx_up_threshold);
+	if (cur_rx_threshold >= priv->clk_scaling.rx_up_threshold) {
+		qos_val = ASR_EMAC_DDR_BOOST_FREQ;
+		goto out;
+	}
+
+	if (cur_tx_threshold < priv->clk_scaling.tx_down_threshold &&
+	    cur_rx_threshold < priv->clk_scaling.rx_down_threshold)
+		qos_val = PM_QOS_DEFAULT_VALUE;
+
+out:
+	priv->clk_scaling.rx_bytes = ndev->stats.rx_bytes;
+	priv->clk_scaling.tx_bytes = ndev->stats.tx_bytes;
+	priv->clk_scaling.window_time = jiffies;
+
+	if (qos_val != priv->clk_scaling.qos_val) {
+		priv->clk_scaling.qos_val = qos_val;
+		schedule_work(&priv->qos_work);
+	}
+
+	return;
+}
+#endif
+
+/* strings used by ethtool */
+static const struct emac_ethtool_stats {
+	char str[ETH_GSTRING_LEN];
+	u32 offset;
+} emac_ethtool_stats[] = {
+	EMAC_ETHTOOL_STAT(tx_ok_pkts),
+	EMAC_ETHTOOL_STAT(tx_total_pkts),
+	EMAC_ETHTOOL_STAT(tx_ok_bytes),
+	EMAC_ETHTOOL_STAT(tx_err_pkts),
+	EMAC_ETHTOOL_STAT(tx_singleclsn_pkts),
+	EMAC_ETHTOOL_STAT(tx_multiclsn_pkts),
+	EMAC_ETHTOOL_STAT(tx_lateclsn_pkts),
+	EMAC_ETHTOOL_STAT(tx_excessclsn_pkts),
+	EMAC_ETHTOOL_STAT(tx_unicast_pkts),
+	EMAC_ETHTOOL_STAT(tx_multicast_pkts),
+	EMAC_ETHTOOL_STAT(tx_broadcast_pkts),
+	EMAC_ETHTOOL_STAT(tx_pause_pkts),
+	EMAC_ETHTOOL_STAT(rx_ok_pkts),
+	EMAC_ETHTOOL_STAT(rx_total_pkts),
+	EMAC_ETHTOOL_STAT(rx_crc_err_pkts),
+	EMAC_ETHTOOL_STAT(rx_align_err_pkts),
+	EMAC_ETHTOOL_STAT(rx_err_total_pkts),
+	EMAC_ETHTOOL_STAT(rx_ok_bytes),
+	EMAC_ETHTOOL_STAT(rx_total_bytes),
+	EMAC_ETHTOOL_STAT(rx_unicast_pkts),
+	EMAC_ETHTOOL_STAT(rx_multicast_pkts),
+	EMAC_ETHTOOL_STAT(rx_broadcast_pkts),
+	EMAC_ETHTOOL_STAT(rx_pause_pkts),
+	EMAC_ETHTOOL_STAT(rx_len_err_pkts),
+	EMAC_ETHTOOL_STAT(rx_len_undersize_pkts),
+	EMAC_ETHTOOL_STAT(rx_len_oversize_pkts),
+	EMAC_ETHTOOL_STAT(rx_len_fragment_pkts),
+	EMAC_ETHTOOL_STAT(rx_len_jabber_pkts),
+	EMAC_ETHTOOL_STAT(rx_64_pkts),
+	EMAC_ETHTOOL_STAT(rx_65_127_pkts),
+	EMAC_ETHTOOL_STAT(rx_128_255_pkts),
+	EMAC_ETHTOOL_STAT(rx_256_511_pkts),
+	EMAC_ETHTOOL_STAT(rx_512_1023_pkts),
+	EMAC_ETHTOOL_STAT(rx_1024_1518_pkts),
+	EMAC_ETHTOOL_STAT(rx_1519_plus_pkts),
+	EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts),
+	EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts),
+	EMAC_ETHTOOL_STAT(rx_dma_missed_frame_cnt),
+	EMAC_ETHTOOL_STAT(tx_tso_pkts),
+	EMAC_ETHTOOL_STAT(tx_tso_bytes),
+};
+
+static int emac_set_speed_duplex(struct emac_priv *priv)
+{
+	u32 ctrl;
+
+	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+	if (priv->duplex)
+		ctrl |= MREGBIT_FULL_DUPLEX_MODE;
+	else
+		ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
+
+	switch (priv->speed) {
+	case SPEED_1000:
+		ctrl |= MREGBIT_SPEED_1000M;
+		break;
+	case SPEED_100:
+		ctrl |= MREGBIT_SPEED_100M;
+		break;
+	case SPEED_10:
+		ctrl |= MREGBIT_SPEED_10M;
+		break;
+	default:
+		pr_err("broken speed: %d\n", priv->speed);
+		return 0;
+	}
+	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+	pr_info("emac: force link speed:%dM duplex:%s\n",
+			priv->speed, priv->duplex ? "Full" : "Half");
+
+	return 0;
+}
+
+static int emac_set_fixed_link(struct device_node *np, struct emac_priv *priv)
+{
+	struct fixed_phy_status status = {};
+	struct device_node *fixed_link_node;
+	u32 fixed_link_prop[5];
+	const char *managed;
+	int interface;
+
+	if (of_property_read_string(np, "managed", &managed) == 0 &&
+	    strcmp(managed, "in-band-status") == 0) {
+		/* status is zeroed, namely its .link member */
+		goto fix_link;
+	}
+
+	/* New binding */
+	fixed_link_node = of_get_child_by_name(np, "fixed-link");
+	if (fixed_link_node) {
+		status.link = 1;
+		status.duplex = of_property_read_bool(fixed_link_node,
+						      "full-duplex");
+		if (of_property_read_u32(fixed_link_node, "speed",
+					 &status.speed)) {
+			of_node_put(fixed_link_node);
+			return -EINVAL;
+		}
+		status.pause = of_property_read_bool(fixed_link_node, "pause");
+		status.asym_pause = of_property_read_bool(fixed_link_node,
+							  "asym-pause");
+		interface = of_get_phy_mode(fixed_link_node);
+		if (interface < 0) {
+			priv->interface = PHY_INTERFACE_MODE_RGMII;
+			pr_info("no interface for fix-link, use RGMII\n");
+		} else {
+			priv->interface = interface;
+		}
+
+		of_node_put(fixed_link_node);
+		goto fix_link;
+	}
+
+	/* Old binding */
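+	/*
+	 * Legacy layout: fixed-link = <phy_id full-duplex speed pause
+	 * asym-pause>, hence indices 1..4 below (index 0, the emulated
+	 * PHY id, is unused here).
+	 */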
+	if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
+				       ARRAY_SIZE(fixed_link_prop)) == 0) {
+		status.link = 1;
+		status.duplex = fixed_link_prop[1];
+		status.speed  = fixed_link_prop[2];
+		status.pause  = fixed_link_prop[3];
+		status.asym_pause = fixed_link_prop[4];
+		goto fix_link;
+	}
+
+	return -ENODEV;
+
+fix_link:
+	priv->speed = status.speed;
+	priv->duplex = status.duplex;
+
+	return emac_set_speed_duplex(priv);
+}
+
+void register_dump(struct emac_priv *priv)
+{
+	int i;
+	void __iomem *base = priv->iobase;
+
+	for (i = 0; i < 16; i++) {
+		pr_info("DMA:0x%x:0x%x\n",
+		       DMA_CONFIGURATION + i * 4,
+		       readl(base + DMA_CONFIGURATION + i * 4));
+	}
+	for (i = 0; i < 60; i++) {
+		pr_info("MAC:0x%x:0x%x\n",
+		       MAC_GLOBAL_CONTROL + i * 4,
+		       readl(base + MAC_GLOBAL_CONTROL + i * 4));
+	}
+
+	for (i = 0; i < 4; i++) {
+		pr_info("1588:0x%x:0x%x\n",
+		       PTP_1588_CTRL + i * 4,
+		       readl(base + PTP_1588_CTRL + i * 4));
+	}
+
+	for (i = 0; i < 6; i++) {
+		pr_info("1588:0x%x:0x%x\n",
+		       SYS_TIME_GET_LOW + i * 4,
+		       readl(base + SYS_TIME_GET_LOW + i * 4));
+	}
+	for (i = 0; i < 5; i++) {
+		pr_info("1588:0x%x:0x%x\n",
+		       RX_TIMESTAMP_LOW + i * 4,
+		       readl(base + RX_TIMESTAMP_LOW + i * 4));
+	}
+	for (i = 0; i < 2; i++) {
+		pr_info("1588:0x%x:0x%x\n",
+		       PTP_1588_IRQ_STS + i * 4,
+		       readl(base + PTP_1588_IRQ_STS + i * 4));
+	}
+
+	if (priv->tso) {
+		for (i = 0; i < 18; i++) {
+			pr_info("TSO:0x%x:0x%x\n", i * 4,
+				emac_rd_tso(priv, i * 4));
+		}
+	}
+}
+
+void print_pkt(unsigned char *buf, int len)
+{
+	int i = 0;
+
+	pr_debug("data len = %d byte, buf addr: 0x%x\n",
+		 len, (unsigned int)buf);
+	for (i = 0; i < len; i = i + 8) {
+		pr_debug("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+			*(buf + i),
+			*(buf + i + 1),
+			*(buf + i + 2),
+			*(buf + i + 3),
+			*(buf + i + 4),
+			*(buf + i + 5),
+			*(buf + i + 6),
+			*(buf + i + 7)
+			);
+	}
+}
+
+#ifdef EMAC_DEBUG
+void print_desc(unsigned char *buf, int len)
+{
+	int i;
+
+	pr_info("descriptor len = %d byte, buf addr: 0x%x\n",
+		 len, (unsigned int)buf);
+	for (i = 0; i < len; i = i + 4) {
+		pr_info("0x%02x%02x%02x%02x\n",
+			*(buf + i + 3),
+			*(buf + i + 2),
+			*(buf + i + 1),
+			*(buf + i));
+	}
+}
+#else
+void print_desc(unsigned char *buf, int len)
+{
+
+}
+#endif
+
+/* Name		emac_reset_hw
+ * Arguments	priv : pointer to hardware data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	Disables all interrupts and the TX/RX units, stops the
+ *		DMA and resets the MAC and statistics counters.
+ */
+int emac_reset_hw(struct emac_priv *priv)
+{
+	mutex_lock(&priv->mii_mutex);
+	/* disable all the interrupts */
+	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
+	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);
+
+	/* disable transmit and receive units */
+	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
+	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);
+
+	/* stop the DMA */
+	emac_wr(priv, DMA_CONTROL, 0x0000);
+
+	/* reset mac, statistic counters */
+	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018);
+
+	emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000);
+
+	emac_wr(priv, MAC_MDIO_CLK_DIV,
+		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
+	mutex_unlock(&priv->mii_mutex);
+	return 0;
+}
+
+/* Name		emac_init_hw
+ * Arguments	pstHWData	: pointer to hardware data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	Assumes that the controller has previously been reset
+ * and is in a post-reset, uninitialized state.
+ * Initializes the receive address registers,
+ * multicast table, and VLAN filter table.
+ * Calls routines to setup link
+ * configuration and flow control settings.
+ * Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ */
+int emac_init_hw(struct emac_priv *priv)
+{
+	u32 val = 0, threshold;
+
+	mutex_lock(&priv->mii_mutex);
+	/* MAC Init
+	 * disable transmit and receive units
+	 */
+	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
+	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);
+
+	/* enable mac address 1 filtering */
+	//emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0001);
+	emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0100);
+
+	/* zero initialize the multicast hash table */
+	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0000);
+	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0000);
+	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0000);
+	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0000);
+
+	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, EMAC_TX_FIFO_DWORDS - 8);
+
+	if (priv->speed == SPEED_1000)
+		threshold = 1024;
+	else if (priv->speed == SPEED_100)
+		threshold = 256;
+	else
+		threshold = TX_STORE_FORWARD_MODE;
+	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, threshold);
+
+	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, 0xc);
+
+	/* reset dma */
+	emac_wr(priv, DMA_CONTROL, 0x0000);
+
+	emac_wr(priv, DMA_CONFIGURATION, 0x01);
+	mdelay(10);
+	emac_wr(priv, DMA_CONFIGURATION, 0x00);
+	mdelay(10);
+
+	val |= MREGBIT_WAIT_FOR_DONE;
+	val |= MREGBIT_STRICT_BURST;
+	val |= MREGBIT_DMA_64BIT_MODE;
+	val |= MREGBIT_BURST_16WORD; //MREGBIT_BURST_1WORD;
+
+	emac_wr(priv, DMA_CONFIGURATION, val);
+
+	/* MDC Clock Division: AXI-312M/96 = 3.25M */
+	emac_wr(priv, MAC_MDIO_CLK_DIV,
+		priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
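+	/*
+	 * Note: IEEE 802.3 specifies a nominal MDC maximum of 2.5 MHz, so
+	 * 312 MHz / 96 = 3.25 MHz slightly exceeds spec but is commonly
+	 * tolerated by PHYs.
+	 */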
+
+	mutex_unlock(&priv->mii_mutex);
+
+	printk("MDIO clock div: 0x%x\n", emac_rd(priv, MAC_MDIO_CLK_DIV));
+	return 0;
+}
+
+int emac_set_mac_addr(struct emac_priv *priv, unsigned char *addr)
+{
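+	/* the address is programmed as three little-endian 16-bit halves */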
+	emac_wr(priv, MAC_ADDRESS1_HIGH, (addr[1] << 8 | addr[0]));
+	emac_wr(priv, MAC_ADDRESS1_MED, (addr[3] << 8 | addr[2]));
+	emac_wr(priv, MAC_ADDRESS1_LOW, (addr[5] << 8 | addr[4]));
+
+	return 0;
+}
+
+void emac_set_fc_source_addr(struct emac_priv *priv, unsigned char *addr)
+{
+	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_HIGH, (addr[1] << 8 | addr[0]));
+	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_MED, (addr[3] << 8 | addr[2]));
+	emac_wr(priv, MAC_FC_SOURCE_ADDRESS_LOW, (addr[5] << 8 | addr[4]));
+
+	return;
+}
+
+static inline void emac_dma_start_transmit(struct emac_priv *priv)
+{
+	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF);
+}
+
+static inline void emac_dma_start_receive(struct emac_priv *priv)
+{
+	emac_wr(priv, DMA_RECEIVE_POLL_DEMAND, 0xFF);
+}
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+void emac_enable_interrupt(struct emac_priv *priv, int tx)
+{
+	u32 val;
+
+	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+
+	if (tx) {
+		val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+	} else {
+		val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
+		if (priv->tso)
+			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
+				TSO_AP_RX_INTR_ENA_CSUM_DONE |
+				TSO_AP_RX_INTR_ENA_CSUM_ERR);
+	}
+
+	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+void emac_disable_interrupt(struct emac_priv *priv, int tx)
+{
+	u32 val;
+
+	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+
+	if (tx) {
+		val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+	} else {
+		val &= ~(MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
+		if (priv->tso)
+			emac_wr_tso(priv, TSO_AP_RX_INTR_ENA, 0x0);
+	}
+
+	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+#endif
+
+bool emac_is_rmii_interface(struct emac_priv *priv)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem* apmu;
+	u32 val;
+
+	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+	if (apmu == NULL) {
+		pr_err("failed to ioremap APMU base\n");
+		/* keep the old failure behavior: report RMII */
+		return true;
+	}
+
+	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+	iounmap(apmu);
+
+	return !(val & (0x1 << regdata->mac_intf_sel_shift));
+}
+
+void emac_config_phy_interrupt(struct emac_priv *priv, int enable)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem* apmu;
+	u32 val;
+
+	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+	if (apmu == NULL) {
+		pr_err("error to ioremap APMU base\n");
+		return;
+	}
+
+	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+	if (enable)
+		val |= 0x1 << regdata->phy_intr_enable_shift;
+	else
+		val &= ~(0x1 << regdata->phy_intr_enable_shift);
+	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+	iounmap(apmu);
+	return;
+}
+
+void emac_phy_interface_config(struct emac_priv *priv, int phy_interface)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem* apmu;
+	u32 val;
+
+	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+	if (apmu == NULL) {
+		pr_err("error to ioremap APMU base\n");
+		return;
+	}
+
+	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+	if (PHY_INTERFACE_MODE_RMII == phy_interface) {
+		val &= ~(0x1 << regdata->mac_intf_sel_shift);
+		printk("===> set eamc interface: rmii\n");
+	} else {
+		val |= 0x1 << regdata->mac_intf_sel_shift;
+		printk("===> set eamc interface: rgmii\n");
+	}
+	val |= 0x1 << regdata->axi_mst_single_id_shift;
+	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+
+	iounmap(apmu);
+	priv->interface = phy_interface;
+	return;
+}
+
+static void emac_set_aib_power_domain(struct emac_priv *priv)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem *aib_emac_io;
+	void __iomem *apbc_asfar;
+	u32 tmp;
+
+	if (!regdata->support_dual_vol_power)
+		return;
+
+	aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
+	apbc_asfar = ioremap(APBC_ASFAR, 8);
+
+	writel(AKEY_ASFAR, apbc_asfar);
+	writel(AKEY_ASSAR, apbc_asfar + 4);
+	tmp = readl(aib_emac_io);
+
+	/* 0 = power down; only set power down when the voltage is 0 */
+	if (priv->power_domain) {
+		tmp &= ~(0x1 << 2);  /* 3.3v */
+		printk("===> emac set io to 3.3v\n");
+	} else {
+		tmp |= 0x1 << 2; /* 1.8v */
+		printk("===> emac set io to 1.8v\n");
+	}
+
+	writel(AKEY_ASFAR, apbc_asfar);
+	writel(AKEY_ASSAR, apbc_asfar + 4);
+	writel(tmp, aib_emac_io);
+
+	writel(AKEY_ASFAR, apbc_asfar);
+	writel(AKEY_ASSAR, apbc_asfar + 4);
+	tmp = readl(aib_emac_io);
+	printk("===> emac AIB read back: 0x%x\n", tmp);
+
+	iounmap(apbc_asfar);
+	iounmap(aib_emac_io);
+}
+
+static void emac_pause_generate_work_fuc(struct work_struct *work)
+{
+	struct emac_priv *priv = container_of(work, struct emac_priv, emac_pause_work.work);
+	int time_nxt = 0;
+	/*
+	 * A pause time of 0xFFFF tells the link partner to stop transmitting
+	 * for about 336 ms at 100M or 34 ms at 1000M; repeated testing showed
+	 * that re-sending every 20 ms (1000M) / 300 ms (100M) keeps the
+	 * neighbor paused continuously.
+	 */
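+	/*
+	 * Those figures follow from the 802.3 pause quantum of 512 bit times:
+	 * 0xFFFF * 512 / 100e6  ~= 335 ms at 100 Mb/s,
+	 * 0xFFFF * 512 / 1000e6 ~=  34 ms at 1000 Mb/s.
+	 */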
+	time_nxt = (priv->speed == SPEED_1000) ? 20 : 300;
+	if (!priv->pause.pause_time_max) {
+		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0xffff);
+		priv->pause.pause_time_max = 1;
+	}
+
+	emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
+	schedule_delayed_work(&priv->emac_pause_work, msecs_to_jiffies(time_nxt));
+	return;
+}
+
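+/*
+ * RX flow-control hysteresis: when the descriptor high_water slots ahead of
+ * the cleanup index is no longer DMA-owned, the ring is nearly full, so the
+ * periodic pause-frame work is started; once the descriptor low_water slots
+ * ahead is DMA-owned again, a zero pause-time frame lets the peer resume.
+ */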
+static inline void emac_check_ring_and_send_pause(struct emac_priv *priv)
+{
+	int pos;
+	int high_water;
+	int low_water;
+	struct emac_rx_desc *rx_desc;
+	struct emac_desc_ring *rx_ring;
+
+	rx_ring = &priv->rx_ring;
+	pos = rx_ring->nxt_clean;
+	high_water = (pos + priv->pause.high_water) % priv->rx_ring.total_cnt;
+	low_water = (pos + priv->pause.low_water) % priv->rx_ring.total_cnt;
+
+	rx_desc = emac_get_rx_desc(priv, high_water);
+	if (priv->pause.pause_sending == 0 && rx_desc->OWN == 0) {
+		schedule_delayed_work(&priv->emac_pause_work, 0);
+		priv->pause.pause_sending = 1;
+	}
+
+	rx_desc = emac_get_rx_desc(priv, low_water);
+	if (rx_desc->OWN && priv->pause.pause_sending) {
+		cancel_delayed_work_sync(&priv->emac_pause_work);
+		emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0);
+		emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
+		priv->pause.pause_time_max = 0;
+		priv->pause.pause_sending = 0;
+	}
+}
+
+/* Name		emac_sw_init
+ * Arguments	priv	: pointer to driver private data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	Initializes the driver private data fields
+ *		(locks and buffer length) with
+ *		their default values
+ */
+static int emac_sw_init(struct emac_priv *priv)
+{
+	priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
+
+	mutex_init(&priv->mii_mutex);
+	spin_lock_init(&priv->spStatsLock);
+	spin_lock_init(&priv->spTxLock);
+	spin_lock_init(&priv->intr_lock);
+
+	return 0;
+}
+
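+/*
+ * The first byte of the PTPv2 header ("tsmt") packs the messageType into
+ * its low nibble and transportSpecific (majorSdoId in newer revisions of
+ * IEEE 1588) into its high nibble, hence the 0x0F/0xF0 masks below.
+ */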
+static int emac_check_ptp_packet(struct emac_priv *priv,
+				struct sk_buff *skb, int txrx)
+{
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct ptp_header *ptph = NULL;
+	struct iphdr *iph;
+	struct udphdr *udph;
+	int msg_type, msg_id;
+	int ts;
+
+	if (eth->h_proto == htons(ETH_P_1588)) {
+		netdev_dbg(priv->ndev, "get PTP packet over ETH\n");
+		ptph = (struct ptp_header *)((u8 *)eth + sizeof(struct ethhdr));
+	} else if (eth->h_proto == htons(ETH_P_IP)) {
+		iph = (struct iphdr *)((u8 *)eth + sizeof(struct ethhdr));
+		if (iph->protocol != IPPROTO_UDP)
+			return -1;
+
+		udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
+		if ((htons(udph->dest) != PTP_EVENT_PORT ||
+		     htons(udph->source) != PTP_EVENT_PORT))
+			return -1;
+
+		netdev_dbg(priv->ndev, "get PTP packet over UDP\n");
+		ptph = (struct ptp_header *)((u8 *)udph + sizeof(struct udphdr));
+	} else {
+		return -1;
+	}
+
+	msg_id = -1;
+	ts = ptph->tsmt & 0xF0;
+	msg_type = (ptph->tsmt) & 0x0F;
+	if (txrx) {
+		if (msg_type == MSG_SYNC) {
+			if (ts)
+				msg_id = MSG_PDELAY_REQ;
+			else
+				msg_id = MSG_DELAY_REQ;
+		} else if (msg_type == MSG_DELAY_REQ) {
+			msg_id = MSG_SYNC;
+		} else if (msg_type == MSG_PDELAY_REQ) {
+			msg_id = MSG_PDELAY_RESP;
+			memcpy(&priv->sourcePortIdentity,
+				&ptph->sourcePortIdentity,
+				sizeof(struct PortIdentity));
+		} else if (msg_type == MSG_PDELAY_RESP) {
+			msg_id = MSG_PDELAY_REQ;
+		}
+	} else {
+		netdev_dbg(priv->ndev, "RX timestamp for message type %d\n",
+			   ptph->tsmt);
+
+		if (msg_type == MSG_PDELAY_RESP) {
+			struct pdelay_resp_msg *presp = (struct pdelay_resp_msg *)ptph;
+
+			/*
+			 * Switch to monitoring SYNC packets if a pdelay
+			 * response was received for the same clock identity.
+			 */
+			if (!memcmp(&presp->requestingPortIdentity.clockIdentity,
+			            &priv->sourcePortIdentity.clockIdentity,
+				    sizeof(struct ClockIdentity))) {
+				msg_id = MSG_SYNC;
+			}
+		}
+	}
+
+	/*
+	 * Some platforms cannot timestamp more than one message type
+	 * at a time, so adjust the event filter here.
+	 */
+	if (msg_id >= 0) {
+		if (priv->regdata->ptp_rx_ts_all_events) {
+			msg_id = ALL_EVENTS;
+			msg_id |= ts | ts << 8 | ts << 16 | ts << 24;
+		} else {
+			msg_id |= ts;
+		}
+
+		priv->hwptp->config_hw_tstamping(priv, 1, PTP_V2_L2_L4, msg_id);
+	}
+
+	return ptph->tsmt;
+}
+
+/* emac_get_tx_hwtstamp - get HW TX timestamps
+ * @priv: driver private structure
+ * @skb : the socket buffer
+ * Description :
+ * This function will read timestamp from the register & pass it to stack.
+ * and also perform some sanity checks.
+ */
+static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb)
+{
+	struct skb_shared_hwtstamps shhwtstamp;
+	u64 ns;
+
+	if (!priv->hwts_tx_en)
+		return;
+
+	/* exit if skb doesn't support hw tstamp */
+	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+		return;
+
+	emac_check_ptp_packet(priv, skb, 1);
+
+	/* get the valid tstamp */
+	ns = priv->hwptp->get_tx_timestamp(priv);
+
+	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+	shhwtstamp.hwtstamp = ns_to_ktime(ns);
+
+	wmb();
+	netdev_dbg(priv->ndev, "get valid TX hw timestamp %llu\n", ns);
+	/* pass tstamp to stack */
+	skb_tstamp_tx(skb, &shhwtstamp);
+
+	return;
+}
+
+/* emac_get_rx_hwtstamp - get HW RX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function will read received packet's timestamp from the descriptor
+ * and pass it to stack. It also perform some sanity checks.
+ */
+static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p,
+				 struct sk_buff *skb)
+{
+	struct skb_shared_hwtstamps *shhwtstamp = NULL;
+	u64 ns;
+
+	if (!priv->hwts_rx_en)
+		return;
+
+	/* Check if timestamp is available */
+	if (p->ptp_pkt && p->rx_timestamp) {
+		emac_check_ptp_packet(priv, skb, 0);
+		ns = priv->hwptp->get_rx_timestamp(priv);
+		netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns);
+		shhwtstamp = skb_hwtstamps(skb);
+		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+		shhwtstamp->hwtstamp = ns_to_ktime(ns);
+	} else {
+		netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n");
+	}
+}
+
+/**
+ *  emac_hwtstamp_set - control hardware timestamping.
+ *  @dev: device pointer.
+ *  @ifr: An IOCTL specific structure, that can contain a pointer to
+ *  a proprietary structure used to pass information to the driver.
+ *  Description:
+ *  This function configures the MAC to enable/disable both outgoing(TX)
+ *  and incoming(RX) packets time stamping based on user input.
+ *  Return Value:
+ *  0 on success and an appropriate -ve integer on failure.
+ */
+static int emac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config config;
+	struct timespec64 now;
+	u64 ns_ptp;
+	u32 ptp_event_msg_id = 0;
+	u32 rx_ptp_type = 0;
+
+	if (!priv->ptp_support) {
+		netdev_alert(priv->ndev, "No support for HW time stamping\n");
+		priv->hwts_tx_en = 0;
+		priv->hwts_rx_en = 0;
+
+		return -EOPNOTSUPP;
+	}
+
+	if (copy_from_user(&config, ifr->ifr_data,
+			   sizeof(struct hwtstamp_config)))
+		return -EFAULT;
+
+	netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+		   __func__, config.flags, config.tx_type, config.rx_filter);
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	if (config.tx_type != HWTSTAMP_TX_OFF &&
+	    config.tx_type != HWTSTAMP_TX_ON)
+		return -ERANGE;
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		/* time stamp no incoming packet at all */
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		/* PTP v1, UDP, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		/* take time stamp for SYNC messages only */
+		ptp_event_msg_id = MSG_SYNC;
+		rx_ptp_type = PTP_V1_L4_ONLY;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		/* PTP v1, UDP, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		/* take time stamp for Delay_Req messages only */
+		ptp_event_msg_id = MSG_DELAY_REQ;
+		rx_ptp_type = PTP_V1_L4_ONLY;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+		/* PTP v2, UDP, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+		/* take time stamp for SYNC messages only */
+		ptp_event_msg_id = MSG_SYNC;
+		rx_ptp_type = PTP_V2_L2_L4;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		/* PTP v2, UDP, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+		/* take time stamp for Delay_Req messages only */
+		ptp_event_msg_id = MSG_DELAY_REQ;
+		rx_ptp_type = PTP_V2_L2_L4;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		/* PTP v2/802.AS1 any layer, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+		/*
+		 * If ALL_EVENTS is not supported, timestamp SYNC packets by
+		 * default; switched to MSG_DELAY_REQ automatically if needed.
+		 */
+		if (priv->regdata->ptp_rx_ts_all_events)
+			ptp_event_msg_id = ALL_EVENTS;
+		else
+			ptp_event_msg_id = MSG_SYNC;
+
+		rx_ptp_type = PTP_V2_L2_L4;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+		/* PTP v2/802.AS1, any layer, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+		/* take time stamp for SYNC messages only */
+		ptp_event_msg_id = MSG_SYNC;
+		rx_ptp_type = PTP_V2_L2_L4;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		/* PTP v2/802.AS1, any layer, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+		/* take time stamp for Delay_Req messages only */
+		ptp_event_msg_id = MSG_DELAY_REQ;
+		rx_ptp_type = PTP_V2_L2_L4;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
+		priv->hwptp->config_hw_tstamping(priv, 0, 0, 0);
+	else {
+
+		priv->hwptp->config_hw_tstamping(priv, 1,
+			rx_ptp_type, ptp_event_msg_id);
+
+		/* initialize system time */
+		ktime_get_real_ts64(&now);
+		priv->hwptp->init_systime(priv, timespec64_to_ns(&now));
+
+		/* program Increment reg */
+		priv->hwptp->config_systime_increment(priv);
+
+		ns_ptp = priv->hwptp->get_phc_time(priv);
+		ktime_get_real_ts64(&now);
+		/* check the diff between ptp timer and system time */
+		if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000)
+			priv->hwptp->init_systime(priv,
+				timespec64_to_ns(&now));
+	}
+
+	memcpy(&priv->tstamp_config, &config, sizeof(config));
+
+	return copy_to_user(ifr->ifr_data, &config,
+			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+/**
+ *  emac_hwtstamp_get - read hardware timestamping.
+ *  @dev: device pointer.
+ *  @ifr: An IOCTL specific structure, that can contain a pointer to
+ *  a proprietary structure used to pass information to the driver.
+ *  Description:
+ *  This function obtains the current hardware timestamping settings
+ *  as requested.
+ */
+static int emac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config *config = &priv->tstamp_config;
+
+	if (!priv->ptp_support)
+		return -EOPNOTSUPP;
+
+	return copy_to_user(ifr->ifr_data, config,
+			    sizeof(*config)) ? -EFAULT : 0;
+}
+
+/* Name		emac_ioctl
+ * Arguments	pstNetdev : pointer to net_device structure
+ *		pstIfReq : pointer to interface request structure used.
+ *		u32Cmd : IOCTL command number
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	It is called by the upper layer to
+ *		handle various IOCTL commands.
+ */
+static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	int ret = -EOPNOTSUPP;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (!ndev->phydev)
+			return -EINVAL;
+		ret = phy_mii_ioctl(ndev->phydev, rq, cmd);
+		break;
+	case SIOCSHWTSTAMP:
+		ret = emac_hwtstamp_set(ndev, rq);
+		break;
+	case SIOCGHWTSTAMP:
+		ret = emac_hwtstamp_get(ndev, rq);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static irqreturn_t emac_wakeup_handler(int irq, void *dev_id)
+{
+	struct net_device *ndev = (struct net_device *)dev_id;
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 ctrl;
+
+	emac_set_axi_bus_clock(priv, 1);
+	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+	if (!(ctrl & (MREGBIT_UNICAST_WAKEUP_MODE |
+			MREGBIT_MAGIC_PACKET_WAKEUP_MODE)))
+		return IRQ_NONE;
+
+	ctrl &= ~(MREGBIT_UNICAST_WAKEUP_MODE |
+		MREGBIT_MAGIC_PACKET_WAKEUP_MODE);
+	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t emac_irq_tso(int irq, void *dev_id)
+{
+	struct net_device *ndev = (struct net_device *)dev_id;
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 status;
+
+	/* handle rx */
+	status = emac_rd_tso(priv, TSO_AP_RX_INTR_STS);
+	if (status) {
+		emac_print("TSO_AP_RX_INTR_STS=0x%x", status);
+
+		if (status & TSO_AP_RX_INTR_ENA_CSUM_DONE) {
+#ifdef CONFIG_ASR_EMAC_NAPI
+			if (likely(napi_schedule_prep(&priv->rx_napi))) {
+				unsigned long flags;
+
+				spin_lock_irqsave(&priv->intr_lock, flags);
+				emac_disable_interrupt(priv, 0);
+				spin_unlock_irqrestore(&priv->intr_lock, flags);
+				__napi_schedule(&priv->rx_napi);
+			}
+#else
+			emac_rx_clean_desc(priv);
+#endif
+		}
+
+#ifdef EMAC_DEBUG
+		if (status & TSO_AP_RX_INTR_ENA_CSUM_ERR)
+			pr_err("rx checksum err irq\n");
+#endif
+		/* clear rx status */
+		emac_wr_tso(priv, TSO_AP_RX_INTR_STS, status);
+	}
+
+	/* handle tx */
+	status = emac_rd_tso(priv, TSO_AP_TX_INTR_STS);
+	if (status) {
+		emac_print("TSO_AP_TX_INTR_STS=0x%x\n", status);
+		if (status & TSO_AP_TX_INTR_TSO_DONE) {
+			emac_print("TX TSO done\n");
+			emac_dma_start_transmit(priv);
+		}
+
+		if (status & TSO_AP_TX_INTR_CSUM_DONE) {
+			emac_print("TX checksum done\n");
+			emac_dma_start_transmit(priv);
+		}
+
+		/* clear tx status */
+		emac_wr_tso(priv, TSO_AP_TX_INTR_STS, status);
+	}
+
+	/* handle err */
+	status = emac_rd_tso(priv, TSO_ERR_INTR_STS);
+	if (status) {
+		pr_err("TSO: TX/RX ERR, status=0x%x\n", status);
+		emac_wr_tso(priv, TSO_ERR_INTR_STS, status);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Name		emac_interrupt_handler
+ * Arguments	irq : irq number for which the interrupt is fired
+ *		dev_id : pointer passed to request_irq; the same pointer is
+ *		passed back to the handler
+ * Return	irqreturn_t : integer value
+ * Description	Interrupt handler routine for DMA events (TX/RX transfer
+ *		done, missed frames, stopped DMA and unavailable descriptors).
+ */
+static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
+{
+	struct net_device *ndev = (struct net_device *)dev_id;
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 status;
+	u32 clr = 0;
+
+	/* read the status register for IRQ received */
+	status = emac_rd(priv, DMA_STATUS_IRQ);
+
+	/* Check if emac is up */
+	if (test_bit(EMAC_DOWN, &priv->state)) {
+		emac_wr(priv, DMA_STATUS_IRQ, status & 0x1F7);
+		return IRQ_HANDLED;
+	}
+
+	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
+		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
+#ifdef CONFIG_ASR_EMAC_NAPI
+		if (likely(napi_schedule_prep(&priv->tx_napi))) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&priv->intr_lock, flags);
+			emac_disable_interrupt(priv, 1);
+			spin_unlock_irqrestore(&priv->intr_lock, flags);
+			__napi_schedule(&priv->tx_napi);
+		}
+#else
+		emac_tx_clean_desc(priv);
+#endif
+	}
+
+	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
+		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
+
+	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
+		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
+
+	if (status & (MREGBIT_RECEIVE_TRANSFER_DONE_IRQ |
+			MREGBIT_RECEIVE_MISSED_FRAME_IRQ)) {
+		if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ)
+			clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
+
+		if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
+			clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
+
+		if (priv->tso)
+			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+		if (likely(napi_schedule_prep(&priv->rx_napi))) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&priv->intr_lock, flags);
+			emac_disable_interrupt(priv, 0);
+			spin_unlock_irqrestore(&priv->intr_lock, flags);
+			__napi_schedule(&priv->rx_napi);
+		}
+#else
+		emac_rx_clean_desc(priv);
+#endif
+	}
+
+	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
+		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
+
+	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
+		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
+
+	emac_wr(priv, DMA_STATUS_IRQ, clr);
+
+	return IRQ_HANDLED;
+}
+
+/* Name		emac_command_options
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Chooses the default TX/RX ring sizes; low-memory
+ *		systems get a smaller RX ring.
+ */
+void emac_command_options(struct emac_priv *priv)
+{
+	int pages = totalram_pages();
+
+	if (pages <= (EMAC_SMALL_RING_MEM_LIMIT >> PAGE_SHIFT))
+		priv->rx_ring.total_cnt = EMAC_SMALL_RX_RING_SIZE;
+	else
+		priv->rx_ring.total_cnt = EMAC_RX_RING_SIZE;
+	priv->tx_ring.total_cnt = EMAC_TX_RING_SIZE;
+
+	pr_info("emac: rx_ring=%d, tx_ring=%d, pages=%d\n",
+		priv->rx_ring.total_cnt, priv->tx_ring.total_cnt, pages);
+}
+
+/* Name		emac_configure_tx
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Configures the transmit unit of the device
+ */
+static void emac_configure_tx(struct emac_priv *priv)
+{
+	u32 val;
+
+	/* set the transmit base address */
+	val = (u32)(priv->tx_ring.desc_dma_addr);
+
+	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
+
+	/* Tx Inter Packet Gap value and enable the transmit */
+	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
+	val &= (~MREGBIT_IFG_LEN);
+	val |= MREGBIT_TRANSMIT_ENABLE;
+	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
+	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
+
+	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00);
+
+	/* start tx dma */
+	val = emac_rd(priv, DMA_CONTROL);
+	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
+	emac_wr(priv, DMA_CONTROL, val);
+}
+
+/* Name		emac_configure_rx
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Configures the receive unit of the device
+ */
+static void emac_configure_rx(struct emac_priv *priv)
+{
+	u32 val;
+
+	/* set the receive base address */
+	val = (u32)(priv->rx_ring.desc_dma_addr);
+	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
+
+	/* enable the receive */
+	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
+	val |= MREGBIT_RECEIVE_ENABLE;
+	val |= MREGBIT_STORE_FORWARD;
+	val |= MREGBIT_ACOOUNT_VLAN;
+	emac_wr(priv, MAC_RECEIVE_CONTROL, val);
+
+	/* start rx dma */
+	val = emac_rd(priv, DMA_CONTROL);
+	val |= MREGBIT_START_STOP_RECEIVE_DMA;
+	emac_wr(priv, DMA_CONTROL, val);
+}
+
+/* Name		emac_clean_tx_desc_ring
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Freeing the TX resources allocated earlier.
+ */
+static void emac_clean_tx_desc_ring(struct emac_priv *priv)
+{
+	struct emac_desc_ring *tx_ring = &priv->tx_ring;
+	struct emac_desc_buffer *tx_buf;
+	u32 i;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->total_cnt; i++) {
+		tx_buf = &tx_ring->desc_buf[i];
+
+		if (tx_buf->dma_addr) {
+			dma_unmap_page(&priv->pdev->dev,
+				       tx_buf->dma_addr,
+				       tx_buf->dma_len,
+				       DMA_TO_DEVICE);
+			tx_buf->dma_addr = 0;
+		}
+
+		if (tx_buf->skb) {
+			dev_kfree_skb_any(tx_buf->skb);
+			tx_buf->skb = NULL;
+		}
+	}
+
+	tx_ring->nxt_use = 0;
+	tx_ring->nxt_clean = 0;
+}
+
+/* Name		emac_clean_rx_desc_ring
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Freeing the RX resources allocated earlier.
+ */
+static void emac_clean_rx_desc_ring(struct emac_priv *priv)
+{
+	struct emac_desc_ring *rx_ring;
+	struct emac_desc_buffer *rx_buf;
+	u32 i;
+
+	rx_ring = &priv->rx_ring;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->total_cnt; i++) {
+		rx_buf = &rx_ring->desc_buf[i];
+		if (rx_buf->skb) {
+			emac_unmap_single(&priv->pdev->dev,
+					 rx_buf->dma_addr,
+					 rx_buf->dma_len,
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb(rx_buf->skb);
+			rx_buf->skb = NULL;
+		}
+
+		if (rx_buf->buff_addr) {
+#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
+			kfree(rx_buf->buff_addr);
+#endif
+			rx_buf->buff_addr = NULL;
+		}
+	}
+
+	rx_ring->nxt_clean = 0;
+	rx_ring->nxt_use = 0;
+}
+
+void emac_ptp_init(struct emac_priv *priv)
+{
+	int ret;
+
+	if (priv->ptp_support) {
+		ret = clk_prepare_enable(priv->ptp_clk);
+		if (ret < 0) {
+			pr_warning("ptp clock failed to enable \n");
+			priv->ptp_clk = NULL;
+		}
+
+		emac_ptp_register(priv);
+
+		if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+			priv->ptp_support = 0;
+			pr_warning("disable PTP due to clock not enabled\n");
+		}
+	}
+}
+
+void emac_ptp_deinit(struct emac_priv *priv)
+{
+	if (priv->ptp_support) {
+		if (priv->ptp_clk)
+			clk_disable_unprepare(priv->ptp_clk);
+
+		emac_ptp_unregister(priv);
+	}
+}
+
+static void emac_rx_timer_arm(struct emac_priv *priv)
+{
+	u32 rx_fill_timer = EMAC_RX_FILL_TIMER_US;
+
+	if (!rx_fill_timer)
+		return;
+
+	if (hrtimer_is_queued(&priv->rx_timer))
+		return;
+
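+	/*
+	 * ktime_t is a plain s64 nanosecond count, so multiplying the
+	 * ns_to_ktime() result by NSEC_PER_USEC programs the timer in
+	 * microseconds.
+	 */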
+	hrtimer_start(&priv->rx_timer,
+		      ns_to_ktime(rx_fill_timer) * NSEC_PER_USEC,
+		      HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart emac_rx_timer(struct hrtimer *t)
+{
+	struct emac_priv *priv = container_of(t, struct emac_priv, rx_timer);
+	struct napi_struct *napi = &priv->rx_napi;
+
+	if (likely(napi_schedule_prep(napi))) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->intr_lock, flags);
+		emac_disable_interrupt(priv, 0);
+		spin_unlock_irqrestore(&priv->intr_lock, flags);
+		__napi_schedule(napi);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void emac_tx_timer_arm(struct emac_priv *priv)
+{
+	u32 tx_coal_timer = EMAC_TX_COAL_TIMER_US;
+
+	if (!tx_coal_timer)
+		return;
+
+	if (hrtimer_is_queued(&priv->tx_timer))
+		return;
+
+	hrtimer_start(&priv->tx_timer,
+		      ns_to_ktime(tx_coal_timer) * NSEC_PER_USEC,
+		      HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart emac_tx_timer(struct hrtimer *t)
+{
+	struct emac_priv *priv = container_of(t, struct emac_priv, tx_timer);
+	struct napi_struct *napi = &priv->tx_napi;
+
+	if (priv->tso) {
+		emac_dma_start_transmit(priv);
+		return HRTIMER_NORESTART;
+	}
+
+	if (likely(napi_schedule_prep(napi))) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->intr_lock, flags);
+		emac_disable_interrupt(priv, 1);
+		spin_unlock_irqrestore(&priv->intr_lock, flags);
+		__napi_schedule(napi);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static int emac_tso_config(struct emac_priv *priv)
+{
+	struct emac_desc_ring * tx_ring = &priv->tx_ring;
+	u32 val = 0;
+
+	/* reset */
+	emac_wr_tso(priv, TSO_CONFIG, TSO_CONFIG_RST);
+	mdelay(1);
+	emac_wr_tso(priv, TSO_CONFIG, 0x0);
+
+	emac_wr_tso(priv, TSO_DMA_CONFIG, 0x2 << 8);
+
+	/* rx */
+	/* set the transmit base address */
+	val = (u32)(priv->rx_ring.desc_dma_addr);
+	emac_wr_tso(priv, TSO_RX_DESC_BA, val >> 1);
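+	/*
+	 * TSO_RX_DESC_BA, TSO_TX_DESC_BA and TSO_TX_HDR_BA are all programmed
+	 * shifted right by one, which suggests the TSO block takes base
+	 * addresses in 16-bit units (an assumption inferred from the shift,
+	 * not from documentation).
+	 */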
+	emac_wr_tso(priv, TSO_RX_AUTO_POLL_CNT, 0x0);
+
+	/* tx */
+	val = (u32)(priv->tx_ring.desc_dma_addr);
+	emac_wr_tso(priv, TSO_TX_DESC_BA, val >> 1);
+
+	priv->tso_hdr = dma_alloc_coherent(&priv->pdev->dev,
+					 tx_ring->total_cnt * 0x80,
+					 &priv->tso_hdr_addr,
+					 GFP_KERNEL | __GFP_ZERO);
+	if (!priv->tso_hdr) {
+		pr_err("Memory allocation failed for tso_hdr\n");
+		return -ENOMEM;
+	}
+
+	val = (u32)(priv->tso_hdr_addr);
+	emac_wr_tso(priv, TSO_TX_HDR_BA, val >> 1);
+	emac_wr_tso(priv, TSO_TX_HDR_CTR, tx_ring->total_cnt);
+	emac_wr_tso(priv, TSO_TX_AUTO_POLL_CNT, 0x0);
+
+	/* enable tx/rx tso/coe */
+	emac_wr_tso(priv, TSO_CONFIG,
+		TSO_CONFIG_RX_EN | TSO_CONFIG_TX_EN | TSO_CONFIG_RX_CSUM_EN);
+
+	/* enable tx/rx/err interrupt */
+	emac_wr_tso(priv, TSO_ERR_INTR_ENA, 0xF0007);
+	emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
+		TSO_AP_RX_INTR_ENA_CSUM_DONE | TSO_AP_RX_INTR_ENA_CSUM_ERR);
+#if 1
+	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA,
+		TSO_AP_TX_INTR_ENA_TSO_DONE | TSO_AP_TX_INTR_ENA_CSUM_DONE);
+#else
+	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA, 0x0);
+#endif
+	return 0;
+}
+
+/* Name		emac_up
+ * Arguments	priv : pointer to driver private data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	This function is called from emac_open and
+ *		performs the work needed to bring the net interface up.
+ *		It configures the Tx and Rx units of the device and
+ *		registers the interrupt handlers.
+ *		It also starts a watchdog timer to monitor
+ *		the net interface link status.
+ */
+int emac_up(struct emac_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	int ret, val;
+#ifdef WAN_LAN_AUTO_ADAPT
+	u32 phy_id;
+#endif
+
+	priv->hw_stats->tx_tso_pkts = 0;
+	priv->hw_stats->tx_tso_bytes = 0;
+
+	ret = emac_phy_connect(ndev);
+	if (ret) {
+		pr_err("%s  phy_connet failed\n", __func__);
+		return ret;
+	}
+
+	if (!priv->en_suspend)
+		pm_stay_awake(&priv->pdev->dev);
+	pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
+
+	clk_phase_set(priv, TX_PHASE);
+	clk_phase_set(priv, RX_PHASE);
+
+	/* init hardware */
+	emac_init_hw(priv);
+
+	emac_ptp_init(priv);
+
+	emac_set_mac_addr(priv, ndev->dev_addr);
+
+	emac_set_fc_source_addr(priv, ndev->dev_addr);
+
+	/* configure transmit unit */
+	emac_configure_tx(priv);
+	/* configure rx unit */
+	emac_configure_rx(priv);
+
+	/* allocate buffers for receive descriptors */
+	emac_alloc_rx_desc_buffers(priv);
+
+	if (ndev->phydev)
+		phy_start(ndev->phydev);
+
+	/* allocates interrupt resources and
+	 * enables the interrupt line and IRQ handling
+	 */
+	ret = request_irq(priv->irq, emac_interrupt_handler,
+			  IRQF_SHARED, ndev->name, ndev);
+	if (ret) {
+		pr_err("request_irq failed, ret=%d\n", ret);
+		goto request_irq_failed;
+	}
+
+	if (priv->irq_wakeup) {
+		ret = request_irq(priv->irq_wakeup, emac_wakeup_handler,
+				  IRQF_SHARED, ndev->name, ndev);
+		if (ret) {
+			pr_err("request wakeup_irq failed, ret=%d\\n", ret);
+			goto request_wakeup_irq_failed;
+		}
+	}
+
+	if (priv->irq_tso) {
+		ret = request_irq(priv->irq_tso, emac_irq_tso,
+				  IRQF_SHARED, "emac_tso", ndev);
+		if (ret) {
+			pr_err("request tso failed, ret=%d\\n", ret);
+			goto request_tso_irq_failed;
+		}
+	}
+
+	if (priv->fix_link)
+		emac_set_speed_duplex(priv);
+
+	clear_bit(EMAC_DOWN, &priv->state);
+
+	/* enable mac interrupt */
+	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
+
+	/* both rx tx */
+	val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
+		MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+		MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
+#if 0
+	val |= MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
+		MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
+		MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE;
+#endif
+	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+	napi_enable(&priv->rx_napi);
+	napi_enable(&priv->tx_napi);
+#endif
+
+	if (priv->fix_link && !netif_carrier_ok(ndev))
+		netif_carrier_on(ndev);
+
+#ifdef WAN_LAN_AUTO_ADAPT
+	phy_id = ndev->phydev->phy_id;
+	if(phy_id == IP175D_PHY_ID)
+		emac_sig_workq(CARRIER_UP_IP175D, 0);
+	else
+		emac_sig_workq(CARRIER_UP, 0);
+#endif
+
+	hrtimer_init(&priv->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	priv->tx_timer.function = emac_tx_timer;
+	hrtimer_init(&priv->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	priv->rx_timer.function = emac_rx_timer;
+
+	if (priv->tso)
+		emac_tso_config(priv);
+
+	netif_tx_start_all_queues(ndev);
+	return 0;
+
+request_tso_irq_failed:
+	if (priv->irq_wakeup)
+		free_irq(priv->irq_wakeup, ndev);
+
+request_wakeup_irq_failed:
+	free_irq(priv->irq, ndev);
+
+request_irq_failed:
+	if (ndev->phydev) {
+		phy_stop(ndev->phydev);
+		phy_disconnect(ndev->phydev);
+	}
+
+	return ret;
+}
+
+/* Name		emac_down
+ * Arguments	priv : pointer to driver private data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	This function is called from emac_close and
+ *		performs the work needed to bring the net interface down.
+ *		It frees the IRQs and cancels the various timers.
+ *		It turns the net interface off,
+ *		resets the hardware and cleans the Tx and Rx
+ *		descriptor rings.
+ */
+int emac_down(struct emac_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+#ifdef WAN_LAN_AUTO_ADAPT
+	u32 phy_id;
+
+	priv->dhcp  = 0;
+	priv->vlan_port = -1;
+	priv->link = 0;
+	phy_id = ndev->phydev->phy_id;
+	if(priv->dhcp_delaywork){
+		cancel_delayed_work(&priv->dhcp_work);
+		priv->dhcp_delaywork = 0;
+	}
+#endif
+	set_bit(EMAC_DOWN, &priv->state);
+
+	netif_tx_disable(ndev);
+
+	hrtimer_cancel(&priv->tx_timer);
+	hrtimer_cancel(&priv->rx_timer);
+	/* Stop and disconnect the PHY */
+	if (ndev->phydev) {
+		phy_stop(ndev->phydev);
+		phy_disconnect(ndev->phydev);
+	}
+
+	if (!priv->fix_link) {
+		priv->duplex = DUPLEX_UNKNOWN;
+		priv->speed = SPEED_UNKNOWN;
+	}
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+	napi_disable(&priv->rx_napi);
+	napi_disable(&priv->tx_napi);
+#endif
+	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
+	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);
+
+	free_irq(priv->irq, ndev);
+	if (priv->irq_wakeup)
+		free_irq(priv->irq_wakeup, ndev);
+
+	emac_ptp_deinit(priv);
+
+	emac_reset_hw(priv);
+	netif_carrier_off(ndev);
+
+#ifdef WAN_LAN_AUTO_ADAPT
+	if(phy_id == IP175D_PHY_ID)
+		emac_sig_workq(CARRIER_DOWN_IP175D, 0);
+	else
+		emac_sig_workq(CARRIER_DOWN, 0);
+#endif
+
+#ifdef CONFIG_ASR_EMAC_DDR_QOS
+	flush_work(&priv->qos_work);
+	pm_qos_update_request(&priv->clk_scaling.ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+	pm_qos_update_request(&priv->pm_qos_req,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+
+	if (!priv->en_suspend)
+		pm_relax(&priv->pdev->dev);
+
+	if (priv->tso) {
+		dma_free_coherent(&priv->pdev->dev,
+				  priv->tx_ring.total_cnt * 0x80,
+				  priv->tso_hdr,
+				  priv->tso_hdr_addr);
+	}
+
+	return 0;
+}
+
+/* Name		emac_alloc_tx_resources
+ * Arguments	priv : pointer to driver private data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	Allocates TX resources and obtains their virtual & physical addresses.
+ */
+int emac_alloc_tx_resources(struct emac_priv *priv)
+{
+	struct emac_desc_ring *tx_ring = &priv->tx_ring;
+	struct platform_device *pdev  = priv->pdev;
+	u32 size;
+
+	size = sizeof(struct emac_desc_buffer) * tx_ring->total_cnt;
+
+	/* allocate memory */
+	tx_ring->desc_buf = kzalloc(size, GFP_KERNEL);
+	if (!tx_ring->desc_buf) {
+		pr_err("Memory allocation failed for the Transmit descriptor buffer\n");
+		return -ENOMEM;
+	}
+
+	memset(tx_ring->desc_buf, 0, size);
+
+	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc);
+
+	EMAC_ROUNDUP(tx_ring->total_size, 1024);
+
+	if (priv->sram_pool) {
+		tx_ring->desc_addr =
+			(void *)gen_pool_dma_alloc(
+				priv->sram_pool, tx_ring->total_size,
+				&tx_ring->desc_dma_addr);
+		tx_ring->in_sram = true;
+	}
+
+	if (!tx_ring->desc_addr) {
+		tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
+							tx_ring->total_size,
+							&tx_ring->desc_dma_addr,
+							GFP_KERNEL | __GFP_ZERO);
+		if (!tx_ring->desc_addr) {
+			pr_err("Memory allocation failed for the Transmit descriptor ring\n");
+			kfree(tx_ring->desc_buf);
+			return -ENOMEM;
+		}
+
+		if (priv->sram_pool) {
+			pr_err("sram pool left size not enough, tx fallback\n");
+			tx_ring->in_sram = false;
+		}
+	}
+
+	memset(tx_ring->desc_addr, 0, tx_ring->total_size);
+
+	tx_ring->nxt_use = 0;
+	tx_ring->nxt_clean = 0;
+
+	return 0;
+}
+
+/* Name		emac_alloc_rx_resources
+ * Arguments	priv	: pointer to driver private data structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	Allocates RX resources and obtains their virtual & physical addresses.
+ */
+int emac_alloc_rx_resources(struct emac_priv *priv)
+{
+	struct emac_desc_ring *rx_ring = &priv->rx_ring;
+	struct platform_device *pdev  = priv->pdev;
+	u32 buf_len;
+
+	buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt;
+
+	rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!rx_ring->desc_buf) {
+		pr_err("Memory allocation failed for the Receive descriptor buffer\n");
+		return -ENOMEM;
+	}
+
+	memset(rx_ring->desc_buf, 0, buf_len);
+
+	/* round up to nearest 1K */
+	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc);
+
+	EMAC_ROUNDUP(rx_ring->total_size, 1024);
+
+	if (priv->sram_pool) {
+		rx_ring->desc_addr =
+			(void *)gen_pool_dma_alloc(
+				priv->sram_pool, rx_ring->total_size,
+				&rx_ring->desc_dma_addr);
+		rx_ring->in_sram = true;
+	}
+
+	if (!rx_ring->desc_addr) {
+		rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
+							rx_ring->total_size,
+							&rx_ring->desc_dma_addr,
+							GFP_KERNEL | __GFP_ZERO);
+		if (!rx_ring->desc_addr) {
+			pr_err("Memory allocation failed for the Receive descriptor ring\n");
+			kfree(rx_ring->desc_buf);
+			return -ENOMEM;
+		}
+
+		if (priv->sram_pool) {
+			pr_err("sram pool left size not enough, rx fallback\n");
+			rx_ring->in_sram = false;
+		}
+	}
+
+	memset(rx_ring->desc_addr, 0, rx_ring->total_size);
+
+	rx_ring->nxt_use = 0;
+	rx_ring->nxt_clean = 0;
+
+	return 0;
+}
+
+/* Name		emac_free_tx_resources
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Frees the Tx resources allocated
+ */
+void emac_free_tx_resources(struct emac_priv *priv)
+{
+	emac_clean_tx_desc_ring(priv);
+	kfree(priv->tx_ring.desc_buf);
+	priv->tx_ring.desc_buf = NULL;
+	if (priv->tx_ring.in_sram)
+		gen_pool_free(priv->sram_pool,
+			      (unsigned long) priv->tx_ring.desc_addr,
+			      priv->tx_ring.total_size);
+	else
+		dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size,
+					priv->tx_ring.desc_addr,
+					priv->tx_ring.desc_dma_addr);
+	priv->tx_ring.desc_addr = NULL;
+}
+
+/* Name		emac_free_rx_resources
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Frees the Rx resources allocated
+ */
+void emac_free_rx_resources(struct emac_priv *priv)
+{
+	emac_clean_rx_desc_ring(priv);
+	kfree(priv->rx_ring.desc_buf);
+	priv->rx_ring.desc_buf = NULL;
+	if (priv->rx_ring.in_sram)
+		gen_pool_free(priv->sram_pool,
+			      (unsigned long) priv->rx_ring.desc_addr,
+			      priv->rx_ring.total_size);
+	else
+		dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size,
+					priv->rx_ring.desc_addr,
+					priv->rx_ring.desc_dma_addr);
+	priv->rx_ring.desc_addr = NULL;
+}
+
+/* Name		emac_open
+ * Arguments	pstNetdev : pointer to net_device structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	This function is called when net interface is made up.
+ *		Setting up Tx and Rx
+ *		resources and making the interface up.
+ */
+static int emac_open(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = emac_alloc_tx_resources(priv);
+	if (ret) {
+		pr_err("Error in setting up the Tx resources\n");
+		goto emac_alloc_tx_resource_fail;
+	}
+
+	ret = emac_alloc_rx_resources(priv);
+	if (ret) {
+		pr_err("Error in setting up the Rx resources\n");
+		goto emac_alloc_rx_resource_fail;
+	}
+
+	ret = emac_up(priv);
+	if (ret) {
+		pr_err("Error in making the net intrface up\n");
+		goto emac_up_fail;
+	}
+	return 0;
+
+emac_up_fail:
+	emac_free_rx_resources(priv);
+emac_alloc_rx_resource_fail:
+	emac_free_tx_resources(priv);
+emac_alloc_tx_resource_fail:
+	emac_reset_hw(priv);
+	return ret;
+}
+
+/* Name		emac_close
+ * Arguments	pstNetdev : pointer to net_device structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	This function is called when net interface is made down.
+ *		It calls the appropriate functions to
+ *		free Tx and Rx resources.
+ */
+static int emac_close(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	emac_down(priv);
+	emac_free_tx_resources(priv);
+	emac_free_rx_resources(priv);
+
+	return 0;
+}
+
+/* Name		emac_tx_clean_desc
+ * Arguments	priv : pointer to driver private data structure
+ * Return	number of descriptors cleaned
+ * Description	Reclaims completed TX descriptors and frees their buffers.
+ */
+#ifdef CONFIG_ASR_EMAC_NAPI
+static int emac_tx_clean_desc(struct emac_priv *priv, int budget)
+#else
+static int emac_tx_clean_desc(struct emac_priv *priv)
+#endif
+{
+	struct emac_desc_ring *tx_ring;
+	struct emac_tx_desc *tx_desc, *end_desc;
+	struct emac_desc_buffer *tx_buf;
+	struct net_device *ndev = priv->ndev;
+	u32 i, u32LastIndex;
+	u8 u8Cleaned;
+	unsigned int count = 0;
+
+	tx_ring = &priv->tx_ring;
+	i = tx_ring->nxt_clean;
+	do {
+		if (i == tx_ring->nxt_use)
+			break;
+
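+		/* nxt_watch holds the index of this packet's last descriptor */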
+		u32LastIndex = tx_ring->desc_buf[i].nxt_watch;
+		end_desc = emac_get_tx_desc(priv, u32LastIndex);
+		if (end_desc->OWN == 1 ||
+		    (priv->tso && (end_desc->tso || end_desc->coe)))
+			break;
+
+		u8Cleaned = false;
+		for ( ; !u8Cleaned; count++) {
+			tx_desc = emac_get_tx_desc(priv, i);
+			tx_buf = &tx_ring->desc_buf[i];
+
+			emac_get_tx_hwtstamp(priv, tx_buf->skb);
+
+			/* own bit will be reset to 0 by dma
+			 * once packet is transmitted
+			 */
+			if (tx_buf->dma_addr) {
+				dma_unmap_page(&priv->pdev->dev,
+					       tx_buf->dma_addr,
+					       tx_buf->dma_len,
+					       DMA_TO_DEVICE);
+				tx_buf->dma_addr = 0;
+			}
+			if (tx_buf->skb) {
+				dev_kfree_skb_any(tx_buf->skb);
+				tx_buf->skb = NULL;
+			}
+			if (tx_buf->buff_addr)
+				tx_buf->buff_addr = NULL;
+
+			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
+			u8Cleaned = (i == u32LastIndex);
+			if (++i == tx_ring->total_cnt)
+				i = 0;
+		}
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+		if (count >= budget) {
+			count = budget;
+			break;
+		}
+#endif
+	} while (1);
+	tx_ring->nxt_clean = i;
+
+#ifndef CONFIG_ASR_EMAC_NAPI
+	spin_lock(&priv->spTxLock);
+#endif
+	if (unlikely(count && netif_queue_stopped(ndev) &&
+		    netif_carrier_ok(ndev) &&
+		    EMAC_DESC_UNUSED(tx_ring) >= EMAC_TX_WAKE_THRESHOLD))
+		netif_wake_queue(ndev);
+#ifndef CONFIG_ASR_EMAC_NAPI
+	spin_unlock(&priv->spTxLock);
+#endif
+	return count;
+}
+
+static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc)
+{
+	/* if the Last Descriptor bit isn't set, drop the frame */
+	if (!dsc->LastDescriptor) {
+		netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n");
+		return frame_discard;
+	}
+
+	/*
+	 * A frame shorter than 64 bytes (from DA through the FCS field)
+	 * is considered a runt frame.
+	 * Most runt frames are caused by collisions.
+	 */
+	if (dsc->ApplicationStatus & EMAC_RX_FRAME_RUNT) {
+		netdev_dbg(priv->ndev, "rx frame less than 64.\n");
+		return frame_discard;
+	}
+
+	/*
+	 * A frame that fails the CRC check is flagged
+	 * with a CRC error.
+	 */
+	if (dsc->ApplicationStatus & EMAC_RX_FRAME_CRC_ERR) {
+		netdev_dbg(priv->ndev, "rx frame crc error\n");
+		return frame_discard;
+	}
+
+	if (priv->tso && dsc->csum_res == EMAC_CSUM_FAIL) {
+		netdev_dbg(priv->ndev, "COE: rx frame checksum error\n");
+		return frame_discard;
+	}
+
+	/*
+	 * When the length of the frame exceeds
+	 * the Programmed Max Frame Length
+	 */
+	if (dsc->ApplicationStatus & EMAC_RX_FRAME_MAX_LEN_ERR) {
+		netdev_dbg(priv->ndev, "rx frame too long\n");
+		return frame_discard;
+	}
+
+	/*
+	 * Reception was truncated at this point, so the frame
+	 * is considered to have a jabber error.
+	 */
+	if (dsc->ApplicationStatus & EMAC_RX_FRAME_JABBER_ERR) {
+		netdev_dbg(priv->ndev, "rx frame has been truncated\n");
+		return frame_discard;
+	}
+
+	/* this bit is only for 802.3 Type Frames */
+	if (dsc->ApplicationStatus & EMAC_RX_FRAME_LENGTH_ERR) {
+		netdev_dbg(priv->ndev, "rx frame length err for 802.3\n");
+		return frame_discard;
+	}
+
+	if (dsc->FramePacketLength <= ETHERNET_FCS_SIZE ||
+	    dsc->FramePacketLength > EMAC_RX_BUFFER_2048) {
+		netdev_dbg(priv->ndev, "rx frame len too small or too long\n");
+		return frame_discard;
+	}
+	return frame_ok;
+}
+
+/* Name		emac_rx_clean_desc
+ * Arguments	priv : pointer to driver private data structure
+ * Return	number of packets passed up the stack (0 when not using NAPI)
+ * Description	Processes completed RX descriptors and refills the ring.
+ */
+#ifdef CONFIG_ASR_EMAC_NAPI
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
+#else
+static int emac_rx_clean_desc(struct emac_priv *priv)
+#endif
+{
+	struct emac_desc_ring *rx_ring;
+	struct emac_desc_buffer *rx_buf;
+	struct net_device *ndev = priv->ndev;
+	struct emac_rx_desc *rx_desc;
+	struct sk_buff *skb = NULL;
+	int status;
+#ifdef CONFIG_ASR_EMAC_NAPI
+	u32 receive_packet = 0;
+#endif
+	u32 i;
+	u32 u32Len;
+	u32 u32Size;
+	u8 *pu8Data;
+#ifdef WAN_LAN_AUTO_ADAPT
+	int port = -1, vlan = -1;
+	struct vlan_hdr *vhdr;
+	struct iphdr *iph = NULL;
+	struct udphdr *udph = NULL;
+#endif
+
+	rx_ring = &priv->rx_ring;
+	i = rx_ring->nxt_clean;
+	rx_desc = emac_get_rx_desc(priv, i);
+	u32Size = 0;
+
+	if (priv->pause.tx_pause && !priv->pause.fc_auto)
+		emac_check_ring_and_send_pause(priv);
+
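+	/* OWN == 0 means the DMA engine has handed the descriptor back to software */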
+	while (rx_desc->OWN == 0) {
+		if (priv->tso && !rx_desc->csum_done)
+			break;
+
+		if (skb_queue_len(&priv->rx_skb) > priv->rx_ring.total_cnt)
+			break;
+
+		rx_buf = &rx_ring->desc_buf[i];
+		if (!rx_buf->skb)
+			break;
+
+		emac_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+					rx_buf->dma_len, DMA_FROM_DEVICE);
+		status = emac_rx_frame_status(priv, rx_desc);
+		if (unlikely(status == frame_discard)) {
+			ndev->stats.rx_dropped++;
+			dev_kfree_skb_irq(rx_buf->skb);
+			rx_buf->skb = NULL;
+		} else {
+			skb = rx_buf->skb;
+			u32Len = rx_desc->FramePacketLength - ETHERNET_FCS_SIZE;
+
+			pu8Data = skb_put(skb, u32Len);
+#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
+			memcpy(pu8Data, (u8 *)rx_buf->buff_addr, u32Len);
+#endif
+			skb->dev = ndev;
+			ndev->hard_header_len = ETH_HLEN;
+
+			emac_get_rx_hwtstamp(priv, rx_desc, skb);
+
+			skb->protocol = eth_type_trans(skb, ndev);
+			if (priv->tso)
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			else
+				skb->ip_summed = CHECKSUM_NONE;
+
+#ifdef WAN_LAN_AUTO_ADAPT
+			{
+				/*
+				 * Special tag format: DA-SA-0x81-xx-data.
+				 * Bit 7-3 Packet Information
+				 * - bit 4: Reserved
+				 * - bit 3: Reserved
+				 * - bit 2: Miss address table
+				 * - bit 1: Security violation
+				 * - bit 0: VLAN violation
+				 * Bit 2-0 Ingress Port number
+				 * - b000: Disabled
+				 * - b001: Port 0
+				 * - b010: Port 1
+				 * - b011: Port 2
+				 * - b100: Port 3
+				 * - b101: Port 4
+				 * - Other: Reserved
+				 */
+				if (ntohs(skb->protocol) >> 8 == 0x81) {
+					port = ntohs(skb->protocol) & 0x7;
+					if (port > 0 && port <= 0x5) {
+						skb->protocol = htons(ETH_P_8021Q);
+						port = port - 1;
+					}
+				}
+				if (skb->protocol == htons(ETH_P_8021Q)) {
+					vhdr = (struct vlan_hdr *) skb->data;
+					vlan = ntohs(vhdr->h_vlan_TCI);
+					iph = (struct iphdr *)(skb->data + VLAN_HLEN);
+				} else if (skb->protocol == htons(ETH_P_IP))
+					iph = (struct iphdr *)skb->data;
+
+				if (iph && iph->protocol == IPPROTO_UDP) {
+					udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
+					if (ntohs(udph->dest) == 68 && ntohs(udph->source) == 67) {
+						u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
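+						/* DHCP message type option at fixed
+						 * offset 242; assumes a full-sized
+						 * DHCP packet (no bounds check)
+						 */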
+						u8 dhcp_type = *(udp_data + 242);
+						if ((DHCP_ACK == dhcp_type || DHCP_OFFER == dhcp_type)
+								&& (DHCP_SEND_REQ == priv->dhcp)) {
+							priv->dhcp = DHCP_REC_RESP;
+							if (ndev->phydev->phy_id == IP175D_PHY_ID)
+								priv->vlan_port = port;
+							else
+								priv->vlan_port = -1;
+						}
+					}
+				}
+			}
+#endif
+			skb_queue_tail(&priv->rx_skb, skb);
+			rx_buf->skb = NULL;
+		}
+
+		if (++i == rx_ring->total_cnt)
+			i = 0;
+
+		rx_desc = emac_get_rx_desc(priv, i);
+
+		/* restart RX COE */
+		if (priv->tso)
+			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
+	}
+
+	rx_ring->nxt_clean = i;
+
+	emac_alloc_rx_desc_buffers(priv);
+
+	/*
+	 * netif_rx() may take a long time, so refill the RX DMA
+	 * descriptors first (emac_alloc_rx_desc_buffers above) to
+	 * reduce the chance of packet loss.
+	 */
+	while ((skb = skb_dequeue(&priv->rx_skb))) {
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += skb->len;
+#ifdef CONFIG_ASR_EMAC_NAPI
+		napi_gro_receive(&priv->rx_napi, skb);
+#else
+		netif_rx(skb);
+#endif
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+		receive_packet++;
+		if (receive_packet >= budget)
+			break;
+#endif
+	}
+
+#ifdef CONFIG_ASR_EMAC_DDR_QOS
+	emac_ddr_clk_scaling(priv);
+#endif
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+	return receive_packet;
+#else
+	return 0;
+#endif
+}
+
+/* Name		emac_alloc_rx_desc_buffers
+ * Arguments	priv : pointer to driver private data structure
+ * Return	none
+ * Description	Allocates skbs for empty RX descriptors and hands them to DMA.
+ */
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	struct emac_desc_ring *rx_ring = &priv->rx_ring;
+	struct emac_desc_buffer *rx_buf;
+	struct sk_buff *skb;
+	struct emac_rx_desc *rx_desc;
+	u32 i;
+#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
+	void *buff;
+#endif
+	u32 buff_len;
+	int fail_cnt = 0;
+
+	i = rx_ring->nxt_use;
+	rx_buf = &rx_ring->desc_buf[i];
+
+	buff_len = priv->u32RxBufferLen;
+
+	while (!rx_buf->skb) {
+		skb = emac_skbrb_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
+		if (!skb) {
+			if (priv->rx_ring.total_cnt == EMAC_RX_RING_SIZE)
+				skb = dev_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
+			if (!skb) {
+				fail_cnt++;
+				pr_warn_ratelimited("emac sk_buff allocation failed\n");
+				break;
+			}
+		}
+
+		/* reserve headroom so the payload buffer is aligned */
+		skb_reserve(skb, NET_IP_ALIGN + EMAC_EXTRA_ROOM);
+		skb->dev = ndev;
+
+#ifdef CONFIG_ASR_EMAC_RX_NO_COPY
+		rx_buf->buff_addr = skb->data;
+#else
+		if (!rx_buf->buff_addr) {
+			buff = kmalloc(buff_len, GFP_ATOMIC | GFP_DMA);
+			if (!buff) {
+				pr_err("kmalloc failed\n");
+				dev_kfree_skb(skb);
+				break;
+			}
+			rx_buf->buff_addr = buff;
+		}
+#endif
+		rx_buf->skb = skb;
+		rx_buf->dma_len = buff_len;
+		rx_buf->dma_addr = emac_map_single(&priv->pdev->dev,
+						  rx_buf->buff_addr,
+						  buff_len,
+						  DMA_FROM_DEVICE);
+
+		rx_desc = emac_get_rx_desc(priv, i);
+		rx_desc->BufferAddr1 = rx_buf->dma_addr;
+		rx_desc->BufferSize1 = rx_buf->dma_len;
+		rx_desc->rx_timestamp = 0;
+		rx_desc->ptp_pkt = 0;
+		rx_desc->FirstDescriptor = 0;
+		rx_desc->LastDescriptor = 0;
+		rx_desc->FramePacketLength = 0;
+		rx_desc->ApplicationStatus = 0;
+		if (++i == rx_ring->total_cnt) {
+			rx_desc->EndRing = 1;
+			i = 0;
+		}
+
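+		/* make sure all descriptor fields are written before DMA sees OWN */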
+		if (priv->tso)
+			rx_desc->csum_done = 0;
+
+		wmb();
+		rx_desc->OWN = 1;
+
+		rx_buf = &rx_ring->desc_buf[i];
+	}
+	rx_ring->nxt_use = i;
+
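+	/* record allocation failures so the poll path can re-arm the refill timer */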
+	if (fail_cnt)
+		priv->refill = 1;
+	else
+		priv->refill = 0;
+	emac_dma_start_receive(priv);
+}
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+static int emac_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct emac_priv *priv = container_of(napi, struct emac_priv, rx_napi);
+	int work_done;
+
+	work_done = emac_rx_clean_desc(priv, budget);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->intr_lock, flags);
+		emac_enable_interrupt(priv, 0);
+		spin_unlock_irqrestore(&priv->intr_lock, flags);
+
+		if (priv->refill)
+			emac_rx_timer_arm(priv);
+	}
+
+	return work_done;
+}
+
+static int emac_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct emac_priv *priv = container_of(napi, struct emac_priv, tx_napi);
+	int work_done;
+
+	work_done = emac_tx_clean_desc(priv, budget);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->intr_lock, flags);
+		emac_enable_interrupt(priv, 1);
+		spin_unlock_irqrestore(&priv->intr_lock, flags);
+	}
+
+	return work_done;
+}
+#endif
+
+/* Name		emac_tx_mem_map
+ * Arguments	priv : pointer to driver private data structure
+ *		pstSkb : pointer to sk_buff structure passed by upper layer
+ *		max_tx_len : max data len per descriptor
+ *		frag_num : number of fragments in the packet
+ * Return	number of descriptors needed for transmitting packet
+ * Description
+ */
+static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb,
+			   u32 max_tx_len, u32 frag_num, int ioc)
+{
+	struct emac_desc_ring *tx_ring;
+	struct emac_desc_buffer *tx_buf;
+	struct emac_tx_desc *tx_desc, *first_desc;
+	u32 skb_len;
+	u32 u32Offset, u32Size, i;
+	u32 use_desc_cnt;
+	u32 f;
+	void *pvPtr;
+	u32 cur_desc_addr;
+	u32 cur_desc_idx;
+	u8 do_tx_timestamp = 0;
+	bool use_buf2 = 0;
+
+	u32Offset = 0;
+	use_desc_cnt = 0;
+
+	skb_tx_timestamp(skb);
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+			priv->hwts_tx_en)) {
+		/* declare that device is doing timestamping */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		do_tx_timestamp = 1;
+	}
+
+	tx_ring = &priv->tx_ring;
+	skb_len = skb->len - skb->data_len;
+	i = cur_desc_idx = tx_ring->nxt_use;
+	cur_desc_addr = emac_rd(priv, DMA_TRANSMIT_BASE_ADDRESS);
+	while (skb_len > 0) {
+		u32Size = min(skb_len, max_tx_len);
+		skb_len -= u32Size;
+
+		tx_buf = &tx_ring->desc_buf[i];
+		tx_buf->dma_len = u32Size;
+		pvPtr = skb->data + u32Offset;
+		tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pvPtr,
+						u32Size, DMA_TO_DEVICE);
+		tx_buf->buff_addr = pvPtr;
+		tx_buf->ulTimeStamp = jiffies;
+
+		tx_desc = emac_get_tx_desc(priv, i);
+
+		if (use_buf2) {
+			tx_desc->BufferAddr2 = tx_buf->dma_addr;
+			tx_desc->BufferSize2 = tx_buf->dma_len;
+			i++;
+			use_buf2 = 0;
+		} else {
+			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
+			tx_desc->BufferAddr1 = tx_buf->dma_addr;
+			tx_desc->BufferSize1 = tx_buf->dma_len;
+			use_buf2 = 1;
+		}
+
+		if (use_desc_cnt == 0) {
+			first_desc = tx_desc;
+			tx_desc->FirstSegment = 1;
+			if (do_tx_timestamp)
+				tx_desc->tx_timestamp = 1;
+		}
+
+		if (skb_len == 0 && frag_num == 0) {
+			tx_desc->LastSegment = 1;
+			tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
+		}
+
+		if (!use_buf2 && i == tx_ring->total_cnt) {
+			tx_desc->EndRing = 1;
+			i = 0;
+		}
+
+		/* trigger first desc OWN bit later */
+		use_desc_cnt++;
+		if (use_desc_cnt > 2)
+			tx_desc->OWN = 1;
+
+		u32Offset += u32Size;
+	}
+
+	/* if the data is fragmented */
+	for (f = 0; f < frag_num; f++) {
+		skb_frag_t *frag;
+
+		frag = &(skb_shinfo(skb)->frags[f]);
+		skb_len = skb_frag_size(frag);
+		u32Offset = skb_frag_off(frag);
+
+		while (skb_len) {
+			u32Size = min(skb_len, max_tx_len);
+			skb_len -= u32Size;
+
+			tx_buf = &tx_ring->desc_buf[i];
+			tx_buf->dma_len = u32Size;
+			tx_buf->dma_addr =
+				dma_map_page(&priv->pdev->dev,
+					skb_frag_page(frag),
+					u32Offset,
+					u32Size,
+					DMA_TO_DEVICE);
+			tx_buf->ulTimeStamp = jiffies;
+
+			tx_desc = emac_get_tx_desc(priv, i);
+			if (use_buf2) {
+				tx_desc->BufferAddr2 = tx_buf->dma_addr;
+				tx_desc->BufferSize2 = tx_buf->dma_len;
+				i++;
+				use_buf2 = 0;
+			} else {
+				memset(tx_desc, 0, sizeof(struct emac_tx_desc));
+				tx_desc->BufferAddr1 = tx_buf->dma_addr;
+				tx_desc->BufferSize1 = tx_buf->dma_len;
+				use_buf2 = 1;
+			}
+
+			if (skb_len == 0 && f == (frag_num - 1)) {
+				tx_desc->LastSegment = 1;
+				tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
+			}
+
+			if (!use_buf2 && i == tx_ring->total_cnt) {
+				tx_desc->EndRing = 1;
+				i = 0;
+			}
+
+			/* trigger first desc OWN bit later */
+			use_desc_cnt++;
+			if (use_desc_cnt > 2)
+				tx_desc->OWN = 1;
+
+			u32Offset += u32Size;
+		}
+	}
+
+	if (use_buf2 && ++i == tx_ring->total_cnt) {
+		tx_desc->EndRing = 1;
+		i = 0;
+	}
+
+	tx_ring->desc_buf[cur_desc_idx].skb = skb;
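+	/* nxt_watch = index of this packet's last descriptor (previous slot, wrapped) */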
+	tx_ring->desc_buf[cur_desc_idx].nxt_watch =
+		(i == 0 ? tx_ring->total_cnt : 0) + i - 1;
+
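+	/* publish all descriptor writes before granting ownership of the first one */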
+	wmb();
+
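+	/* note: first_desc is set while mapping the linear data, which
+	 * assumes the skb always has a non-empty linear area
+	 */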
+	first_desc->OWN = 1;
+
+	emac_dma_start_transmit(priv);
+
+	tx_ring->nxt_use = i;
+	return use_desc_cnt;
+}
+
+static int emac_prepare_tso_desc(struct emac_priv *priv, int idx,
+				bool tso, bool coe,
+				u32 addr, int payload, u8 hlen, int mss,
+				bool fst, bool last, bool ioc, bool ts,
+				u32 *cnt)
+{
+	struct emac_desc_ring *tx_ring = &priv->tx_ring;
+	struct emac_tx_desc *pdesc;
+
+	pdesc = emac_get_tx_desc(priv, idx);
+	if (tso) {
+		if (fst && hlen) {
+			emac_set_buf1_addr_len(pdesc, addr, 0);
+			payload -= hlen;
+			addr += hlen;
+		}
+		emac_set_buf2_addr_len(pdesc, addr, payload);
+	} else {
+		emac_set_buf1_addr_len(pdesc, addr, payload);
+	}
+
+	if (fst) {
+		emac_tx_desc_set_fd(pdesc);
+	} else {
+		if (tso)
+			emac_tx_desc_set_offload(pdesc, 1, 1, 1);
+		else if (coe)
+			emac_tx_desc_set_offload(pdesc, 0, 1, 0);
+		else
+			emac_tx_desc_set_offload(pdesc, 1, 0, 0);
+	}
+
+	if (ts)
+		emac_tx_desc_set_ts(pdesc);
+
+	if (last) {
+		/* last segment */
+		emac_tx_desc_set_ld(pdesc);
+		if (ioc)
+			emac_tx_desc_set_ioc(pdesc);
+	}
+
+	print_desc((void *)pdesc, 16);
+	if (payload <= 0)
+		return idx;
+
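+	/* step through the descriptors this buffer spans: for TSO, one per MSS-sized chunk */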
+	do {
+		(*cnt)++;
+
+		if (++idx == tx_ring->total_cnt) {
+			emac_tx_desc_set_ring_end(pdesc);
+			idx = 0;
+		}
+
+		if (!tso)
+			break;
+
+		payload -= mss;
+		if (payload <= 0)
+			break;
+
+		pdesc = emac_get_tx_desc(priv, idx);
+		emac_tx_desc_set_offload(pdesc, 1, 1, 0);
+
+		print_desc((void *)pdesc, 16);
+	} while (1);
+
+	return idx;
+}
+
+static int emac_tso_xmit(struct sk_buff *skb, struct net_device *ndev,
+				bool tso, bool coe)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct emac_desc_ring *tx_ring = &priv->tx_ring;
+	struct emac_desc_buffer *tx_buf;
+	struct emac_tx_desc *pdesc;
+	skb_frag_t *frag;
+	u32 desc_cnt, frag_num, f, mss, fst;
+	u32 offset, i;
+	u8 hlen;
+	int skb_len, payload;
+	void *pbuf;
+	int ioc;
+	u8 timestamp = 0;
+#ifndef CONFIG_ASR_EMAC_NAPI
+	unsigned long ulFlags;
+#endif
+
+	frag_num = skb_shinfo(skb)->nr_frags;
+	skb_len = skb->len - skb->data_len;
+	if (tso) {
+		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mss = skb_shinfo(skb)->gso_size;
+		desc_cnt = (skb_len / mss) + 1;
+		for (f = 0; f < frag_num; f++) {
+			frag = &skb_shinfo(skb)->frags[f];
+			desc_cnt += (skb_frag_size(frag) / mss) + 1;
+		}
+	} else {
+		hlen = 0;
+		mss = 0;
+		desc_cnt = EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
+		for (i = 0; i < frag_num; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			desc_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
+					MAX_DATA_PWR_TX_DES);
+		}
+	}
+
+	emac_print("%s: skb=0x%x, skb->len=%d skb_len=%d mss=%d frag_num=%d hlen=%d\n",
+		__func__, (unsigned)skb, skb->len, skb_len, mss, frag_num, hlen);
+
+#ifdef EMAC_DEBUG
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, skb->data, skb_len, 0);
+#endif
+	/* disable hard interrupt on local CPUs */
+#ifndef CONFIG_ASR_EMAC_NAPI
+	local_irq_save(ulFlags);
+#endif
+	if (!spin_trylock(&priv->spTxLock)) {
+		pr_err("Collision detected\n");
+#ifndef CONFIG_ASR_EMAC_NAPI
+		local_irq_restore(ulFlags);
+#endif
+		return NETDEV_TX_BUSY;
+	}
+
+	/* check whether sufficient free descriptors are there */
+	if (EMAC_DESC_UNUSED(tx_ring) < (desc_cnt + 2)) {
+		pr_err_ratelimited("TSO Descriptors are not free\n");
+		netif_stop_queue(ndev);
+#ifndef CONFIG_ASR_EMAC_NAPI
+		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+		spin_unlock(&priv->spTxLock);
+#endif
+		return NETDEV_TX_BUSY;
+	}
+
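+	/* coalesce TX completion interrupts: request IOC for timestamped
+	 * packets and at least once every EMAC_TX_FRAMES descriptors
+	 */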
+	priv->tx_count_frames += desc_cnt;
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		priv->hwts_tx_en))
+		ioc = 1;
+	else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
+		ioc = 1;
+	else
+		ioc = 0;
+
+	if (ioc)
+		priv->tx_count_frames = 0;
+
+	skb_tx_timestamp(skb);
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+			priv->hwts_tx_en)) {
+		/* declare that device is doing timestamping */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		timestamp = 1;
+	}
+
+	offset = 0;
+	desc_cnt = 0;
+	i = fst = tx_ring->nxt_use;
+	do {
+		payload = min(skb_len, TSO_MAX_SEG_SIZE);
+
+		tx_buf = &tx_ring->desc_buf[i];
+		tx_buf->dma_len = payload;
+		pbuf = skb->data + offset;
+		tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pbuf,
+						payload, DMA_TO_DEVICE);
+		tx_buf->buff_addr = pbuf;
+		tx_buf->ulTimeStamp = jiffies;
+
+		skb_len -= payload;
+		offset += payload;
+
+		i = emac_prepare_tso_desc(priv, i, tso, coe,
+				tx_buf->dma_addr, payload, hlen, mss,
+				(i == fst), (skb_len == 0 && frag_num == 0),
+				ioc, timestamp, &desc_cnt);
+	} while (skb_len > 0);
+
+	/* if the data is fragmented */
+	for (f = 0; f < frag_num; f++) {
+		frag = &(skb_shinfo(skb)->frags[f]);
+		skb_len = skb_frag_size(frag);
+		offset = skb_frag_off(frag);
+
+		emac_print("%s: frag %d len=%d\n", __func__, f, skb_len);
+#ifdef EMAC_DEBUG
+		{
+			u8 *vaddr;
+
+			vaddr = kmap_atomic(skb_frag_page(frag));
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
+					32, 1, vaddr + offset, skb_len, 0);
+			kunmap_atomic(vaddr);
+		}
+#endif
+		do {
+			payload = min(skb_len, TSO_MAX_SEG_SIZE);
+
+			tx_buf = &tx_ring->desc_buf[i];
+			tx_buf->dma_len = payload;
+			//pbuf = skb->data + offset;
+			tx_buf->dma_addr = dma_map_page(&priv->pdev->dev,
+						skb_frag_page(frag),
+						offset, payload,
+						DMA_TO_DEVICE);
+			tx_buf->ulTimeStamp = jiffies;
+
+			skb_len -= payload;
+			offset += payload;
+
+			i = emac_prepare_tso_desc(priv, i, tso, coe,
+					tx_buf->dma_addr, payload, 0, mss,
+					(i == fst),
+					(skb_len == 0 && f == (frag_num - 1)),
+					ioc, timestamp, &desc_cnt);
+		} while (skb_len > 0);
+	}
+
+	tx_ring->desc_buf[fst].skb = skb;
+	tx_ring->desc_buf[fst].nxt_watch =
+		(i == 0 ? tx_ring->total_cnt : 0) + i - 1;
+
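+	/* publish all descriptor writes before arming the first descriptor below */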
+	wmb();
+
+	/* set first descriptor for this packet */
+	pdesc = emac_get_tx_desc(priv, fst);
+	emac_tx_update_fst_desc(pdesc, hlen, mss, tso, coe);
+	print_desc((void *)pdesc, 16);
+
+	tx_ring->nxt_use = i;
+
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
+	if (tso) {
+		priv->hw_stats->tx_tso_pkts++;
+		priv->hw_stats->tx_tso_bytes += skb->len;
+	}
+
+	emac_wr_tso(priv, TSO_TX_POLL_DEMAND, 0xFF);
+	/* Make sure there is space in the ring for the next send. */
+	if (EMAC_DESC_UNUSED(tx_ring) < (MAX_SKB_FRAGS + 2)) {
+		pr_debug_ratelimited("TSO Descriptors not enough, stop\n");
+		netif_stop_queue(ndev);
+	}
+
+#ifndef CONFIG_ASR_EMAC_NAPI
+	spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+	spin_unlock(&priv->spTxLock);
+#endif
+#ifdef CONFIG_ASR_EMAC_DDR_QOS
+	emac_ddr_clk_scaling(priv);
+#endif
+
+	if (!tso && !coe)
+		emac_tx_timer_arm(priv);
+
+	return NETDEV_TX_OK;
+}
+
+/* Name		emac_start_xmit
+ * Arguments	pstSkb : pointer to sk_buff structure passed by upper layer
+ *		pstNetdev : pointer to net_device structure
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	Called by the upper layer to hand over a Tx packet
+ *		to the driver, which maps it onto DMA descriptors
+ *		and starts transmission.
+ */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	int ioc;
+	u32 frag_num;
+	u32 skb_len;
+	u32 tx_des_cnt = 0;
+	u32 i;
+#ifndef CONFIG_ASR_EMAC_NAPI
+	unsigned long ulFlags;
+#endif
+#ifdef WAN_LAN_AUTO_ADAPT
+	int vlan = 0;
+	struct iphdr *iph = NULL;
+	struct udphdr *udph = NULL;
+	struct vlan_hdr *vhdr;
+
+	{	struct ethhdr *myeth = (struct ethhdr *)skb->data;
+		if (myeth->h_proto == htons(ETH_P_8021Q)) {
+			vhdr = (struct vlan_hdr *)((u8 *)myeth + sizeof(struct ethhdr));
+			vlan = ntohs(vhdr->h_vlan_TCI);
+			iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr) + VLAN_HLEN);
+		}
+		else if (myeth->h_proto == htons(ETH_P_IP))
+			iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr));
+
+		if (iph && iph->protocol == IPPROTO_UDP) {
+			udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
+			if (ntohs(udph->dest) == 67 && ntohs(udph->source) == 68) {
+				u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
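+				/* DHCP message type option at fixed offset 242;
+				 * assumes a full-sized DHCP packet (no bounds check)
+				 */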
+				u8 dhcp_type = *(udp_data + 242);
+				if ((DHCP_DISCOVER == dhcp_type || DHCP_REQUEST == dhcp_type)
+						&& (0 == priv->dhcp)) {
+					priv->dhcp = DHCP_SEND_REQ;
+					if (ndev->phydev->phy_id == IP175D_PHY_ID)
+						priv->vlan_port = vlan;
+					else
+						priv->vlan_port = -1;
+				}
+			}
+		}
+	}
+#endif
+
+	/* skb->len: full length of the data in the packet
+	 * skb->data_len: number of bytes in skb fragments
+	 * skb_len: length of the linear (first) part
+	 */
+	skb_len = skb->len - skb->data_len;
+
+	if (skb->len == 0) {
+		pr_err("Packet length is zero\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (priv->tso) {
+		bool tso = false, coe = false;
+
+		if (skb_is_gso(skb) &&
+		    (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+			tso = true;
+			coe = true;
+		} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			coe = true;
+		}
+
+		/* workaround: COE needs skb->data to be 2-byte aligned */
+		if (coe && !IS_ALIGNED((unsigned long)skb->data, 2))
+			pskb_expand_head(skb, 1, 0, GFP_ATOMIC);
+
+		return emac_tso_xmit(skb, ndev, tso, coe);
+	}
+
+	/* increment the count if len exceeds MAX_DATA_LEN_TX_DES */
+	tx_des_cnt += EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
+
+	frag_num = skb_shinfo(skb)->nr_frags;
+
+	for (i = 0; i < frag_num; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		tx_des_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
+				       MAX_DATA_PWR_TX_DES);
+	}
+
+	/* disable hard interrupt on local CPUs */
+#ifndef CONFIG_ASR_EMAC_NAPI
+	local_irq_save(ulFlags);
+#endif
+	if (!spin_trylock(&priv->spTxLock)) {
+		pr_err("Collision detected\n");
+#ifndef CONFIG_ASR_EMAC_NAPI
+		local_irq_restore(ulFlags);
+#endif
+		return NETDEV_TX_BUSY;
+	}
+
+	/* check whether sufficient free descriptors are there */
+	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (tx_des_cnt + 2)) {
+		pr_err_ratelimited("Descriptors are not free\n");
+		netif_stop_queue(ndev);
+#ifndef CONFIG_ASR_EMAC_NAPI
+		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+		spin_unlock(&priv->spTxLock);
+#endif
+		return NETDEV_TX_BUSY;
+	}
+
+	priv->tx_count_frames += frag_num + 1;
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		priv->hwts_tx_en))
+		ioc = 1;
+	else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
+		ioc = 1;
+	else
+		ioc = 0;
+
+	if (ioc)
+		priv->tx_count_frames = 0;
+
+	tx_des_cnt = emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, frag_num, ioc);
+	if (tx_des_cnt == 0) {
+		pr_err("Could not acquire memory from pool\n");
+		netif_stop_queue(ndev);
+#ifndef CONFIG_ASR_EMAC_NAPI
+		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+		spin_unlock(&priv->spTxLock);
+#endif
+		return NETDEV_TX_BUSY;
+	}
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
+
+	/* Make sure there is space in the ring for the next send. */
+	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (MAX_SKB_FRAGS + 2))
+		netif_stop_queue(ndev);
+
+#ifndef CONFIG_ASR_EMAC_NAPI
+	spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+	spin_unlock(&priv->spTxLock);
+#endif
+#ifdef CONFIG_ASR_EMAC_DDR_QOS
+	emac_ddr_clk_scaling(priv);
+#endif
+	emac_tx_timer_arm(priv);
+	return NETDEV_TX_OK;
+}
+
+u32 ReadTxStatCounters(struct emac_priv *priv, u8 cnt)
+{
+	u32 val, tmp;
+
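+	/* bit 15 kicks off the counter read; the hardware clears it when
+	 * the data registers are valid (busy-wait, no timeout)
+	 */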
+	val = 0x8000 | cnt;
+	emac_wr(priv, MAC_TX_STATCTR_CONTROL, val);
+	val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
+
+	while (val & 0x8000)
+		val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
+
+	tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH);
+	val = tmp << 16;
+	tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW);
+	val |= tmp;
+
+	return val;
+}
+
+u32 ReadRxStatCounters(struct emac_priv *priv, u8 cnt)
+{
+	u32 val, tmp;
+
+	val = 0x8000 | cnt;
+	emac_wr(priv, MAC_RX_STATCTR_CONTROL, val);
+	val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
+
+	while (val & 0x8000)
+		val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
+
+	tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH);
+	val = tmp << 16;
+	tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW);
+	val |= tmp;
+	return val;
+}
+
+/* Name		emac_set_mac_address
+ * Arguments	pstNetdev	: pointer to net_device structure
+ *		addr : pointer to addr
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	It is called by upper layer to set the mac address.
+ */
+static int emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct sockaddr *sa = addr;
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
+
+	emac_set_mac_addr(priv, ndev->dev_addr);
+
+	emac_set_fc_source_addr(priv, ndev->dev_addr);
+
+	return 0;
+}
+
+/* Name		emac_change_mtu
+ * Arguments	pstNetdev : pointer to net_device structure
+ *		u32MTU	: maximum transmit unit value
+ * Return	Status: 0 - Success;  non-zero - Fail
+ * Description	It is called by upper layer to set the MTU value.
+ */
+static int emac_change_mtu(struct net_device *ndev, int mtu)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	u32 frame_len;
+
+	if (netif_running(ndev)) {
+		pr_err("must be stopped to change its MTU\n");
+		return -EBUSY;
+	}
+
+	frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+	if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE ||
+	    frame_len > EMAC_SKBRB_MAX_PAYLOAD) {
+		pr_err("Invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	if (frame_len <= EMAC_RX_BUFFER_1024)
+		priv->u32RxBufferLen = EMAC_RX_BUFFER_1024;
+	else
+		priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
+
+	ndev->mtu = mtu;
+
+	return 0;
+}
+
+static void emac_reset(struct emac_priv *priv)
+{
+	if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state))
+		return;
+	if (test_bit(EMAC_DOWN, &priv->state))
+		return;
+
+	netdev_dbg(priv->ndev, "Reset controller.\n");
+
+	rtnl_lock();
+	//netif_trans_update(priv->ndev);
+	while (test_and_set_bit(EMAC_RESETING, &priv->state))
+		usleep_range(1000, 2000);
+
+	dev_close(priv->ndev);
+	dev_open(priv->ndev, NULL);
+	clear_bit(EMAC_RESETING, &priv->state);
+	rtnl_unlock();
+}
+
+static void emac_tx_timeout_task(struct work_struct *work)
+{
+	struct emac_priv *priv = container_of(work,
+					      struct emac_priv, tx_timeout_task);
+	emac_reset(priv);
+	clear_bit(EMAC_TASK_SCHED, &priv->state);
+}
+
+/* Name		emac_tx_timeout
+ * Arguments	pstNetdev : pointer to net_device structure
+ * Return	none
+ * Description	It is called by upper layer
+ *		for packet transmit timeout.
+ */
+static void emac_tx_timeout(struct net_device *ndev)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+
+	netdev_info(ndev, "TX timeout\n");
+	register_dump(priv);
+
+	netif_carrier_off(priv->ndev);
+	set_bit(EMAC_RESET_REQUESTED, &priv->state);
+
+	if (!test_bit(EMAC_DOWN, &priv->state) &&
+	    !test_and_set_bit(EMAC_TASK_SCHED, &priv->state))
+		schedule_work(&priv->tx_timeout_task);
+}
+
+static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem* apmu;
+	u32 val;
+
+	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+	if (apmu == NULL) {
+		pr_err("error to ioremap APMU base\n");
+		return -ENOMEM;
+	}
+
+	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+	if (enable) {
+		val |= 0x1;
+	} else {
+		val &= ~0x1;
+	}
+	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+	iounmap(apmu);
+	return 0;
+}
+
+static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem* apmu;
+	u32 val, dline;
+	u8 phase = 0, tmp;
+
+	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+	if (apmu == NULL) {
+		pr_err("error to ioremap APMU base\n");
+		return -ENOMEM;
+	}
+
+	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+	if (is_tx) {
+		if (regdata->rgmii_tx_clk_src_sel_shift > 0) {
+			phase = (priv->tx_clk_config >> 16) & 0x1;
+			val &= ~(0x1 << regdata->rgmii_tx_clk_src_sel_shift);
+			val |= phase << regdata->rgmii_tx_clk_src_sel_shift;
+		}
+
+		if (regdata->rgmii_tx_dline_reg_offset > 0) {
+			/* Set RGMII TX DLINE */
+			dline = readl(apmu + regdata->rgmii_tx_dline_reg_offset);
+
+			/* delay code */
+			tmp = (priv->tx_clk_config >> 8) &
+				regdata->rgmii_tx_delay_code_mask;
+			dline &= ~(regdata->rgmii_tx_delay_code_mask <<
+					regdata->rgmii_tx_delay_code_shift);
+			dline |= tmp << regdata->rgmii_tx_delay_code_shift;
+
+			/* delay step */
+			tmp = priv->tx_clk_config &
+				regdata->rgmii_tx_delay_step_mask;
+			dline &= ~(regdata->rgmii_tx_delay_step_mask <<
+					regdata->rgmii_tx_delay_step_shift);
+			dline |= tmp << regdata->rgmii_tx_delay_step_shift;
+
+			/* delay line enable */
+			dline |= 1 << regdata->rgmii_tx_delay_enable_shift;
+			writel(dline, apmu + regdata->rgmii_tx_dline_reg_offset);
+			pr_info("===> emac set tx dline 0x%x 0x%x", dline,
+				readl(apmu + regdata->rgmii_tx_dline_reg_offset));
+		}
+	} else {
+		if (regdata->rgmii_rx_clk_src_sel_shift > 0) {
+			phase = (priv->rx_clk_config >> 16) & 0x1;
+			val &= ~(0x1 << regdata->rgmii_rx_clk_src_sel_shift);
+			val |= phase << regdata->rgmii_rx_clk_src_sel_shift;
+		}
+
+		/* Set RGMII RX DLINE */
+		if (regdata->rgmii_rx_dline_reg_offset > 0) {
+			dline = readl(apmu + regdata->rgmii_rx_dline_reg_offset);
+
+			/* delay code */
+			tmp = (priv->rx_clk_config >> 8) &
+				regdata->rgmii_rx_delay_code_mask;
+			dline &= ~(regdata->rgmii_rx_delay_code_mask <<
+					regdata->rgmii_rx_delay_code_shift);
+			dline |= tmp << regdata->rgmii_rx_delay_code_shift;
+
+			/* delay step */
+			tmp = priv->rx_clk_config &
+				regdata->rgmii_rx_delay_step_mask;
+			dline &= ~(regdata->rgmii_rx_delay_step_mask <<
+					regdata->rgmii_rx_delay_step_shift);
+			dline |= tmp << regdata->rgmii_rx_delay_step_shift;
+
+			/* delay line enable */
+			dline |= 1 << regdata->rgmii_rx_delay_enable_shift;
+			writel(dline, apmu + regdata->rgmii_rx_dline_reg_offset);
+			pr_info("===> emac set rx dline 0x%x 0x%x", dline,
+				readl(apmu + regdata->rgmii_rx_dline_reg_offset));
+		}
+	}
+	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+	pr_info("%s phase:%d direction:%s 0x%x 0x%x\n", __func__, phase,
+		is_tx ? "tx": "rx", val,
+		readl(apmu + regdata->clk_rst_ctrl_reg_offset));
+
+	iounmap(apmu);
+	return 0;
+}
+
+static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx)
+{
+	const struct emac_regdata *regdata = priv->regdata;
+	void __iomem* apmu;
+	u32 val;
+	u8 tmp;
+
+	apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+	if (apmu == NULL) {
+		pr_err("error to ioremap APMU base\n");
+		return -ENOMEM;
+	}
+
+	val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+	if (is_tx) {
+		/* rmii tx clock select */
+		if (regdata->rmii_tx_clk_sel_shift > 0) {
+			tmp = (priv->tx_clk_config >> 16) & 0x1;
+			val &= ~(0x1 << regdata->rmii_tx_clk_sel_shift);
+			val |= tmp << regdata->rmii_tx_clk_sel_shift;
+		}
+
+		/* rmii ref clock select, 1 - from soc, 0 - from phy */
+		if (regdata->rmii_ref_clk_sel_shift) {
+			tmp = (priv->tx_clk_config >> 24) & 0x1;
+			val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
+			val |= tmp << regdata->rmii_ref_clk_sel_shift;
+		}
+	} else {
+		/* rmii rx clock select */
+		if (regdata->rmii_rx_clk_sel_shift > 0) {
+			tmp = (priv->rx_clk_config >> 16) & 0x1;
+			val &= ~(0x1 << regdata->rmii_rx_clk_sel_shift);
+			val |= tmp << regdata->rmii_rx_clk_sel_shift;
+		}
+
+		/* rmii ref clock select, 1 - from soc, 0 - from phy */
+		if (regdata->rmii_ref_clk_sel_shift) {
+			tmp = (priv->tx_clk_config >> 24) & 0x1;
+			val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
+			val |= tmp << regdata->rmii_ref_clk_sel_shift;
+		}
+	}
+
+	writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+	pr_debug("%s phase:%d direction:%s\n", __func__, phase,
+		is_tx ? "tx": "rx");
+
+	iounmap(apmu);
+	return 0;
+}
+
+static int clk_phase_set(struct emac_priv *priv, bool is_tx)
+{
+	if (emac_is_rmii_interface(priv)) {
+		clk_phase_rmii_set(priv, is_tx);
+	} else {
+		clk_phase_rgmii_set(priv, is_tx);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int clk_phase_show(struct seq_file *s, void *data)
+{
+	struct emac_priv *priv = s->private;
+	bool rmii_intf;
+	rmii_intf = emac_is_rmii_interface(priv);
+
+	seq_printf(s, "Emac MII Interface : %s\n", rmii_intf ? "RMII" : "RGMII");
+	seq_printf(s, "Current rx clk config : %d\n", priv->rx_clk_config);
+	seq_printf(s, "Current tx clk config : %d\n", priv->tx_clk_config);
+	return 0;
+}
+
+static ssize_t clk_tuning_write(struct file *file,
+				const char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct emac_priv *priv =
+				((struct seq_file *)(file->private_data))->private;
+	int err;
+	int clk_phase;
+	char buff[TUNING_CMD_LEN] = { 0 };
+	char mode_str[20];
+
+	if (count >= TUNING_CMD_LEN) {
+		pr_err("command must be shorter than %d bytes\n", TUNING_CMD_LEN);
+		return count;
+	}
+	if (copy_from_user(buff, user_buf, count))
+		return -EFAULT;
+
+	err = sscanf(buff, "%19s %d", mode_str, &clk_phase);
+	if (err != 2) {
+		pr_err("debugfs para count error\n");
+		return count;
+	}
+	pr_info("input:%s %d\n", mode_str, clk_phase);
+
+	if (strcmp(mode_str, "tx") == 0) {
+		priv->tx_clk_config = clk_phase;
+		clk_phase_set(priv, TX_PHASE);
+	} else if (strcmp(mode_str, "rx") == 0) {
+		priv->rx_clk_config = clk_phase;
+		clk_phase_set(priv, RX_PHASE);
+	} else {
+		pr_err("command error\n");
+		pr_err("eg: echo rx 1 > clk_tuning\n");
+		return count;
+	}
+
+	return count;
+}
+
+static int clk_tuning_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, clk_phase_show, inode->i_private);
+}
+
+const struct file_operations clk_tuning_fops = {
+	.open		= clk_tuning_open,
+	.write		= clk_tuning_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif
+
+static int emac_power_down(struct emac_priv *priv)
+{
+	if (priv->rst_gpio >= 0)
+		gpio_direction_output(priv->rst_gpio,
+					priv->low_active_rst ? 0 : 1);
+
+	if (priv->ldo_gpio >= 0)
+		gpio_direction_output(priv->ldo_gpio,
+					priv->low_active_ldo ? 0 : 1);
+
+	return 0;
+}
+
+static int emac_power_up(struct emac_priv *priv)
+{
+	u32 *delays_ldo = priv->delays_ldo;
+	u32 *delays_rst = priv->delays_rst;
+	int rst_gpio = priv->rst_gpio;
+	int low_active_rst = priv->low_active_rst;
+	int ldo_gpio = priv->ldo_gpio;
+	int low_active_ldo = priv->low_active_ldo;
+
+	if (rst_gpio >= 0) {
+		gpio_direction_output(rst_gpio, low_active_rst ? 0 : 1);
+	}
+
+	if (ldo_gpio >= 0) {
+		gpio_direction_output(ldo_gpio, low_active_ldo ? 0 : 1);
+		if (delays_ldo[0]) {
+			gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
+			msleep(DIV_ROUND_UP(delays_ldo[0], 1000));
+		}
+
+		gpio_set_value(ldo_gpio, low_active_ldo ? 0 : 1);
+		if (delays_ldo[1])
+			msleep(DIV_ROUND_UP(delays_ldo[1], 1000));
+
+		gpio_set_value(ldo_gpio, low_active_ldo ? 1 : 0);
+		if (delays_ldo[2])
+			msleep(DIV_ROUND_UP(delays_ldo[2], 1000));
+	}
+
+	if (rst_gpio >= 0) {
+		if (delays_rst[0]) {
+			gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
+			msleep(DIV_ROUND_UP(delays_rst[0], 1000));
+		}
+
+		gpio_set_value(rst_gpio, low_active_rst ? 0 : 1);
+		if (delays_rst[1])
+			msleep(DIV_ROUND_UP(delays_rst[1], 1000));
+
+		gpio_set_value(rst_gpio, low_active_rst ? 1 : 0);
+		if (delays_rst[2])
+			msleep(DIV_ROUND_UP(delays_rst[2], 1000));
+	}
+
+	return 0;
+}
+
+static int emac_mii_reset(struct mii_bus *bus)
+{
+	struct emac_priv *priv = bus->priv;
+	struct device *dev = &priv->pdev->dev;
+	struct device_node *np = dev->of_node;
+	int rst_gpio, ldo_gpio;
+	int low_active_ldo, low_active_rst;
+	u32 *delays_ldo = priv->delays_ldo;
+	u32 *delays_rst = priv->delays_rst;
+
+	priv->rst_gpio = -1;
+	priv->ldo_gpio = -1;
+
+	if (!np)
+		return 0;
+
+	rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	if (rst_gpio >= 0) {
+		low_active_rst = of_property_read_bool(np, "reset-active-low");
+		of_property_read_u32_array(np, "reset-delays-us", delays_rst, 3);
+
+		if (gpio_request(rst_gpio, "mdio-reset")) {
+			printk("emac: reset-gpio=%d request failed\n",
+				rst_gpio);
+			return 0;
+		}
+		priv->rst_gpio = rst_gpio;
+		priv->low_active_rst = low_active_rst;
+	}
+
+	ldo_gpio = of_get_named_gpio(np, "ldo-gpio", 0);
+	if (ldo_gpio >= 0) {
+		low_active_ldo = of_property_read_bool(np, "ldo-active-low");
+		of_property_read_u32_array(np, "ldo-delays-us", delays_ldo, 3);
+
+		if (gpio_request(ldo_gpio, "mdio-ldo"))
+			return 0;
+
+		priv->ldo_gpio = ldo_gpio;
+		priv->low_active_ldo = low_active_ldo;
+	}
+
+	/*
+	 * Some devices do not allow MDC/MDIO activity during power-on or
+	 * reset, so disable the AXI clock to shut down the MDIO clock.
+	 */
+	clk_disable_unprepare(priv->clk);
+
+	emac_power_up(priv);
+
+	clk_prepare_enable(priv->clk);
+
+	emac_reset_hw(priv);
+
+	return 0;
+}
+
+static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+	struct emac_priv *priv = bus->priv;
+	u32 cmd = 0;
+	u32 val;
+
+	if (!__clk_is_enabled(priv->clk))
+		return -EBUSY;
+
+	mutex_lock(&priv->mii_mutex);
+	cmd |= phy_addr & 0x1F;
+	cmd |= (regnum & 0x1F) << 5;
+	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
+
+	/*
+	 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
+	 * change during MDIO read/write
+	 */
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
+#endif
+	emac_wr(priv, MAC_MDIO_DATA, 0x0);
+	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+	if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+			       !(val & MREGBIT_START_MDIO_TRANS), 100, 100000)) {
+#ifdef CONFIG_DDR_DEVFREQ
+		pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+		mutex_unlock(&priv->mii_mutex);
+		return -EBUSY;
+	}
+
+	val = emac_rd(priv, MAC_MDIO_DATA);
+
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+	mutex_unlock(&priv->mii_mutex);
+	return val;
+}
+
+static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
+			    u16 value)
+{
+	struct emac_priv *priv = bus->priv;
+	u32 cmd = 0;
+	u32 val;
+
+	if (!__clk_is_enabled(priv->clk))
+		return -EBUSY;
+
+	mutex_lock(&priv->mii_mutex);
+	emac_wr(priv, MAC_MDIO_DATA, value);
+
+	cmd |= phy_addr & 0x1F;
+	cmd |= (regnum & 0x1F) << 5;
+	cmd |= MREGBIT_START_MDIO_TRANS;
+
+	/*
+	 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
+	 * change during MDIO read/write
+	 */
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
+#endif
+	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+	if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+			       !(val & MREGBIT_START_MDIO_TRANS), 100, 100000)) {
+#ifdef CONFIG_DDR_DEVFREQ
+		pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+		mutex_unlock(&priv->mii_mutex);
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+
+	mutex_unlock(&priv->mii_mutex);
+	return 0;
+}
+
+static void emac_adjust_link(struct net_device *dev)
+{
+	struct phy_device *phydev = dev->phydev;
+	struct emac_priv *priv = netdev_priv(dev);
+	u32 ctrl;
+#ifdef WAN_LAN_AUTO_ADAPT
+	int status_change = 0;
+	int addr = 0;
+	int i = 0;
+#endif
+	if (!phydev || priv->fix_link)
+		return;
+
+	if (phydev->link) {
+		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+
+		/* Now we make sure that we can be in full duplex mode
+		 * If not, we operate in half-duplex mode.
+		 */
+		if (phydev->duplex != priv->duplex) {
+			if (!phydev->duplex)
+				ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
+			else
+				ctrl |= MREGBIT_FULL_DUPLEX_MODE;
+			priv->duplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->speed) {
+			ctrl &= ~MREGBIT_SPEED;
+
+			switch (phydev->speed) {
+			case SPEED_1000:
+				ctrl |= MREGBIT_SPEED_1000M;
+				break;
+			case SPEED_100:
+				ctrl |= MREGBIT_SPEED_100M;
+				break;
+			case SPEED_10:
+				ctrl |= MREGBIT_SPEED_10M;
+				break;
+			default:
+				pr_err("broken speed: %d\n", phydev->speed);
+				phydev->speed = SPEED_UNKNOWN;
+				break;
+			}
+			if (phydev->speed != SPEED_UNKNOWN) {
+				priv->speed = phydev->speed;
+			}
+		}
+		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+		pr_info("%s link:%d speed:%dM duplex:%s\n", __func__,
+				phydev->link, phydev->speed,
+				phydev->duplex ? "Full": "Half");
+	}
+
+#ifdef WAN_LAN_AUTO_ADAPT
+	if (phydev->phy_id == IP175D_PHY_ID) {
+		if (phydev->link != priv->link) {
+			for (i = 0; i < 16; i++) {
+				if ((priv->link & (1 << i)) != (phydev->link & (1 << i))) {
+					addr = i;
+					if (phydev->link & (1 << i)) {
+						/* link up */
+						pr_info("eth0 port%d link up\n", addr);
+						priv->dhcp = 0;
+						emac_sig_workq(CARRIER_UP_IP175D, addr);
+						if (priv->dhcp_delaywork)
+							cancel_delayed_work(&priv->dhcp_work);
+						priv->dhcp_delaywork = 1;
+						schedule_delayed_work(&priv->dhcp_work, 25 * HZ);
+					} else {
+						/* link down */
+						pr_info("eth0 port%d link down\n", addr);
+						priv->dhcp = 0;
+						if (priv->dhcp_delaywork)
+							cancel_delayed_work(&priv->dhcp_work);
+						priv->dhcp_delaywork = 0;
+						emac_sig_workq(CARRIER_DOWN_IP175D, addr);
+					}
+				}
+			}
+			priv->link = phydev->link;
+		}
+	} else {
+		if (phydev->link != priv->link) {
+			priv->link = phydev->link;
+			status_change = 1;
+		}
+
+		if (status_change) {
+			if (phydev->link) {
+				/* link up */
+				priv->dhcp = 0;
+				emac_sig_workq(CARRIER_UP, 0);
+				if (priv->dhcp_delaywork)
+					cancel_delayed_work(&priv->dhcp_work);
+				priv->dhcp_delaywork = 1;
+				schedule_delayed_work(&priv->dhcp_work, 25 * HZ);
+
+			} else {
+				/* link down */
+				priv->dhcp = 0;
+				if (priv->dhcp_delaywork)
+					cancel_delayed_work(&priv->dhcp_work);
+				priv->dhcp_delaywork = 0;
+				emac_sig_workq(CARRIER_DOWN, 0);
+			}
+		}
+	}
+#endif
+}
+
+static int emac_phy_connect(struct net_device *dev)
+{
+	struct phy_device *phydev;
+	int phy_interface;
+	struct device_node *np;
+	struct emac_priv *priv = netdev_priv(dev);
+
+	np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0);
+	if (!np) {
+		if (priv->fix_link) {
+			emac_phy_interface_config(priv, priv->interface);
+			if (priv->interface == PHY_INTERFACE_MODE_RGMII)
+				pinctrl_select_state(priv->pinctrl,
+							priv->rgmii_pins);
+			emac_config_phy_interrupt(priv, 0);
+			return 0;
+		}
+		return -ENODEV;
+	}
+
+	printk("%s: %s\n",__func__, np->full_name);
+	phy_interface = of_get_phy_mode(np);
+	emac_phy_interface_config(priv, phy_interface);
+	if (phy_interface != PHY_INTERFACE_MODE_RMII)
+		pinctrl_select_state(priv->pinctrl, priv->rgmii_pins);
+
+	phydev = of_phy_connect(dev, np,
+				&emac_adjust_link, 0, phy_interface);
+	if (IS_ERR_OR_NULL(phydev)) {
+		pr_err("Could not attach to PHY\n");
+		emac_power_down(priv);
+		if (!phydev)
+			return -ENODEV;
+		return PTR_ERR(phydev);
+	}
+
+	if (!phydev->phy_id || phydev->phy_id == 0xffffffff) {
+		pr_err("Not valid phy_id=0x%x\n", phydev->phy_id);
+		emac_power_down(priv);
+		return -ENODEV;
+	}
+
+	if (phy_interrupt_is_valid(phydev))
+		emac_config_phy_interrupt(priv, 1);
+	else
+		emac_config_phy_interrupt(priv, 0);
+
+	//phydev->supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
+	pr_info("%s:  %s: attached to PHY (UID 0x%x)"
+			" Link = %d irq=%d\n", __func__,
+			dev->name, phydev->phy_id, phydev->link, phydev->irq);
+	dev->phydev = phydev;
+
+#ifdef WAN_LAN_AUTO_ADAPT
+	if (phydev->phy_id == IP175D_PHY_ID)
+		emac_sig_workq(PHY_IP175D_CONNECT, 0);
+#endif
+
+	return 0;
+}
+
+static int emac_mdio_init(struct emac_priv *priv)
+{
+	struct device_node *mii_np;
+	struct device *dev = &priv->pdev->dev;
+	int ret;
+
+	mii_np = of_get_child_by_name(dev->of_node, "mdio-bus");
+	if (!mii_np) {
+		dev_err(dev, "no %s child node found", "mdio-bus");
+		return -ENODEV;
+	}
+
+	if (!of_device_is_available(mii_np)) {
+		ret = -ENODEV;
+		goto err_put_node;
+	}
+
+	priv->mii = mdiobus_alloc();
+	if (!priv->mii) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
+	priv->mii->priv = priv;
+	//priv->mii->irq = priv->mdio_irqs;
+	priv->mii->name = "emac mii";
+	priv->mii->reset = emac_mii_reset;
+	priv->mii->read = emac_mii_read;
+	priv->mii->write = emac_mii_write;
+	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%pOFn",
+			mii_np);
+	priv->mii->parent = dev;
+	priv->mii->phy_mask = 0xffffffff;
+	ret = of_mdiobus_register(priv->mii, mii_np);
+	if (ret) {
+		mdiobus_free(priv->mii);
+		priv->mii = NULL;
+	}
+
+err_put_node:
+	of_node_put(mii_np);
+	return ret;
+}
+
+static int emac_mdio_deinit(struct emac_priv *priv)
+{
+	if (!priv->mii)
+		return 0;
+
+	mdiobus_unregister(priv->mii);
+	mdiobus_free(priv->mii);
+	priv->mii = NULL;
+	return 0;
+}
+
+static int emac_get_ts_info(struct net_device *dev,
+			      struct ethtool_ts_info *info)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+
+	if (priv->ptp_support) {
+
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
+					SOF_TIMESTAMPING_RAW_HARDWARE;
+
+		if (priv->ptp_clock)
+			info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+				    (1 << HWTSTAMP_FILTER_ALL));
+		if (priv->regdata->ptp_rx_ts_all_events) {
+			info->rx_filters |=
+				(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+		}
+
+		return 0;
+	} else
+		return ethtool_op_get_ts_info(dev, info);
+}
+
+static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
+			memcpy(data, emac_ethtool_stats[i].str, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int emac_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(emac_ethtool_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void emac_stats_update(struct emac_priv *priv)
+{
+	struct emac_hw_stats *hwstats = priv->hw_stats;
+	int i;
+	u32 *p;
+
+	p = (u32 *)(hwstats);
+
+	for (i = 0; i < MAX_TX_STATS_NUM; i++)
+		*(p + i) = ReadTxStatCounters(priv, i);
+
+	p = (u32 *)hwstats + MAX_TX_STATS_NUM;
+
+	for (i = 0; i < MAX_RX_STATS_NUM; i++)
+		*(p + i) = ReadRxStatCounters(priv, i);
+
+	*(p + i++) = emac_rd(priv, DMA_MISSED_FRAME_COUNTER);
+
+	*(p + i++) = hwstats->tx_tso_pkts;
+	*(p + i++) = hwstats->tx_tso_bytes;
+}
+
+static void emac_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct emac_hw_stats *hwstats = priv->hw_stats;
+	u32 *data_src;
+	u64 *data_dst;
+	int i;
+
+	if (netif_running(dev) && netif_device_present(dev)) {
+		if (spin_trylock_bh(&hwstats->stats_lock)) {
+			emac_stats_update(priv);
+			spin_unlock_bh(&hwstats->stats_lock);
+		}
+	}
+
+	data_dst = data;
+
+	for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
+		data_src = (u32 *)hwstats + emac_ethtool_stats[i].offset;
+		*data_dst++ = (u64)(*data_src);
+	}
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *dev)
+{
+	return EMAC_REG_SPACE_SIZE;
+}
+
+static void emac_ethtool_get_regs(struct net_device *dev,
+				  struct ethtool_regs *regs, void *space)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	u32 *reg_space = (u32 *) space;
+	void __iomem *base = priv->iobase;
+	int i;
+
+	regs->version = 1;
+
+	memset(reg_space, 0x0, EMAC_REG_SPACE_SIZE);
+
+	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
+		reg_space[i] = readl(base + DMA_CONFIGURATION + i * 4);
+
+	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
+		reg_space[i + MAC_GLOBAL_CONTROL / 4] = readl(base + MAC_GLOBAL_CONTROL + i * 4);
+}
+
+static int emac_get_link_ksettings(struct net_device *ndev,
+					struct ethtool_link_ksettings *cmd)
+{
+	if (!ndev->phydev)
+		return -ENODEV;
+
+	phy_ethtool_ksettings_get(ndev->phydev, cmd);
+	return 0;
+}
+
+static int emac_set_link_ksettings(struct net_device *ndev,
+					const struct ethtool_link_ksettings *cmd)
+{
+	if (!ndev->phydev)
+		return -ENODEV;
+
+	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+}
+
+static void emac_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+	info->n_stats = ARRAY_SIZE(emac_ethtool_stats);
+}
+
+static void emac_get_pauseparam(struct net_device *ndev,
+								struct ethtool_pauseparam *param)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
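+	/* autoneg comes from BMCR (register 0); note the PHY is assumed to be at address 0 */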
+	int val = emac_mii_read(priv->mii, 0, 0);
+
+	param->autoneg = (val & BIT(12)) ? 1 : 0;
+	param->rx_pause = priv->pause.rx_pause;
+	param->tx_pause = priv->pause.tx_pause;
+}
+
+static int emac_set_pauseparam(struct net_device *ndev,
+							   struct ethtool_pauseparam *param)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = &priv->pdev->dev;
+	struct device_node *np = dev->of_node;
+	int val;
+	int phyval;
+	u32 threshold[2];
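+	/* one-shot threshold setup; note this static is shared by all EMAC instances */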
+	static int init_flag = 1;
+
+	val = readl(priv->iobase + MAC_FC_CONTROL);
+	phyval = emac_mii_read(priv->mii, 0, 0);
+
+	if (param->rx_pause)
+		val |= MREGBIT_FC_DECODE_ENABLE;
+	else
+		val &= ~MREGBIT_FC_DECODE_ENABLE;
+
+	if (param->tx_pause)
+		val |= MREGBIT_FC_GENERATION_ENABLE;
+	else
+		val &= ~MREGBIT_FC_GENERATION_ENABLE;
+
+	if (init_flag && (param->rx_pause || param->tx_pause)) {
+		val |= MREGBIT_MULTICAST_MODE;
+		priv->pause.pause_time_max = 0;
+		if (0 != of_property_read_u32_array(np, "flow-control-threshold", threshold, 2)) {
+			threshold[0] = 60;
+			threshold[1] = 90;
+		}
+		threshold[0] = clamp(threshold[0], 0U, 99U);
+		threshold[1] = clamp(threshold[1], 1U, 100U);
+
+		if (cpu_is_asr18xx() || cpu_is_asr1903_z1()) {
+			priv->pause.low_water = priv->rx_ring.total_cnt * threshold[0] / 100;
+			priv->pause.high_water = priv->rx_ring.total_cnt * threshold[1] / 100 - 1;
+			priv->pause.fc_auto = 0;
+		} else {
+			priv->pause.low_water = 0;
+			priv->pause.high_water = 0;
+			priv->pause.fc_auto = 1;
+			val |= MREGBIT_AUTO_FC_GENERATION_ENABLE;
+			threshold[0] = 1024 * threshold[0] / 100;
+			threshold[1] = 1024 * threshold[1] / 100;
+			emac_wr(priv, MAC_FC_AUTO_HIGH_THRESHOLD, threshold[1]);
+			emac_wr(priv, MAC_FC_AUTO_LOW_THRESHOLD, threshold[0]);
+			emac_wr(priv, MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE, 0xffff);
+			emac_wr(priv, MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE, 0);
+		}
+		init_flag = 0;
+	}
+	emac_wr(priv, MAC_FC_CONTROL, val);
+
+	if (param->autoneg)
+		phyval |= BIT(12);
+	else
+		phyval &= ~BIT(12);
+
+	(void)emac_mii_write(priv->mii, 0, 0, (u16)phyval);
+
+	priv->pause.rx_pause = param->rx_pause;
+	priv->pause.tx_pause = param->tx_pause;
+	return 0;
+}
+
+static void emac_get_wol(struct net_device *dev,
+			       struct ethtool_wolinfo *wol)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct device *device = &priv->pdev->dev;
+
+	if (device_can_wakeup(device)) {
+		wol->supported = WAKE_MAGIC | WAKE_UCAST;
+		wol->wolopts = priv->wolopts;
+	}
+}
+
+static int emac_set_wol(struct net_device *dev,
+			      struct ethtool_wolinfo *wol)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct device *device = &priv->pdev->dev;
+	u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+	if (!device_can_wakeup(device) || !priv->en_suspend)
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & ~support)
+		return -EINVAL;
+
+	priv->wolopts = wol->wolopts;
+
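+	/* Arm or disarm the dedicated wakeup interrupt to match the new WoL options. */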
+	if (wol->wolopts) {
+		device_set_wakeup_enable(device, 1);
+		enable_irq_wake(priv->irq_wakeup);
+	} else {
+		device_set_wakeup_enable(device, 0);
+		disable_irq_wake(priv->irq_wakeup);
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+	.get_link_ksettings     = emac_get_link_ksettings,
+	.set_link_ksettings     = emac_set_link_ksettings,
+	.get_drvinfo            = emac_get_drvinfo,
+	.nway_reset             = phy_ethtool_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_pauseparam         = emac_get_pauseparam,
+	.set_pauseparam         = emac_set_pauseparam,
+	.get_strings            = emac_get_strings,
+	.get_sset_count         = emac_get_sset_count,
+	.get_ethtool_stats      = emac_get_ethtool_stats,
+	.get_regs		= emac_ethtool_get_regs,
+	.get_regs_len		= emac_ethtool_get_regs_len,
+	.get_ts_info 		= emac_get_ts_info,
+	.get_wol		= emac_get_wol,
+	.set_wol		= emac_set_wol,
+};
+
+static const struct net_device_ops emac_netdev_ops = {
+	.ndo_open               = emac_open,
+	.ndo_stop               = emac_close,
+	.ndo_start_xmit         = emac_start_xmit,
+	.ndo_set_mac_address    = emac_set_mac_address,
+	.ndo_do_ioctl           = emac_ioctl,
+	.ndo_change_mtu         = emac_change_mtu,
+	.ndo_tx_timeout         = emac_tx_timeout,
+};
+
+#ifdef WAN_LAN_AUTO_ADAPT
+#define EMAC_SKB_SIZE	2048
+static int emac_event_add_var(struct emac_event *event, int argv,
+		const char *format, ...)
+{
+	char buf[128];
+	char *s;
+	va_list args;
+	int len;
+
+	if (argv)
+		return 0;
+
+	va_start(args, format);
+	len = vsnprintf(buf, sizeof(buf), format, args);
+	va_end(args);
+
+	if (len >= sizeof(buf)) {
+		printk("buffer size too small\n");
+		WARN_ON(1);
+		return -ENOMEM;
+	}
+
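+	/* The uevent payload is a sequence of NUL-terminated "KEY=value" strings. */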
+	s = skb_put(event->skb, len + 1);
+	strcpy(s, buf);
+
+	return 0;
+}
+
+static int emac_hotplug_fill_event(struct emac_event *event)
+{
+	int ret;
+
+	ret = emac_event_add_var(event, 0, "HOME=%s", "/");
+	if (ret)
+		return ret;
+
+	ret = emac_event_add_var(event, 0, "PATH=%s",
+					"/sbin:/bin:/usr/sbin:/usr/bin");
+	if (ret)
+		return ret;
+
+	ret = emac_event_add_var(event, 0, "SUBSYSTEM=%s", "ethernet");
+	if (ret)
+		return ret;
+
+	ret = emac_event_add_var(event, 0, "ACTION=%s", event->action);
+	if (ret)
+		return ret;
+
+	ret = emac_event_add_var(event, 0, "ETH=%s", event->name);
+	if (ret)
+		return ret;
+
+	ret = emac_event_add_var(event, 0, "PORT=%d", event->port);
+	if (ret)
+		return ret;
+
+	ret = emac_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());
+
+	return ret;
+}
+
+static void emac_hotplug_work(struct work_struct *work)
+{
+	struct emac_event *event = container_of(work, struct emac_event, work);
+	int ret = 0;
+
+	event->skb = alloc_skb(EMAC_SKB_SIZE, GFP_KERNEL);
+	if (!event->skb)
+		goto out_free_event;
+
+	ret = emac_event_add_var(event, 0, "%s@", event->action);
+	if (ret)
+		goto out_free_skb;
+
+	ret = emac_hotplug_fill_event(event);
+	if (ret)
+		goto out_free_skb;
+
+	NETLINK_CB(event->skb).dst_group = 1;
+	broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);
+
+ out_free_skb:
+	if (ret) {
+		printk("work error %d\n", ret);
+		kfree_skb(event->skb);
+	}
+ out_free_event:
+	kfree(event);
+}
+
+static int emac_sig_workq(int event, int port)
+{
+	struct emac_event *u_event = NULL;
+
+	u_event = kzalloc(sizeof(*u_event), GFP_KERNEL);
+	if (!u_event)
+		return -ENOMEM;
+
+	u_event->name = DRIVER_NAME;
+	switch (event) {
+	case CARRIER_UP:
+		u_event->action = "LINKUP";
+		break;
+	case CARRIER_DOWN:
+		u_event->action = "LINKDW";
+		break;
+	case CARRIER_DOWN_IP175D:
+		u_event->action = "IP175D_LINKDW";
+		break;
+	case CARRIER_UP_IP175D:
+		u_event->action = "IP175D_LINKUP";
+		break;
+	case DHCP_EVENT_CLIENT:
+		u_event->action = "DHCPCLIENT";
+		break;
+	case DHCP_EVENT_SERVER:
+		u_event->action = "DHCPSERVER";
+		break;
+	case PHY_IP175D_CONNECT:
+		u_event->action = "PHY_CONNECT";
+		break;
+	}
+
+	u_event->port = port;
+	INIT_WORK(&u_event->work, emac_hotplug_work);
+	schedule_work(&u_event->work);
+
+	return 0;
+}
+
+static inline void __emac_dhcp_work_func(struct emac_priv *priv)
+{
+	if (priv->dhcp == DHCP_REC_RESP) {
+		emac_sig_workq(DHCP_EVENT_CLIENT, priv->vlan_port);
+	} else if (priv->dhcp == DHCP_SEND_REQ || priv->dhcp == 0) {
+		emac_sig_workq(DHCP_EVENT_SERVER, priv->vlan_port);
+	}
+
+	priv->dhcp = 0;
+	if (priv->dhcp_delaywork) {
+		cancel_delayed_work(&priv->dhcp_work);
+		priv->dhcp_delaywork = 0;
+	}
+}
+
+static void emac_dhcp_work_func_t(struct work_struct *work)
+{
+	struct emac_priv *priv = container_of(work, struct emac_priv, dhcp_work.work);
+
+	__emac_dhcp_work_func(priv);
+}
+#endif
+
+static int emac_probe(struct platform_device *pdev)
+{
+	struct emac_priv *priv;
+	struct net_device *ndev = NULL;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	const unsigned char *mac_addr = NULL;
+	const struct of_device_id *match;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *emac_fs_dir = NULL;
+	struct dentry *emac_clk_tuning;
+#endif
+	int ret;
+
+	ndev = alloc_etherdev(sizeof(struct emac_priv));
+	if (!ndev)
+		return -ENOMEM;
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+	priv->pdev = pdev;
+#ifdef WAN_LAN_AUTO_ADAPT
+	priv->dhcp  = -1;
+	priv->vlan_port  = -1;
+	priv->dhcp_delaywork = 0;
+#endif
+	platform_set_drvdata(pdev, priv);
+
+	match = of_match_device(of_match_ptr(emac_of_match), &pdev->dev);
+	if (match) {
+		priv->regdata = match->data;
+	} else {
+		pr_info("===> not match valid device\n");
+	}
+
+	emac_command_options(priv);
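+	/* Pre-allocate the shared RX slot pool: fixed-size slots, twice the RX ring depth. */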
+	emac_skbrb_init(EMAC_SKBRB_SLOT_SIZE, priv->rx_ring.total_cnt * 2);
+
+	priv->hw_stats = devm_kzalloc(&pdev->dev,
+					sizeof(*priv->hw_stats),
+						GFP_KERNEL);
+	if (!priv->hw_stats) {
+		dev_err(&pdev->dev, "failed to allocate counter memory\n");
+		ret = -ENOMEM;
+		goto err_netdev;
+	}
+
+	spin_lock_init(&priv->hw_stats->stats_lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->iobase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->iobase)) {
+		ret = PTR_ERR(priv->iobase);
+		goto err_netdev;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
+	if (!IS_ERR(priv->tso_base)) {
+		dev_info(&pdev->dev, "tso base=0x%x\n", (unsigned)priv->tso_base);
+	}
+
+	priv->irq = irq_of_parse_and_map(np, 0);
+	if (!priv->irq) {
+		ret = -ENXIO;
+		goto err_netdev;
+	}
+	priv->irq_wakeup = irq_of_parse_and_map(np, 1);
+	if (!priv->irq_wakeup)
+		dev_err(&pdev->dev, "wake_up irq not found\n");
+
+	priv->tso = of_property_read_bool(np, "tso-support");
+	if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
+		priv->tso = false;
+	if (priv->tso) {
+		priv->irq_tso = irq_of_parse_and_map(np, 3);
+		if (!priv->irq_tso) {
+			dev_err(&pdev->dev, "tso irq not found\n");
+			priv->tso = false;
+		}
+	}
+
+	priv->sram_pool = of_gen_pool_get(dev->of_node, "eth,sram", 0);
+	if (priv->sram_pool) {
+		dev_notice(&pdev->dev, "use sram as tx desc\n");
+	}
+
+	ret = of_property_read_u32(np, "lpm-qos", &priv->pm_qos);
+	if (ret)
+		goto err_netdev;
+
+	ret = of_property_read_u32(np, "3v3-enable", &priv->power_domain);
+	if (ret)
+		priv->power_domain = 0;
+
+	ret = of_property_read_u32(np, "mdio-clk-div", &priv->mdio_clk_div);
+	if (ret)
+		priv->mdio_clk_div = 0xfe;
+
+	if (of_property_read_bool(np, "enable-suspend"))
+		priv->en_suspend = 1;
+	else
+		priv->en_suspend = 0;
+
+	priv->wolopts = 0;
+	if (of_property_read_bool(np, "magic-packet-wakeup"))
+		priv->wolopts |= WAKE_MAGIC;
+
+	if (of_property_read_bool(np, "unicast-packet-wakeup"))
+		priv->wolopts |= WAKE_UCAST;
+
+	priv->dev_flags = 0;
+	if (of_property_read_bool(np, "suspend-not-keep-power")) {
+		priv->dev_flags |= EMAC_SUSPEND_POWER_DOWN_PHY;
+		priv->wolopts = 0;
+	}
+
+	priv->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(priv->pinctrl)) {
+		dev_err(dev, "could not get pinctrl handle\n");
+		priv->rgmii_pins = ERR_PTR(-ENODEV);
+	} else {
+		priv->rgmii_pins = pinctrl_lookup_state(priv->pinctrl,
+							"rgmii-pins");
+		if (IS_ERR(priv->rgmii_pins))
+			dev_err(dev, "could not get rgmii-pins pinstate\n");
+	}
+
+	emac_set_aib_power_domain(priv);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	priv->pm_qos_req.name = pdev->name;
+	pm_qos_add_request(&priv->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
+		PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_add_request(&priv->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
+				PM_QOS_DEFAULT_VALUE);
+
+	priv->clk_scaling.polling_delay_ms = 1000; /* 1s window */
+	priv->clk_scaling.tx_up_threshold = 120;   /* 120Mbps   */
+	priv->clk_scaling.tx_down_threshold = 60;
+	priv->clk_scaling.rx_up_threshold = 60;    /* 60Mbps    */
+	priv->clk_scaling.rx_down_threshold = 20;
+	priv->clk_scaling.window_time = jiffies;
+	pm_qos_add_request(&priv->clk_scaling.ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
+			   PM_QOS_DEFAULT_VALUE);
+	INIT_WORK(&priv->qos_work, emac_ddr_qos_work);
+#endif
+	skb_queue_head_init(&priv->rx_skb);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->base_addr = (unsigned long)priv->iobase;
+	ndev->irq  = priv->irq;
+	/* set hw features */
+	ndev->features = NETIF_F_SG | NETIF_F_SOFT_FEATURES;
+	if (priv->tso) {
+		ndev->features |= NETIF_F_RXCSUM;
+		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		dev_info(&pdev->dev, "TSO feature enabled\n");
+	}
+	ndev->hw_features = ndev->features;
+	ndev->vlan_features = ndev->features;
+
+	ndev->ethtool_ops = &emac_ethtool_ops;
+	ndev->netdev_ops = &emac_netdev_ops;
+	if (pdev->dev.of_node)
+		mac_addr = of_get_mac_address(np);
+
+	if (!IS_ERR_OR_NULL(mac_addr)) {
+		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+		if (!is_valid_ether_addr(ndev->dev_addr)) {
+			dev_info(&pdev->dev, "Using random mac address\n");
+			eth_hw_addr_random(ndev);
+		}
+	} else {
+		dev_info(&pdev->dev, "Using random mac address\n");
+		eth_hw_addr_random(ndev);
+	}
+
+	priv->hw_adj = of_property_read_bool(np, "hw-increment");
+	priv->ptp_support = of_property_read_bool(np, "ptp-support");
+	if (priv->ptp_support) {
+		pr_info("EMAC support IEEE1588 PTP Protocol\n");
+		if (of_property_read_u32(np, "ptp-clk-rate",
+					&priv->ptp_clk_rate)) {
+			priv->ptp_clk_rate = 20000000;
+			pr_info("%s ptp_clk rate using default value:%d may inaccurate!!1\n",
+				__func__, priv->ptp_clk_rate);
+		}
+
+		priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk");
+		if (IS_ERR(priv->ptp_clk)) {
+			dev_err(&pdev->dev, "ptp clock not found.\n");
+			ret = PTR_ERR(priv->ptp_clk);
+			goto err_netdev;
+		}
+
+		clk_set_rate(priv->ptp_clk, priv->ptp_clk_rate);
+	}
+
+	priv->pps_info.enable_pps = 0;
+#ifdef CONFIG_PPS
+	ret = of_property_read_u32(np, "pps_source", &priv->pps_info.pps_source);
+	if (!ret) {
+		priv->irq_pps = irq_of_parse_and_map(np, 2);
+
+		if (priv->pps_info.pps_source < EMAC_PPS_MAX)
+			priv->pps_info.enable_pps = 1;
+		else
+			dev_err(&pdev->dev, "wrong PPS source!\n");
+	}
+#endif
+	priv->clk = devm_clk_get(&pdev->dev, "emac-clk");
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "emac clock not found.\n");
+		ret = PTR_ERR(priv->clk);
+		goto err_netdev;
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable emac clock: %d\n",
+			ret);
+		goto err_netdev;
+	}
+
+	emac_sw_init(priv);
+	ret = emac_mdio_init(priv);
+	if (ret)
+		goto clk_disable;
+
+	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
+#ifdef WAN_LAN_AUTO_ADAPT
+	INIT_DELAYED_WORK(&priv->dhcp_work, emac_dhcp_work_func_t);
+#endif
+	if (of_phy_is_fixed_link(np)) {
+		if ((emac_set_fixed_link(np, priv) < 0)) {
+			ret = -ENODEV;
+			goto clk_disable;
+		}
+		dev_info(&pdev->dev, "find fixed link\n");
+		priv->fix_link = 1;
+	}
+
+	INIT_DELAYED_WORK(&priv->emac_pause_work, emac_pause_generate_work_fuc);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	strcpy(ndev->name, "eth%d");
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		pr_err("register_netdev failed\n");
+		goto err_mdio_deinit;
+	}
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		dev_warn(&pdev->dev, "failed to set 64-bit DMA mask\n");
+#ifdef CONFIG_ASR_EMAC_NAPI
+	netif_napi_add(ndev, &priv->rx_napi, emac_rx_poll, 32);
+	netif_tx_napi_add(ndev, &priv->tx_napi, emac_tx_poll, 32);
+#endif
+	priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
+	priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
+	priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable");
+
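+	/* Optional RGMII clock-phase tuning: initial TX/RX phases come from DT and are adjustable at runtime via debugfs. */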
+	if (priv->clk_tuning_enable) {
+		ret = of_property_read_u32(np, "tx-clk-config",
+			&priv->tx_clk_config);
+		if (ret)
+			priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
+
+		ret = of_property_read_u32(np, "rx-clk-config",
+			&priv->rx_clk_config);
+		if (ret)
+			priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
+#ifdef CONFIG_DEBUG_FS
+		if (!emac_fs_dir) {
+			emac_fs_dir = debugfs_create_dir(DRIVER_NAME, NULL);
+
+			if (!emac_fs_dir || IS_ERR(emac_fs_dir)) {
+				pr_err("emac debugfs create directory failed\n");
+			} else {
+				emac_clk_tuning = debugfs_create_file("clk_tuning", 0664,
+								      emac_fs_dir, priv, &clk_tuning_fops);
+				if (!emac_clk_tuning) {
+					pr_err("emac debugfs create file failed\n");
+				}
+			}
+		}
+#endif
+	}
+	return 0;
+
+err_mdio_deinit:
+	emac_mdio_deinit(priv);
+clk_disable:
+	clk_disable_unprepare(priv->clk);
+err_netdev:
+	free_netdev(ndev);
+	emac_skbrb_release();
+	return ret;
+}
+
+static int emac_remove(struct platform_device *pdev)
+{
+	struct emac_priv *priv = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	unregister_netdev(priv->ndev);
+	emac_reset_hw(priv);
+	free_netdev(priv->ndev);
+	emac_mdio_deinit(priv);
+	clk_disable_unprepare(priv->clk);
+	pm_qos_remove_request(&priv->pm_qos_req);
+	cancel_delayed_work_sync(&priv->emac_pause_work);
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_remove_request(&priv->pm_ddr_qos);
+	pm_qos_remove_request(&priv->clk_scaling.ddr_qos);
+#endif
+	emac_skbrb_release();
+	return 0;
+}
+
+static void emac_shutdown(struct platform_device *pdev)
+{
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int emac_resume(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+	u32 ctrl, wake_mode = 0;
+
+	if (!priv->en_suspend)
+		return 0;
+
+	if (priv->wolopts) {
+		if (netif_running(ndev)) {
+			netif_device_attach(ndev);
+#ifdef CONFIG_ASR_EMAC_NAPI
+			napi_enable(&priv->rx_napi);
+			napi_enable(&priv->tx_napi);
+#endif
+		}
+
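+		/* WoL resume: disarm the wakeup IRQ and clear the MAC wakeup modes. */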
+		if (priv->wolopts & WAKE_MAGIC)
+			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
+		if (priv->wolopts & WAKE_UCAST)
+			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
+
+		disable_irq_wake(priv->irq_wakeup);
+		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+		ctrl &= ~wake_mode;
+		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+	} else {
+		clk_prepare_enable(priv->clk);
+
+		if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
+			emac_power_up(priv);
+
+		rtnl_lock();
+		dev_open(ndev, NULL);
+		rtnl_unlock();
+	}
+
+	return 0;
+}
+
+static int emac_suspend(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+	u32 ctrl, wake_mode = 0;
+
+	if (!priv->en_suspend)
+		return 0;
+
+	if (priv->wolopts) {
+		if (netif_running(ndev)) {
+			netif_device_detach(ndev);
+#ifdef CONFIG_ASR_EMAC_NAPI
+			napi_disable(&priv->rx_napi);
+			napi_disable(&priv->tx_napi);
+#endif
+		}
+
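+		/* WoL suspend: keep the MAC powered, program the wakeup modes and arm the wakeup IRQ. */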
+		if (priv->wolopts & WAKE_MAGIC)
+			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
+		if (priv->wolopts & WAKE_UCAST)
+			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
+
+		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+		ctrl |= wake_mode;
+		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+		enable_irq_wake(priv->irq_wakeup);
+	} else {
+		rtnl_lock();
+		dev_close(ndev);
+		rtnl_unlock();
+
+		if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
+			emac_power_down(priv);
+
+		clk_disable_unprepare(priv->clk);
+	}
+
+	return 0;
+}
+
+static int emac_suspend_noirq(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+
+	if (!ndev->phydev && !priv->fix_link)
+		return 0;
+
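+	/* Release the cpuidle QoS constraint for system sleep; emac_resume_noirq restores it. */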
+	pr_pm_debug("==> enter emac_suspend_noirq\n");
+	pm_qos_update_request(&priv->pm_qos_req,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+	return 0;
+}
+
+static int emac_resume_noirq(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+
+	if (!ndev->phydev && !priv->fix_link)
+		return 0;
+
+	pr_pm_debug("==> enter emac_resume_noirq\n");
+	pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
+	return 0;
+}
+
+static const struct dev_pm_ops emac_pm_ops = {
+	.suspend	= emac_suspend,
+	.resume		= emac_resume,
+	.suspend_noirq	= emac_suspend_noirq,
+	.resume_noirq	= emac_resume_noirq,
+};
+
+#define ASR_EMAC_PM_OPS (&emac_pm_ops)
+#else
+#define ASR_EMAC_PM_OPS NULL
+#endif
+
+static struct platform_driver emac_driver = {
+	.probe = emac_probe,
+	.remove = emac_remove,
+	.shutdown = emac_shutdown,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = of_match_ptr(emac_of_match),
+		.pm	= ASR_EMAC_PM_OPS,
+	},
+};
+
+module_platform_driver(emac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for ASR Emac");
+MODULE_ALIAS("platform:asr_eth");