ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/net/ethernet/asr/Kconfig b/marvell/linux/drivers/net/ethernet/asr/Kconfig
new file mode 100644
index 0000000..46285ed
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/asr/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config NET_VENDOR_ASR
+ bool "ASR ethernet devices"
+ default n
+ ---help---
+ If you have an ASR SoC with ethernet, say Y.
+
+if NET_VENDOR_ASR
+
+config ASR_EMAC
+ tristate "ASR SoC Ethernet support"
+ depends on NET_VENDOR_ASR
+ select PHYLIB
+ ---help---
+ This driver supports ethernet MACs in the
+ ASR SoC family.
+
+endif #NET_VENDOR_ASR
diff --git a/marvell/linux/drivers/net/ethernet/asr/Makefile b/marvell/linux/drivers/net/ethernet/asr/Makefile
new file mode 100644
index 0000000..e4abd25
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/asr/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_ASR_EMAC) += emac_ptp.o emac_eth.o
diff --git a/marvell/linux/drivers/net/ethernet/asr/emac_eth.c b/marvell/linux/drivers/net/ethernet/asr/emac_eth.c
new file mode 100644
index 0000000..5aed7ec
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/asr/emac_eth.c
@@ -0,0 +1,4931 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asr emac driver
+ *
+ * Copyright (C) 2019 ASR Micro Limited
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <linux/phy_fixed.h>
+#include <linux/pm_qos.h>
+#include <asm/cacheflush.h>
+#include <linux/cputype.h>
+#include <linux/iopoll.h>
+#include <linux/genalloc.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <asm/atomic.h>
+#include "emac_eth.h"
+#include <linux/skbrb.h>
+
+#ifdef WAN_LAN_AUTO_ADAPT
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/kobject.h>
+#endif
+
+#define DRIVER_NAME "asr_emac"
+
+#define AXI_PHYS_BASE 0xd4200000
+
+#define AIB_GMAC_IO_REG 0xD401E804
+#define APBC_ASFAR 0xD4015050
+#define AKEY_ASFAR 0xbaba
+#define AKEY_ASSAR 0xeb10
+
+#define EMAC_DIRECT_MAP
+#define TUNING_CMD_LEN 50
+#define CLK_PHASE_CNT 8
+#define TXCLK_PHASE_DEFAULT 0
+#define RXCLK_PHASE_DEFAULT 0
+#define TX_PHASE 1
+#define RX_PHASE 0
+
+#define EMAC_DMA_REG_CNT 16
+#define EMAC_MAC_REG_CNT 61
+#define EMAC_EMPTY_FROM_DMA_TO_MAC 48
+#define EMAC_REG_SPACE_SIZE ((EMAC_DMA_REG_CNT + \
+ EMAC_MAC_REG_CNT + EMAC_EMPTY_FROM_DMA_TO_MAC) * 4)
+#define EMAC_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct emac_hw_stats, x) / sizeof(u32) }
+
+#define EMAC_SKBRB_SLOT_SIZE 1600
+#define EMAC_EXTRA_ROOM 72
+#define EMAC_SKBRB_MAX_PAYLOAD (EMAC_SKBRB_SLOT_SIZE - EMAC_EXTRA_ROOM - NET_IP_ALIGN)
+
+#define EMAC_RX_FILL_TIMER_US 0
+#define EMAC_TX_COAL_TIMER_US (1000)
+#define EMAC_TX_FRAMES (64)
+
+#ifdef WAN_LAN_AUTO_ADAPT
+#define DHCP_DISCOVER 1
+#define DHCP_OFFER 2
+#define DHCP_REQUEST 3
+#define DHCP_ACK 5
+#define IP175D_PHY_ID 0x02430d80
+
+/*
+ * Events forwarded to user space for WAN/LAN auto-adaptation:
+ * carrier transitions, DHCP traffic direction, and IP175D switch PHY events.
+ */
+enum emac_SIG {
+ CARRIER_DOWN = 0,
+ CARRIER_UP,
+ DHCP_EVENT_CLIENT,
+ DHCP_EVENT_SERVER,
+ PHY_IP175D_CONNECT,
+ CARRIER_DOWN_IP175D,
+ CARRIER_UP_IP175D,
+};
+
+/* Direction of observed DHCP traffic (request sent vs. response received). */
+enum emac_DHCP {
+ DHCP_SEND_REQ = 1,
+ DHCP_REC_RESP = 2,
+};
+
+/* Deferred uevent descriptor; 'work' is queued by emac_sig_workq(). */
+struct emac_event {
+ const char *name;
+ char *action;
+ int port;
+ struct sk_buff *skb;
+ struct work_struct work;
+};
+
+extern u64 uevent_next_seqnum(void);
+static int emac_sig_workq(int event, int port);
+#endif
+
+static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable);
+static int clk_phase_set(struct emac_priv *priv, bool is_tx);
+#ifdef CONFIG_ASR_EMAC_NAPI
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget);
+#else
+static int emac_rx_clean_desc(struct emac_priv *priv);
+#endif
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv);
+static int emac_phy_connect(struct net_device *dev);
+
+/*
+ * Per-SoC register layout descriptors. Each shift/offset locates a control
+ * bit or register in the APMU clock/reset space; a value of -1 marks a
+ * field that does not exist on that silicon revision.
+ */
+/* for falcon */
+struct emac_regdata asr_emac_regdata_v1 = {
+ .support_dual_vol_power = 1,
+ .ptp_rx_ts_all_events = 0,
+ .clk_rst_ctrl_reg_offset = 0x160,
+ .axi_mst_single_id_shift = 17,
+ .phy_intr_enable_shift = 16,
+ .int_clk_src_sel_shift = -1,
+ .rgmii_tx_clk_src_sel_shift = 5,
+ .rgmii_rx_clk_src_sel_shift = 4,
+ .rmii_rx_clk_sel_shift = 7,
+ .rmii_tx_clk_sel_shift = 6,
+ .rmii_ref_clk_sel_shift = -1,
+ .mac_intf_sel_shift = 2,
+ .rgmii_tx_dline_reg_offset = -1,
+ .rgmii_tx_delay_code_shift = -1,
+ .rgmii_tx_delay_code_mask =-1,
+ .rgmii_tx_delay_step_shift = -1,
+ .rgmii_tx_delay_step_mask = -1,
+ .rgmii_tx_delay_enable_shift = -1,
+ .rgmii_rx_dline_reg_offset = -1,
+ .rgmii_rx_delay_code_shift = -1,
+ .rgmii_rx_delay_code_mask = -1,
+ .rgmii_rx_delay_step_shift = -1,
+ .rgmii_rx_delay_step_mask = -1,
+ .rgmii_rx_delay_enable_shift = -1,
+};
+
+/* for kagu */
+struct emac_regdata asr_emac_regdata_v2 = {
+ .support_dual_vol_power = 0,
+ .ptp_rx_ts_all_events = 0,
+ .clk_rst_ctrl_reg_offset = 0x160,
+ .axi_mst_single_id_shift = 13,
+ .phy_intr_enable_shift = 12,
+ .int_clk_src_sel_shift = 9,
+ .rgmii_tx_clk_src_sel_shift = 8,
+ .rgmii_rx_clk_src_sel_shift = -1,
+ .rmii_rx_clk_sel_shift = 7,
+ .rmii_tx_clk_sel_shift = 6,
+ .rmii_ref_clk_sel_shift = 3,
+ .mac_intf_sel_shift = 2,
+ .rgmii_tx_dline_reg_offset = 0x178,
+ .rgmii_tx_delay_code_shift = 24,
+ .rgmii_tx_delay_code_mask = 0xff,
+ .rgmii_tx_delay_step_shift = 20,
+ .rgmii_tx_delay_step_mask = 0x3,
+ .rgmii_tx_delay_enable_shift = 16,
+ .rgmii_rx_dline_reg_offset = 0x178,
+ .rgmii_rx_delay_code_shift = 8,
+ .rgmii_rx_delay_code_mask = 0xff,
+ .rgmii_rx_delay_step_shift = 4,
+ .rgmii_rx_delay_step_mask = 0x3,
+ .rgmii_rx_delay_enable_shift = 0,
+};
+
+/* for lapwing */
+struct emac_regdata asr_emac_regdata_v3 = {
+ .support_dual_vol_power = 1,
+ .ptp_rx_ts_all_events = 1,
+ .clk_rst_ctrl_reg_offset = 0x164,
+ .axi_mst_single_id_shift = 13,
+ .phy_intr_enable_shift = 12,
+ .int_clk_src_sel_shift = 9,
+ .rgmii_tx_clk_src_sel_shift = 8,
+ .rgmii_rx_clk_src_sel_shift = -1,
+ .rmii_rx_clk_sel_shift = 7,
+ .rmii_tx_clk_sel_shift = 6,
+ .rmii_ref_clk_sel_shift = 3,
+ .mac_intf_sel_shift = 2,
+ .rgmii_tx_dline_reg_offset = 0x16c,
+ .rgmii_tx_delay_code_shift = 8,
+ .rgmii_tx_delay_code_mask = 0xff,
+ .rgmii_tx_delay_step_shift = 0,
+ .rgmii_tx_delay_step_mask = 0x3,
+ .rgmii_tx_delay_enable_shift = 31,
+ .rgmii_rx_dline_reg_offset = 0x168,
+ .rgmii_rx_delay_code_shift = 8,
+ .rgmii_rx_delay_code_mask = 0xff,
+ .rgmii_rx_delay_step_shift = 0,
+ .rgmii_rx_delay_step_mask = 0x3,
+ .rgmii_rx_delay_enable_shift = 31,
+};
+
+/* DT match table: selects the regdata variant from the compatible string. */
+static const struct of_device_id emac_of_match[] = {
+ {
+ .compatible = "asr,asr-eth",
+ .data = (void *)&asr_emac_regdata_v1,
+ },
+ {
+ .compatible = "asr,asr-eth-v2",
+ .data = (void *)&asr_emac_regdata_v2,
+ },
+ {
+ .compatible = "asr,asr-eth-v3",
+ .data = (void *)&asr_emac_regdata_v3,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+#ifdef EMAC_DIRECT_MAP
+/*
+ * Fast TX mapping path that bypasses the DMA API: translate the kernel
+ * virtual address directly to physical and flush the data cache over the
+ * (cache-line aligned) buffer span so the device sees coherent memory.
+ * BUG_ON fires if the translation helper returns the input unchanged,
+ * i.e. the address could not be translated.
+ * NOTE(review): assumes a 32-byte cache line (the "& ~31" arithmetic) and
+ * ARM32 __cpuc_flush_dcache_area — confirm for the target platform.
+ */
+dma_addr_t inline emac_map_direct(unsigned buf, unsigned len)
+{
+ unsigned ret;
+ ret = mv_cp_virtual_to_physical(buf);
+ BUG_ON(ret == buf);
+ __cpuc_flush_dcache_area((void *)(buf & ~ 31),
+ ((len + (buf & 31) + 31) & ~ 31));
+ return (dma_addr_t)ret;
+}
+#endif
+
+/*
+ * Unmap counterpart of emac_map_single(). TX (DMA_TO_DEVICE) buffers mapped
+ * via the direct path were never registered with the DMA API, so there is
+ * nothing to unmap for them; RX buffers always go through dma_unmap_single.
+ */
+static inline void emac_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+#ifdef EMAC_DIRECT_MAP
+ if (dir == DMA_TO_DEVICE)
+ return;
+#endif
+ dma_unmap_single(dev, handle, size ,dir);
+}
+
+/*
+ * Map a buffer for device DMA. RX (DMA_FROM_DEVICE) always uses the DMA
+ * API; TX uses the faster direct virtual-to-physical translation when
+ * EMAC_DIRECT_MAP is enabled, otherwise the DMA API as well.
+ */
+static inline dma_addr_t emac_map_single(struct device *dev, void *ptr,
+ size_t size,enum dma_data_direction dir)
+{
+ if (dir == DMA_FROM_DEVICE)
+ return dma_map_single(dev, ptr, size, dir);
+#ifndef EMAC_DIRECT_MAP
+ return dma_map_single(dev, ptr, size, dir);
+#else
+ return emac_map_direct((unsigned)ptr, (unsigned)size);
+#endif
+}
+
+#ifdef CONFIG_DDR_DEVFREQ
+/*
+ * Workqueue handler that applies the DDR QoS value decided by
+ * emac_ddr_clk_scaling(). Run from process context because pm_qos calls
+ * may sleep. A boost request auto-expires after 2 seconds; the default
+ * value is applied without a timeout.
+ */
+static void emac_ddr_qos_work(struct work_struct *work)
+{
+ struct emac_priv *priv;
+ int val;
+
+ priv = container_of(work, struct emac_priv, qos_work);
+ val = priv->clk_scaling.qos_val;
+
+ if (val == PM_QOS_DEFAULT_VALUE)
+ pm_qos_update_request(&priv->clk_scaling.ddr_qos, val);
+ else
+ pm_qos_update_request_timeout(
+ &priv->clk_scaling.ddr_qos, val, (2 * USEC_PER_SEC));
+}
+
+/*
+ * Periodically (once per polling window) compare the achieved RX/TX
+ * throughput against configured thresholds and request a DDR frequency
+ * boost (or drop back to the default) via the qos_work workqueue.
+ *
+ * Fixes vs. original: guard the Mbps divisions against total_time_ms == 0
+ * (possible when polling_delay_ms is 0 or jiffies granularity rounds the
+ * window down), and use %u for the unsigned rate values in pr_debug.
+ */
+static void emac_ddr_clk_scaling(struct emac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ unsigned long rx_bytes, tx_bytes;
+ unsigned long last_rx_bytes, last_tx_bytes;
+ unsigned long total_time_ms = 0;
+ unsigned int cur_rx_threshold, cur_tx_threshold;
+ unsigned long polling_jiffies;
+ int qos_val;
+
+ /* Rate-limit: only evaluate once per polling window. */
+ polling_jiffies = msecs_to_jiffies(priv->clk_scaling.polling_delay_ms);
+ if (time_is_after_jiffies(priv->clk_scaling.window_time +
+ polling_jiffies))
+ return;
+
+ total_time_ms = jiffies_to_msecs((long)jiffies -
+ (long)priv->clk_scaling.window_time);
+
+ if (!ndev) {
+ pr_err("%s: dev or net is not ready\n", __func__);
+ return;
+ }
+
+ /* Avoid divide-by-zero in the Mbps computations below. */
+ if (unlikely(!total_time_ms))
+ total_time_ms = 1;
+
+ qos_val = priv->clk_scaling.qos_val;
+ last_rx_bytes = priv->clk_scaling.rx_bytes;
+ last_tx_bytes = priv->clk_scaling.tx_bytes;
+ /* First window after reset: no baseline yet, just record counters. */
+ if (!last_rx_bytes && !last_tx_bytes)
+ goto out;
+
+ /* Byte deltas, tolerating counter wrap-around. */
+ if (likely(ndev->stats.rx_bytes > last_rx_bytes))
+ rx_bytes = ndev->stats.rx_bytes - last_rx_bytes;
+ else
+ rx_bytes = ULONG_MAX - last_rx_bytes + ndev->stats.rx_bytes + 1;
+
+ if (likely(ndev->stats.tx_bytes > last_tx_bytes))
+ tx_bytes = ndev->stats.tx_bytes - last_tx_bytes;
+ else
+ tx_bytes = ULONG_MAX - last_tx_bytes + ndev->stats.tx_bytes + 1;
+
+ /* Mbps = bytes * 8 / (ms * 1000) */
+ cur_tx_threshold = tx_bytes * 8 / (total_time_ms * 1000);
+ pr_debug("%s: tx_rate=%uMbps, up_threshold=%uMbps\n",
+ __func__, cur_tx_threshold, priv->clk_scaling.tx_up_threshold);
+ if (cur_tx_threshold >= priv->clk_scaling.tx_up_threshold) {
+ qos_val = ASR_EMAC_DDR_BOOST_FREQ;
+ goto out;
+ }
+
+ cur_rx_threshold = rx_bytes * 8 / (total_time_ms * 1000);
+ pr_debug("%s: rx_rate=%uMbps, up_threshold=%uMbps\n",
+ __func__, cur_rx_threshold, priv->clk_scaling.rx_up_threshold);
+ if (cur_rx_threshold >= priv->clk_scaling.rx_up_threshold) {
+ qos_val = ASR_EMAC_DDR_BOOST_FREQ;
+ goto out;
+ }
+
+ /* Both directions below the down-thresholds: release the boost. */
+ if (cur_tx_threshold < priv->clk_scaling.tx_down_threshold &&
+ cur_rx_threshold < priv->clk_scaling.rx_down_threshold)
+ qos_val = PM_QOS_DEFAULT_VALUE;
+
+out:
+ priv->clk_scaling.rx_bytes = ndev->stats.rx_bytes;
+ priv->clk_scaling.tx_bytes = ndev->stats.tx_bytes;
+ priv->clk_scaling.window_time = jiffies;
+
+ /* Apply asynchronously: pm_qos updates may sleep. */
+ if (qos_val != priv->clk_scaling.qos_val) {
+ priv->clk_scaling.qos_val = qos_val;
+ schedule_work(&priv->qos_work);
+ }
+
+ return;
+}
+#endif
+
+/* strings used by ethtool */
+/*
+ * Map of ethtool stat names to u32-word offsets inside struct
+ * emac_hw_stats (see EMAC_ETHTOOL_STAT). Order here defines the order
+ * reported by `ethtool -S`.
+ */
+static const struct emac_ethtool_stats {
+ char str[ETH_GSTRING_LEN];
+ u32 offset;
+} emac_ethtool_stats[] = {
+ EMAC_ETHTOOL_STAT(tx_ok_pkts),
+ EMAC_ETHTOOL_STAT(tx_total_pkts),
+ EMAC_ETHTOOL_STAT(tx_ok_bytes),
+ EMAC_ETHTOOL_STAT(tx_err_pkts),
+ EMAC_ETHTOOL_STAT(tx_singleclsn_pkts),
+ EMAC_ETHTOOL_STAT(tx_multiclsn_pkts),
+ EMAC_ETHTOOL_STAT(tx_lateclsn_pkts),
+ EMAC_ETHTOOL_STAT(tx_excessclsn_pkts),
+ EMAC_ETHTOOL_STAT(tx_unicast_pkts),
+ EMAC_ETHTOOL_STAT(tx_multicast_pkts),
+ EMAC_ETHTOOL_STAT(tx_broadcast_pkts),
+ EMAC_ETHTOOL_STAT(tx_pause_pkts),
+ EMAC_ETHTOOL_STAT(rx_ok_pkts),
+ EMAC_ETHTOOL_STAT(rx_total_pkts),
+ EMAC_ETHTOOL_STAT(rx_crc_err_pkts),
+ EMAC_ETHTOOL_STAT(rx_align_err_pkts),
+ EMAC_ETHTOOL_STAT(rx_err_total_pkts),
+ EMAC_ETHTOOL_STAT(rx_ok_bytes),
+ EMAC_ETHTOOL_STAT(rx_total_bytes),
+ EMAC_ETHTOOL_STAT(rx_unicast_pkts),
+ EMAC_ETHTOOL_STAT(rx_multicast_pkts),
+ EMAC_ETHTOOL_STAT(rx_broadcast_pkts),
+ EMAC_ETHTOOL_STAT(rx_pause_pkts),
+ EMAC_ETHTOOL_STAT(rx_len_err_pkts),
+ EMAC_ETHTOOL_STAT(rx_len_undersize_pkts),
+ EMAC_ETHTOOL_STAT(rx_len_oversize_pkts),
+ EMAC_ETHTOOL_STAT(rx_len_fragment_pkts),
+ EMAC_ETHTOOL_STAT(rx_len_jabber_pkts),
+ EMAC_ETHTOOL_STAT(rx_64_pkts),
+ EMAC_ETHTOOL_STAT(rx_65_127_pkts),
+ EMAC_ETHTOOL_STAT(rx_128_255_pkts),
+ EMAC_ETHTOOL_STAT(rx_256_511_pkts),
+ EMAC_ETHTOOL_STAT(rx_512_1023_pkts),
+ EMAC_ETHTOOL_STAT(rx_1024_1518_pkts),
+ EMAC_ETHTOOL_STAT(rx_1519_plus_pkts),
+ EMAC_ETHTOOL_STAT(rx_drp_fifo_full_pkts),
+ EMAC_ETHTOOL_STAT(rx_truncate_fifo_full_pkts),
+ EMAC_ETHTOOL_STAT(rx_dma_missed_frame_cnt),
+ EMAC_ETHTOOL_STAT(tx_tso_pkts),
+ EMAC_ETHTOOL_STAT(tx_tso_bytes),
+};
+
+/*
+ * Program MAC_GLOBAL_CONTROL duplex and speed bits from priv->speed /
+ * priv->duplex. An unsupported speed is logged and the speed field left
+ * untouched; always returns 0.
+ * NOTE(review): the speed bits are OR'ed in without first clearing the
+ * previous speed field — if MREGBIT_SPEED_* share a multi-bit field this
+ * could leave stale bits when re-negotiating downward; confirm the field
+ * layout in emac_eth.h.
+ */
+static int emac_set_speed_duplex(struct emac_priv *priv)
+{
+ u32 ctrl;
+
+ ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+ if (priv->duplex)
+ ctrl |= MREGBIT_FULL_DUPLEX_MODE;
+ else
+ ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
+
+ switch (priv->speed) {
+ case SPEED_1000:
+ ctrl |= MREGBIT_SPEED_1000M;
+ break;
+ case SPEED_100:
+ ctrl |= MREGBIT_SPEED_100M;
+ break;
+ case SPEED_10:
+ ctrl |= MREGBIT_SPEED_10M;
+ break;
+ default:
+ pr_err("broken speed: %d\n", priv->speed);
+ return 0;
+ }
+ emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+ pr_info("emac: force link speed:%dM duplex:%s\n",
+ priv->speed, priv->duplex ? "Full": "Half");
+
+ return 0;
+}
+
+/*
+ * Parse the device-tree fixed-link configuration and force the MAC to the
+ * resulting speed/duplex. Supports, in order of precedence:
+ *   1. managed = "in-band-status"  (link status left zeroed),
+ *   2. the new "fixed-link" child node (speed mandatory, optional duplex,
+ *      pause, asym-pause and phy-mode; defaults to RGMII),
+ *   3. the legacy 5-cell "fixed-link" property.
+ * Returns 0 on success, -EINVAL on a malformed new-style node, -ENODEV if
+ * no fixed-link configuration is present.
+ */
+static int emac_set_fixed_link(struct device_node *np, struct emac_priv *priv)
+{
+ struct fixed_phy_status status = {};
+ struct device_node *fixed_link_node;
+ u32 fixed_link_prop[5];
+ const char *managed;
+ int interface;
+
+ if (of_property_read_string(np, "managed", &managed) == 0 &&
+ strcmp(managed, "in-band-status") == 0) {
+ /* status is zeroed, namely its .link member */
+ goto fix_link;
+ }
+
+ /* New binding */
+ fixed_link_node = of_get_child_by_name(np, "fixed-link");
+ if (fixed_link_node) {
+ status.link = 1;
+ status.duplex = of_property_read_bool(fixed_link_node,
+ "full-duplex");
+ if (of_property_read_u32(fixed_link_node, "speed",
+ &status.speed)) {
+ of_node_put(fixed_link_node);
+ return -EINVAL;
+ }
+ status.pause = of_property_read_bool(fixed_link_node, "pause");
+ status.asym_pause = of_property_read_bool(fixed_link_node,
+ "asym-pause");
+ interface = of_get_phy_mode(fixed_link_node);
+ if (interface < 0) {
+ priv->interface = PHY_INTERFACE_MODE_RGMII;
+ pr_info("no interface for fix-link, use RGMII\n");
+ } else {
+ priv->interface = interface;
+ }
+
+ of_node_put(fixed_link_node);
+ goto fix_link;
+ }
+
+ /* Old binding: <phy-id duplex speed pause asym-pause> */
+ if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
+ ARRAY_SIZE(fixed_link_prop)) == 0) {
+ status.link = 1;
+ status.duplex = fixed_link_prop[1];
+ status.speed = fixed_link_prop[2];
+ status.pause = fixed_link_prop[3];
+ status.asym_pause = fixed_link_prop[4];
+ goto fix_link;
+ }
+
+ return -ENODEV;
+
+fix_link:
+ priv->speed = status.speed;
+ priv->duplex = status.duplex;
+
+ return emac_set_speed_duplex(priv);
+}
+
+/*
+ * Dump the EMAC DMA, MAC, 1588/PTP and (when enabled) TSO register windows
+ * via pr_info. Debug aid only.
+ *
+ * Fix vs. original: the DMA/MAC loop bounds were hard-coded 16/60 while
+ * the file defines EMAC_DMA_REG_CNT (16) and EMAC_MAC_REG_CNT (61) — the
+ * last MAC register was never dumped. Use the shared macros.
+ */
+void register_dump(struct emac_priv *priv)
+{
+ int i;
+ void __iomem *base = priv->iobase;
+
+ for (i = 0; i < EMAC_DMA_REG_CNT; i++) {
+ pr_info("DMA:0x%x:0x%x\n",
+ DMA_CONFIGURATION + i * 4,
+ readl(base + DMA_CONFIGURATION + i * 4));
+ }
+ for (i = 0; i < EMAC_MAC_REG_CNT; i++) {
+ pr_info("MAC:0x%x:0x%x\n",
+ MAC_GLOBAL_CONTROL + i * 4,
+ readl(base + MAC_GLOBAL_CONTROL + i * 4));
+ }
+
+ for (i = 0; i < 4; i++) {
+ pr_info("1588:0x%x:0x%x\n",
+ PTP_1588_CTRL + i * 4,
+ readl(base + PTP_1588_CTRL + i * 4));
+ }
+
+ for (i = 0; i < 6; i++) {
+ pr_info("1588:0x%x:0x%x\n",
+ SYS_TIME_GET_LOW + i * 4,
+ readl(base + SYS_TIME_GET_LOW + i * 4));
+ }
+ for (i = 0; i < 5; i++) {
+ pr_info("1588:0x%x:0x%x\n",
+ RX_TIMESTAMP_LOW + i * 4,
+ readl(base + RX_TIMESTAMP_LOW + i * 4));
+ }
+ for (i = 0; i < 2; i++) {
+ pr_info("1588:0x%x:0x%x\n",
+ PTP_1588_IRQ_STS + i * 4,
+ readl(base + PTP_1588_IRQ_STS + i * 4));
+ }
+
+ if (priv->tso) {
+ for (i = 0; i < 18; i++) {
+ pr_info("TSO:0x%x:0x%x\n", i * 4,
+ emac_rd_tso(priv, i * 4));
+ }
+ }
+}
+
+/*
+ * Hex-dump a packet buffer via pr_debug, eight bytes per line.
+ *
+ * Fixes vs. original: (1) the old loop always read a full group of eight
+ * bytes, reading up to 7 bytes past 'len' when len was not a multiple of
+ * 8; the tail is now printed byte-by-byte. (2) casting the pointer to
+ * unsigned int truncates on 64-bit builds — use %p instead.
+ */
+void print_pkt(unsigned char *buf, int len)
+{
+ int i;
+
+ pr_debug("data len = %d byte, buf addr: %p\n", len, buf);
+ for (i = 0; i + 8 <= len; i += 8) {
+ pr_debug("0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ *(buf + i),
+ *(buf + i + 1),
+ *(buf + i + 2),
+ *(buf + i + 3),
+ *(buf + i + 4),
+ *(buf + i + 5),
+ *(buf + i + 6),
+ *(buf + i + 7)
+ );
+ }
+ /* Remaining tail bytes (len % 8), one per line. */
+ for (; i < len; i++)
+ pr_debug("0x%02x\n", *(buf + i));
+}
+
+#ifdef EMAC_DEBUG
+/*
+ * Dump a descriptor as 32-bit little-endian words via pr_info.
+ * NOTE(review): assumes 'len' is a multiple of 4 and the cast of 'buf' to
+ * unsigned int truncates on 64-bit builds — debug-only path, confirm if
+ * ever enabled on a 64-bit target.
+ */
+void print_desc(unsigned char *buf, int len)
+{
+ int i;
+
+ pr_info("descriptor len = %d byte, buf addr: 0x%x\n",
+ len, (unsigned int)buf);
+ for (i = 0; i < len; i = i + 4) {
+ pr_info("0x%02x%02x%02x%02x\n",
+ *(buf + i + 3),
+ *(buf + i + 2),
+ *(buf + i + 1),
+ *(buf + i));
+ }
+}
+#else
+/* No-op stub when EMAC_DEBUG is disabled. */
+void print_desc(unsigned char *buf, int len)
+{
+
+}
+#endif
+
+/* Name emac_reset_hw
+ * Arguments priv : pointer to hardware data structure
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description Quiesce the controller: mask all MAC/DMA interrupts,
+ * disable the TX/RX units, stop the DMA engine, pulse the
+ * MAC/statistics reset bits in MAC_GLOBAL_CONTROL and
+ * re-program the MDIO clock divider. Serialized against
+ * MDIO access via mii_mutex.
+ */
+int emac_reset_hw(struct emac_priv *priv)
+{
+ mutex_lock(&priv->mii_mutex);
+ /* disable all the interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);
+
+ /* disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);
+
+ /* stop the DMA */
+ emac_wr(priv, DMA_CONTROL, 0x0000);
+
+ /* reset mac, statistic counters */
+ emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0018);
+
+ emac_wr(priv, MAC_GLOBAL_CONTROL, 0x0000);
+
+ /* MDIO divider must be restored after the global reset. */
+ emac_wr(priv, MAC_MDIO_CLK_DIV,
+ priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
+ mutex_unlock(&priv->mii_mutex);
+ return 0;
+}
+
+/* Name emac_init_hw
+ * Arguments pstHWData : pointer to hardware data structure
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description Bring the controller from its post-reset state to a
+ * configured-but-disabled state:
+ * - TX/RX units left disabled,
+ * - address-1 filtering enabled, multicast hash cleared,
+ * - TX FIFO almost-full and speed-dependent TX start
+ * thresholds programmed,
+ * - DMA soft-reset pulsed, then burst/64-bit/strict-burst
+ * configuration applied,
+ * - MDIO clock divider programmed.
+ */
+int emac_init_hw(struct emac_priv *priv)
+{
+ u32 val = 0, threshold;
+
+ mutex_lock(&priv->mii_mutex);
+ /* MAC Init
+ * disable transmit and receive units
+ */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0000);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0000);
+
+ /* enable mac address 1 filtering */
+ //emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0001);
+ emac_wr(priv, MAC_ADDRESS_CONTROL, 0x0100);
+
+ /* zero initialize the multicast hash table */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0000);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0000);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0000);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0000);
+
+ emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, EMAC_TX_FIFO_DWORDS - 8);
+
+ /* TX start threshold scales with link speed; slow links use
+ * store-and-forward.
+ */
+ if (priv->speed == SPEED_1000)
+ threshold = 1024;
+ else if (priv->speed == SPEED_100)
+ threshold = 256;
+ else
+ threshold = TX_STORE_FORWARD_MODE;
+ emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, threshold);
+
+ emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, 0xc);
+
+ /* reset dma */
+ emac_wr(priv, DMA_CONTROL, 0x0000);
+
+ /* Pulse the DMA soft-reset bit with settle delays. */
+ emac_wr(priv, DMA_CONFIGURATION, 0x01);
+ mdelay(10);
+ emac_wr(priv, DMA_CONFIGURATION, 0x00);
+ mdelay(10);
+
+ val |= MREGBIT_WAIT_FOR_DONE;
+ val |= MREGBIT_STRICT_BURST;
+ val |= MREGBIT_DMA_64BIT_MODE;
+ val |= MREGBIT_BURST_16WORD; //MREGBIT_BURST_1WORD;
+
+ emac_wr(priv, DMA_CONFIGURATION, val);
+
+ /* MDC Clock Division: AXI-312M/96 = 3.25M */
+ emac_wr(priv, MAC_MDIO_CLK_DIV,
+ priv->mdio_clk_div & MREGBIT_MAC_MDIO_CLK_DIV_MASK);
+
+ mutex_unlock(&priv->mii_mutex);
+
+ printk("MDIO clock div: 0x%x\n", emac_rd(priv, MAC_MDIO_CLK_DIV));
+ return 0;
+}
+
+/*
+ * Program station address 1 into the MAC, two octets per 16-bit register
+ * (the hardware stores the address low-byte-first within each register).
+ * Always returns 0.
+ */
+int emac_set_mac_addr(struct emac_priv *priv, unsigned char *addr)
+{
+ u32 high = (addr[1] << 8) | addr[0];
+ u32 med = (addr[3] << 8) | addr[2];
+ u32 low = (addr[5] << 8) | addr[4];
+
+ emac_wr(priv, MAC_ADDRESS1_HIGH, high);
+ emac_wr(priv, MAC_ADDRESS1_MED, med);
+ emac_wr(priv, MAC_ADDRESS1_LOW, low);
+
+ return 0;
+}
+
+/*
+ * Program the source MAC address used in generated flow-control (pause)
+ * frames, two octets per 16-bit register.
+ */
+void emac_set_fc_source_addr(struct emac_priv *priv, unsigned char *addr)
+{
+ u32 high = (addr[1] << 8) | addr[0];
+ u32 med = (addr[3] << 8) | addr[2];
+ u32 low = (addr[5] << 8) | addr[4];
+
+ emac_wr(priv, MAC_FC_SOURCE_ADDRESS_HIGH, high);
+ emac_wr(priv, MAC_FC_SOURCE_ADDRESS_MED, med);
+ emac_wr(priv, MAC_FC_SOURCE_ADDRESS_LOW, low);
+}
+
+/* Kick the TX DMA engine: writing the poll-demand register makes the DMA
+ * re-read the descriptor ring immediately.
+ */
+static inline void emac_dma_start_transmit(struct emac_priv *priv)
+{
+ emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 0xFF);
+}
+
+/* Kick the RX DMA engine (see emac_dma_start_transmit). */
+static inline void emac_dma_start_receive(struct emac_priv *priv)
+{
+ emac_wr(priv, DMA_RECEIVE_POLL_DEMAND, 0xFF);
+}
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+/*
+ * Re-arm DMA interrupts after NAPI polling completes.
+ * @tx: non-zero re-enables TX transfer-done; zero re-enables RX
+ * transfer-done + missed-frame, plus the TSO checksum interrupts when
+ * the TSO engine is present.
+ */
+void emac_enable_interrupt(struct emac_priv *priv, int tx)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+
+ if (tx) {
+ val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ } else {
+ val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
+ if (priv->tso)
+ emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
+ TSO_AP_RX_INTR_ENA_CSUM_DONE |
+ TSO_AP_RX_INTR_ENA_CSUM_ERR);
+ }
+
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+/*
+ * Mask DMA interrupts before entering NAPI polling; mirror image of
+ * emac_enable_interrupt().
+ */
+void emac_disable_interrupt(struct emac_priv *priv, int tx)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+
+ if (tx) {
+ val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ } else {
+ val &= ~(MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
+ if (priv->tso)
+ emac_wr_tso(priv, TSO_AP_RX_INTR_ENA, 0x0);
+ }
+
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+#endif
+
+/*
+ * Read the APMU clock/reset control register and report whether the MAC
+ * interface select bit is configured for RMII (bit clear) rather than
+ * RGMII (bit set).
+ *
+ * Fixes vs. original: the ioremap'd APMU window was never iounmap'd
+ * (mapping leaked on every call), and -ENOMEM was returned from a bool
+ * function (silently converting to 'true'). The mapping is now released,
+ * and the mapping-failure path explicitly returns true to preserve the
+ * previous caller-visible behaviour.
+ */
+bool emac_is_rmii_interface(struct emac_priv *priv)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem* apmu;
+ u32 val;
+
+ apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("error to ioremap APMU base\n");
+ /* Legacy code returned -ENOMEM here, which callers saw as
+ * 'true'; keep that behaviour explicitly.
+ */
+ return true;
+ }
+
+ val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+ iounmap(apmu);
+
+ return !(val & (0x1 << regdata->mac_intf_sel_shift));
+}
+
+/*
+ * Enable or disable the external PHY interrupt line at the APMU level by
+ * toggling phy_intr_enable_shift in the clock/reset control register.
+ * The APMU window is mapped per-call and released before returning.
+ */
+void emac_config_phy_interrupt(struct emac_priv *priv, int enable)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem* apmu;
+ u32 val;
+
+ apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("error to ioremap APMU base\n");
+ return;
+ }
+
+ val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+ if (enable)
+ val |= 0x1 << regdata->phy_intr_enable_shift;
+ else
+ val &= ~(0x1 << regdata->phy_intr_enable_shift);
+ writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+ iounmap(apmu);
+ return;
+}
+
+/*
+ * Select the MAC's PHY interface mode (RMII vs RGMII) in the APMU
+ * clock/reset control register, force the AXI-master single-ID bit, and
+ * record the chosen mode in priv->interface.
+ *
+ * Fix vs. original: corrected the "eamc" typo in the two log messages.
+ */
+void emac_phy_interface_config(struct emac_priv *priv, int phy_interface)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem* apmu;
+ u32 val;
+
+ apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("error to ioremap APMU base\n");
+ return;
+ }
+
+ val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+ if (PHY_INTERFACE_MODE_RMII == phy_interface) {
+ val &= ~(0x1 << regdata->mac_intf_sel_shift);
+ printk("===> set emac interface: rmii\n");
+ } else {
+ val |= 0x1 << regdata->mac_intf_sel_shift;
+ printk("===> set emac interface: rgmii\n");
+ }
+ val |= 0x1 << regdata->axi_mst_single_id_shift;
+ writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+
+ iounmap(apmu);
+ priv->interface = phy_interface;
+ return;
+}
+
+/*
+ * Select the EMAC IO pad voltage (3.3V vs 1.8V) through the AIB register,
+ * on SoCs that support dual-voltage power. Each AIB access must be
+ * preceded by writing the unlock key pair to APBC_ASFAR/ASSAR.
+ *
+ * Fix vs. original: the two ioremap() results were dereferenced without
+ * NULL checks; both are now validated and unmapped on every path.
+ */
+static void emac_set_aib_power_domain(struct emac_priv *priv)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem *aib_emac_io;
+ void __iomem *apbc_asfar;
+ u32 tmp;
+
+ if (!regdata->support_dual_vol_power)
+ return;
+
+ aib_emac_io = ioremap(AIB_GMAC_IO_REG, 4);
+ if (aib_emac_io == NULL) {
+ pr_err("%s: failed to ioremap AIB_GMAC_IO_REG\n", __func__);
+ return;
+ }
+ apbc_asfar = ioremap(APBC_ASFAR, 8);
+ if (apbc_asfar == NULL) {
+ pr_err("%s: failed to ioremap APBC_ASFAR\n", __func__);
+ iounmap(aib_emac_io);
+ return;
+ }
+
+ /* Unlock, then read the current pad configuration. */
+ writel(AKEY_ASFAR, apbc_asfar);
+ writel(AKEY_ASSAR, apbc_asfar + 4);
+ tmp = readl(aib_emac_io);
+
+ /* 0= power down, only set power down when vol = 0 */
+ if (priv->power_domain) {
+ tmp &= ~(0x1 << 2); /* 3.3v */
+ printk("===> emac set io to 3.3v\n");
+ } else {
+ tmp |= 0x1 << 2; /* 1.8v */
+ printk("===> emac set io to 1.8v\n");
+ }
+
+ /* Unlock again for the write-back. */
+ writel(AKEY_ASFAR, apbc_asfar);
+ writel(AKEY_ASSAR, apbc_asfar + 4);
+ writel(tmp, aib_emac_io);
+
+ /* Unlock and read back for verification in the log. */
+ writel(AKEY_ASFAR, apbc_asfar);
+ writel(AKEY_ASSAR, apbc_asfar + 4);
+ tmp = readl(aib_emac_io);
+ printk("===> emac AIB read back: 0x%x\n", tmp);
+
+ iounmap(apbc_asfar);
+ iounmap(aib_emac_io);
+}
+
+/*
+ * Delayed work that keeps transmitting max-time (0xFFFF) pause frames to
+ * the link partner while RX backpressure is required, re-arming itself
+ * every 20ms at 1000M / 300ms at 100M (pause time 0xFFFF stops the peer
+ * for ~34ms/~336ms respectively, so this keeps it continuously paused).
+ * Cancelled by emac_check_ring_and_send_pause() when the ring drains.
+ * NOTE(review): "fuc" in the name is a typo, but it is kept since the
+ * symbol may be referenced by INIT_DELAYED_WORK elsewhere in the file.
+ */
+static void emac_pause_generate_work_fuc(struct work_struct *work)
+{
+ struct emac_priv *priv= container_of(work, struct emac_priv, emac_pause_work.work);
+ int time_nxt = 0;
+ /* because pause time value = 0XFFFF,equal to stopping for 336ms(100M)/34ms(1000M) to transmit */
+ /* by a repeated testing, delay 20ms(1000M)/300ms(100M) satisfy making the neighbor stop transmission */
+ time_nxt = (priv->speed == SPEED_1000) ? 20 : 300;
+ if (!priv->pause.pause_time_max) {
+ emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0xffff);
+ priv->pause.pause_time_max = 1;
+ }
+
+ emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
+ schedule_delayed_work(&priv->emac_pause_work, msecs_to_jiffies(time_nxt));
+ return;
+}
+
+/*
+ * Software flow control based on RX ring occupancy. Looking ahead from
+ * the next-to-clean position:
+ * - if the descriptor 'high_water' slots ahead is already consumed
+ * (OWN == 0), the ring is close to full: start the repeating pause
+ * worker;
+ * - once the descriptor 'low_water' slots ahead is owned by hardware
+ * again, the ring has drained: cancel the worker and send a single
+ * zero-time pause frame to resume the link partner.
+ */
+static inline void emac_check_ring_and_send_pause(struct emac_priv *priv)
+{
+ int pos;
+ int high_water;
+ int low_water;
+ struct emac_rx_desc *rx_desc;
+ struct emac_desc_ring *rx_ring;
+
+ rx_ring = &priv->rx_ring;
+ pos = rx_ring->nxt_clean;
+ high_water = (pos + priv->pause.high_water) % priv->rx_ring.total_cnt;
+ low_water = (pos + priv->pause.low_water) % priv->rx_ring.total_cnt;
+
+ rx_desc = emac_get_rx_desc(priv, high_water);
+ if (priv->pause.pause_sending == 0 && rx_desc->OWN == 0) {
+ schedule_delayed_work(&priv->emac_pause_work, 0);
+ priv->pause.pause_sending = 1;
+ }
+
+ rx_desc = emac_get_rx_desc(priv, low_water);
+ if (rx_desc->OWN && priv->pause.pause_sending) {
+ cancel_delayed_work_sync(&priv->emac_pause_work);
+ /* Zero pause time un-pauses the peer immediately. */
+ emac_wr(priv, MAC_FC_PAUSE_TIME_VALUE, 0);
+ emac_wr(priv, MAC_FC_PAUSE_FRAME_GENERATE, 0x1);
+ priv->pause.pause_time_max = 0;
+ priv->pause.pause_sending = 0;
+ }
+}
+
+/* Name emac_sw_init
+ * Arguments priv : pointer to driver private data structure
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description Initialize software state: default RX buffer length and
+ * the driver's mutex/spinlocks. (The original comment about
+ * reading PCI configuration space was stale — this is a
+ * platform device; no PCI access happens here.)
+ */
+static int emac_sw_init(struct emac_priv *priv)
+{
+ priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
+
+ mutex_init(&priv->mii_mutex);
+ spin_lock_init(&priv->spStatsLock);
+ spin_lock_init(&priv->spTxLock);
+ spin_lock_init(&priv->intr_lock);
+
+ return 0;
+}
+
+/*
+ * Inspect a PTP event packet (L2 ETH_P_1588 or UDP port 319) and, because
+ * some silicon can only timestamp one message type at a time, re-program
+ * the hardware timestamp filter to catch the message type expected NEXT
+ * in the PTP exchange (e.g. after TX of SYNC, watch for DELAY_REQ).
+ *
+ * @txrx: 1 when called from the TX timestamp path, 0 for RX.
+ * Returns the packet's transportSpecific/messageType octet, or -1 if the
+ * packet is not a PTP event packet.
+ */
+static int emac_check_ptp_packet(struct emac_priv *priv,
+ struct sk_buff *skb, int txrx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct ptp_header *ptph = NULL;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ int msg_type, msg_id;
+ int ts;
+
+ if (eth->h_proto == htons(ETH_P_1588)) {
+ netdev_dbg(priv->ndev, "get PTP packet over ETH\n");
+ ptph = (struct ptp_header *)((u8 *)eth + sizeof(struct ethhdr));
+ } else if (eth->h_proto == htons(ETH_P_IP)) {
+ iph = (struct iphdr *)((u8 *)eth + sizeof(struct ethhdr));
+ if (iph->protocol != IPPROTO_UDP)
+ return -1;
+
+ /* PTP event messages use UDP port 319 on both ends. */
+ udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
+ if ((htons(udph->dest) != PTP_EVENT_PORT ||
+ htons(udph->source) != PTP_EVENT_PORT))
+ return -1;
+
+ netdev_dbg(priv->ndev, "get PTP packet over UDP\n");
+ ptph = (struct ptp_header *)((u8 *)udph + sizeof(struct udphdr));
+ } else {
+ return -1;
+ }
+
+ /* tsmt: high nibble = transportSpecific, low nibble = messageType. */
+ msg_id = -1;
+ ts = ptph->tsmt & 0xF0;
+ msg_type = (ptph->tsmt) & 0x0F;
+ if (txrx) {
+ /* TX path: arm the filter for the expected response. */
+ if (msg_type == MSG_SYNC) {
+ if (ts)
+ msg_id = MSG_PDELAY_REQ;
+ else
+ msg_id = MSG_DELAY_REQ;
+ } else if (msg_type == MSG_DELAY_REQ) {
+ msg_id = MSG_SYNC;
+ } else if (msg_type == MSG_PDELAY_REQ) {
+ msg_id = MSG_PDELAY_RESP;
+ /* Remember our identity to recognize the response. */
+ memcpy(&priv->sourcePortIdentity,
+ &ptph->sourcePortIdentity,
+ sizeof(struct PortIdentity));
+ } else if (msg_type == MSG_PDELAY_RESP) {
+ msg_id = MSG_PDELAY_REQ;
+ }
+ } else {
+ netdev_dbg(priv->ndev, "RX timestamp for message type %d\n",
+ ptph->tsmt);
+
+ if (msg_type == MSG_PDELAY_RESP) {
+ struct pdelay_resp_msg *presp = (struct pdelay_resp_msg *)ptph;
+
+ /*
+ * Change to monitor SYNC packet if pdelay response
+ * received for same clock indentity.
+ */
+ if (!memcmp(&presp->requestingPortIdentity.clockIdentity,
+ &priv->sourcePortIdentity.clockIdentity,
+ sizeof(struct ClockIdentity))) {
+ msg_id = MSG_SYNC;
+ }
+ }
+ }
+
+ /*
+ * Since some platform not support to timestamp two or more
+ * message type, so change here.
+ */
+ if (msg_id >= 0) {
+ if (priv->regdata->ptp_rx_ts_all_events) {
+ msg_id = ALL_EVENTS;
+ msg_id |= ts | ts << 8 | ts << 16 | ts << 24;
+ } else {
+ msg_id |= ts;
+ }
+
+ priv->hwptp->config_hw_tstamping(priv, 1, PTP_V2_L2_L4, msg_id);
+ }
+
+ return ptph->tsmt;
+}
+
+/* emac_get_tx_hwtstamp - get HW TX timestamps
+ * @priv: driver private structure
+ * @skb : the socket buffer
+ * Description :
+ * This function will read timestamp from the register & pass it to stack.
+ * and also perform some sanity checks.
+ * No-op unless TX timestamping is enabled and the skb was flagged
+ * SKBTX_IN_PROGRESS by the transmit path.
+ */
+static void emac_get_tx_hwtstamp(struct emac_priv *priv, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ u64 ns;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* exit if skb doesn't support hw tstamp */
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ /* Re-arm the HW filter for the next expected PTP message type. */
+ emac_check_ptp_packet(priv, skb, 1);
+
+ /* get the valid tstamp */
+ ns = priv->hwptp->get_tx_timestamp(priv);
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+
+ wmb();
+ netdev_dbg(priv->ndev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+
+ return;
+}
+
+/* emac_get_rx_hwtstamp - get HW RX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function will read received packet's timestamp from the descriptor
+ * and pass it to stack. It also perform some sanity checks.
+ * No-op unless RX timestamping is enabled and the descriptor carries a
+ * PTP timestamp (ptp_pkt + rx_timestamp flags set by hardware).
+ */
+static void emac_get_rx_hwtstamp(struct emac_priv *priv, struct emac_rx_desc *p,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ u64 ns;
+
+ if (!priv->hwts_rx_en)
+ return;
+
+ /* Check if timestamp is available */
+ if (p->ptp_pkt && p->rx_timestamp) {
+ /* Re-arm the HW filter for the next expected message type. */
+ emac_check_ptp_packet(priv, skb, 0);
+ ns = priv->hwptp->get_rx_timestamp(priv);
+ netdev_dbg(priv->ndev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_dbg(priv->ndev, "cannot get RX hw timestamp\n");
+ }
+}
+
+/**
+ * emac_hwtstamp_set - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable both outgoing(TX)
+ * and incoming(RX) packets time stamping based on user input.
+ * Return Value:
+ * 0 on success and an appropriate -ve integer on failure.
+ */
+static int emac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ struct timespec64 now;
+ u64 ns_ptp;
+ u32 ptp_event_msg_id = 0;
+ u32 rx_ptp_type = 0;
+
+ if (!priv->ptp_support) {
+ netdev_alert(priv->ndev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(struct hwtstamp_config)))
+ return -EFAULT;
+
+ netdev_dbg(priv->ndev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* time stamp no incoming packet at all */
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ /* PTP v1, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ptp_event_msg_id = MSG_SYNC;
+ rx_ptp_type = PTP_V1_L4_ONLY;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /* PTP v1, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ptp_event_msg_id = MSG_DELAY_REQ;
+ rx_ptp_type = PTP_V1_L4_ONLY;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* PTP v2, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ptp_event_msg_id = MSG_SYNC;
+ rx_ptp_type = PTP_V2_L2_L4;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* PTP v2, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ptp_event_msg_id = MSG_DELAY_REQ;
+ rx_ptp_type = PTP_V2_L2_L4;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* PTP v2/802.AS1 any layer, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ /*
+ * IF not support ALL EVENTS, default timestamp SYNC packet,
+ * changed to MSG_DELAY_REQ automactically if needed
+ */
+ if (priv->regdata->ptp_rx_ts_all_events)
+ ptp_event_msg_id = ALL_EVENTS;
+ else
+ ptp_event_msg_id = MSG_SYNC;
+
+ rx_ptp_type = PTP_V2_L2_L4;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ /* take time stamp for SYNC messages only */
+ ptp_event_msg_id = MSG_SYNC;
+ rx_ptp_type = PTP_V2_L2_L4;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ptp_event_msg_id = MSG_DELAY_REQ;
+ rx_ptp_type = PTP_V2_L2_L4;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+ if (!priv->hwts_tx_en && !priv->hwts_rx_en)
+ priv->hwptp->config_hw_tstamping(priv, 0, 0, 0);
+ else {
+
+ priv->hwptp->config_hw_tstamping(priv, 1,
+ rx_ptp_type, ptp_event_msg_id);
+
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+ priv->hwptp->init_systime(priv, timespec64_to_ns(&now));
+
+ /* program Increment reg */
+ priv->hwptp->config_systime_increment(priv);
+
+ ns_ptp = priv->hwptp->get_phc_time(priv);
+ ktime_get_real_ts64(&now);
+ /* check the diff between ptp timer and system time */
+ if (abs(timespec64_to_ns(&now) - ns_ptp) > 5000)
+ priv->hwptp->init_systime(priv,
+ timespec64_to_ns(&now));
+ }
+
+ memcpy(&priv->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+/**
+ * emac_hwtstamp_get - read hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function obtain the current hardware timestamping settings
+ as requested.
+ */
+static int emac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config *config = &priv->tstamp_config;
+
+ if (!priv->ptp_support)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
+/* Name emac_ioctl
+ * Arguments pstNetdev : pointer to net_device structure
+ * pstIfReq : pointer to interface request structure used.
+ * u32Cmd : IOCTL command number
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description It is called by upper layer and
+ * handling various task IOCTL commands.
+ */
+static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!ndev->phydev)
+ return -EINVAL;
+ ret = phy_mii_ioctl(ndev->phydev, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = emac_hwtstamp_set(ndev, rq);
+ break;
+ case SIOCGHWTSTAMP:
+ ret = emac_hwtstamp_get(ndev, rq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
/* emac_wakeup_handler - interrupt handler for the wake-on-LAN line.
 * @irq:    interrupt number
 * @dev_id: the net_device passed to request_irq()
 *
 * Returns IRQ_NONE when no wakeup mode was armed (the line is shared),
 * otherwise clears the armed wakeup modes so the event does not
 * retrigger and returns IRQ_HANDLED.
 */
static irqreturn_t emac_wakeup_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	/* Ensure the AXI bus clock is running before touching registers */
	emac_set_axi_bus_clock(priv, 1);
	ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
	if (!(ctrl & (MREGBIT_UNICAST_WAKEUP_MODE |
	    MREGBIT_MAGIC_PACKET_WAKEUP_MODE)))
		return IRQ_NONE;

	/* Disarm both wakeup modes now that the wake event fired */
	ctrl &= ~(MREGBIT_UNICAST_WAKEUP_MODE |
		  MREGBIT_MAGIC_PACKET_WAKEUP_MODE);
	emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
	return IRQ_HANDLED;
}
+
/* emac_irq_tso - interrupt handler for the TSO/checksum-offload block.
 * @irq:    interrupt number
 * @dev_id: the net_device passed to request_irq()
 *
 * Services three status sources in turn: RX checksum completion
 * (schedules the RX poller), TX TSO/checksum completion (kicks the TX
 * DMA), and the error status register.  Each status register is
 * acknowledged by writing the read value back (presumed
 * write-1-to-clear — confirm with the TSO block datasheet).
 */
static irqreturn_t emac_irq_tso(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;

	/* handle rx */
	status = emac_rd_tso(priv, TSO_AP_RX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_RX_INTR_STS=0x%x", status);

		if (status & TSO_AP_RX_INTR_ENA_CSUM_DONE) {
#ifdef CONFIG_ASR_EMAC_NAPI
			if (likely(napi_schedule_prep(&priv->rx_napi))) {
				unsigned long flags;

				/* Mask RX interrupts while NAPI runs */
				spin_lock_irqsave(&priv->intr_lock, flags);
				emac_disable_interrupt(priv, 0);
				spin_unlock_irqrestore(&priv->intr_lock, flags);
				__napi_schedule(&priv->rx_napi);
			}
#else
			emac_rx_clean_desc(priv);
#endif
		}

#ifdef EMAC_DEBUG
		if (status & TSO_AP_RX_INTR_ENA_CSUM_ERR)
			pr_err("rx checksum err irq\n");
#endif
		/* clear rx status */
		emac_wr_tso(priv, TSO_AP_RX_INTR_STS, status);
	}

	/* handle tx */
	status = emac_rd_tso(priv, TSO_AP_TX_INTR_STS);
	if (status) {
		emac_print("TSO_AP_TX_INTR_STS=0x%x\n", status);
		if (status & TSO_AP_TX_INTR_TSO_DONE) {
			emac_print("TX TSO done\n");
			emac_dma_start_transmit(priv);
		}

		if (status & TSO_AP_TX_INTR_CSUM_DONE) {
			emac_print("TX checksum done\n");
			emac_dma_start_transmit(priv);
		}

		/* clear tx status */
		emac_wr_tso(priv, TSO_AP_TX_INTR_STS, status);
	}

	/* handle err */
	status = emac_rd_tso(priv, TSO_ERR_INTR_STS);
	if (status) {
		pr_err("TSO: TX/RX ERR, status=0x%x\n", status);
		emac_wr_tso(priv, TSO_ERR_INTR_STS, status);
	}

	return IRQ_HANDLED;
}
+
+
/* Name		emac_interrupt_handler
 * Arguments	irq : irq number for which the interrupt is fired
 *		dev_id : pointer was passed to request_irq and same pointer is passed
 *		back to handler
 * Return	irqreturn_t : integer value
 * Description	Main DMA interrupt handler.  Reads DMA_STATUS_IRQ, defers
 *		TX/RX completion work to NAPI (or cleans inline without NAPI),
 *		and acknowledges the handled bits by writing them back
 *		(write-1-to-clear behavior presumed from usage — confirm
 *		against the EMAC datasheet).
 */
static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct emac_priv *priv = netdev_priv(ndev);
	u32 status;
	u32 clr = 0;

	/* read the status register for IRQ received */
	status = emac_rd(priv, DMA_STATUS_IRQ);

	/* Check if emac is up; if the interface is going down, just ack
	 * everything and bail out.
	 */
	if (test_bit(EMAC_DOWN, &priv->state)) {
		emac_wr(priv, DMA_STATUS_IRQ, status & 0x1F7);
		return IRQ_HANDLED;
	}

	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
#ifdef CONFIG_ASR_EMAC_NAPI
		if (likely(napi_schedule_prep(&priv->tx_napi))) {
			unsigned long flags;

			/* Mask TX interrupts until the TX poller is done */
			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 1);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->tx_napi);
		}
#else
		emac_tx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;

	if (status & (MREGBIT_RECEIVE_TRANSFER_DONE_IRQ |
	    MREGBIT_RECEIVE_MISSED_FRAME_IRQ)) {
		if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ)
			clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;

		if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
			clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;

		/* With TSO the offload engine must be told to resume
		 * polling RX descriptors.
		 */
		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);

#ifdef CONFIG_ASR_EMAC_NAPI
		if (likely(napi_schedule_prep(&priv->rx_napi))) {
			unsigned long flags;

			/* Mask RX interrupts until the RX poller is done */
			spin_lock_irqsave(&priv->intr_lock, flags);
			emac_disable_interrupt(priv, 0);
			spin_unlock_irqrestore(&priv->intr_lock, flags);
			__napi_schedule(&priv->rx_napi);
		}
#else
		emac_rx_clean_desc(priv);
#endif
	}

	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;

	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;

	/* Acknowledge only the bits we saw and handled */
	emac_wr(priv, DMA_STATUS_IRQ, clr);

	return IRQ_HANDLED;
}
+
+/* Name emac_command_options
+ * Arguments priv : pointer to driver private data structure
+ * Return none
+ * Description This function actually handles the command line para passed
+ * when the driver is loaded at the command prompt.
+ * It parses the parameters and validates them for valid values.
+ */
+void emac_command_options(struct emac_priv *priv)
+{
+ int pages = totalram_pages();
+
+ if (pages <= (EMAC_SMALL_RING_MEM_LIMIT >> PAGE_SHIFT))
+ priv->rx_ring.total_cnt = EMAC_SMALL_RX_RING_SIZE;
+ else
+ priv->rx_ring.total_cnt = EMAC_RX_RING_SIZE;
+ priv->tx_ring.total_cnt = EMAC_TX_RING_SIZE;
+
+ pr_info("emac: rx_ring=%d, tx_ring=%d, pages=%d\n",
+ priv->rx_ring.total_cnt, priv->tx_ring.total_cnt, pages);
+}
+
/* Name		emac_configure_tx
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Configures the transmit unit of the device
 */
static void emac_configure_tx(struct emac_priv *priv)
{
	u32 val;

	/* set the transmit base address */
	/* NOTE(review): base programmed as 32 bits — assumes descriptor DMA
	 * addresses fit in 32 bits on this SoC.
	 */
	val = (u32)(priv->tx_ring.desc_dma_addr);

	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);

	/* Tx Inter Packet Gap value and enable the transmit */
	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
	val &= (~MREGBIT_IFG_LEN);
	val |= MREGBIT_TRANSMIT_ENABLE;
	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);

	/* Auto-poll counter written as 0 — presumably disables periodic
	 * descriptor polling (TX is kicked explicitly); confirm with the
	 * EMAC datasheet.
	 */
	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x00);

	/* start tx dma */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
+
/* Name		emac_configure_rx
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Configures the receive unit of the device
 */
static void emac_configure_rx(struct emac_priv *priv)
{
	u32 val;

	/* set the receive base address */
	/* NOTE(review): base programmed as 32 bits — assumes descriptor DMA
	 * addresses fit in 32 bits on this SoC.
	 */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);

	/* enable the receive: store-and-forward mode with VLAN accounting */
	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
	val |= MREGBIT_RECEIVE_ENABLE;
	val |= MREGBIT_STORE_FORWARD;
	val |= MREGBIT_ACOOUNT_VLAN;
	emac_wr(priv, MAC_RECEIVE_CONTROL, val);

	/* start rx dma */
	val = emac_rd(priv, DMA_CONTROL);
	val |= MREGBIT_START_STOP_RECEIVE_DMA;
	emac_wr(priv, DMA_CONTROL, val);
}
+
+/* Name emac_clean_tx_desc_ring
+ * Arguments priv : pointer to driver private data structure
+ * Return none
+ * Description Freeing the TX resources allocated earlier.
+ */
+static void emac_clean_tx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct emac_desc_buffer *tx_buf;
+ u32 i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->total_cnt; i++) {
+ tx_buf = &tx_ring->desc_buf[i];
+
+ if (tx_buf->dma_addr) {
+ dma_unmap_page(&priv->pdev->dev,
+ tx_buf->dma_addr,
+ tx_buf->dma_len,
+ DMA_TO_DEVICE);
+ tx_buf->dma_addr = 0;
+ }
+
+ if (tx_buf->skb) {
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+ }
+ }
+
+ tx_ring->nxt_use = 0;
+ tx_ring->nxt_clean = 0;
+}
+
/* Name		emac_clean_rx_desc_ring
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Freeing the RX resources allocated earlier.
 */
static void emac_clean_rx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring;
	struct emac_desc_buffer *rx_buf;
	u32 i;

	rx_ring = &priv->rx_ring;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->total_cnt; i++) {
		rx_buf = &rx_ring->desc_buf[i];
		if (rx_buf->skb) {
			emac_unmap_single(&priv->pdev->dev,
					  rx_buf->dma_addr,
					  rx_buf->dma_len,
					  DMA_FROM_DEVICE);
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		/* In copy mode the bounce buffer was allocated by the
		 * driver and must be kfree'd; in no-copy mode buff_addr
		 * points into the skb data just freed above, so only the
		 * stale pointer is dropped.
		 */
		if (rx_buf->buff_addr) {
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
			kfree(rx_buf->buff_addr);
#endif
			rx_buf->buff_addr = NULL;
		}
	}

	rx_ring->nxt_clean = 0;
	rx_ring->nxt_use = 0;
}
+
+void emac_ptp_init(struct emac_priv *priv)
+{
+ int ret;
+
+ if (priv->ptp_support) {
+ ret = clk_prepare_enable(priv->ptp_clk);
+ if (ret < 0) {
+ pr_warning("ptp clock failed to enable \n");
+ priv->ptp_clk = NULL;
+ }
+
+ emac_ptp_register(priv);
+
+ if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+ priv->ptp_support = 0;
+ pr_warning("disable PTP due to clock not enabled\n");
+ }
+ }
+}
+
+void emac_ptp_deinit(struct emac_priv *priv)
+{
+ if (priv->ptp_support) {
+ if (priv->ptp_clk)
+ clk_disable_unprepare(priv->ptp_clk);
+
+ emac_ptp_unregister(priv);
+ }
+}
+
+static void emac_rx_timer_arm(struct emac_priv *priv)
+{
+ u32 rx_fill_timer = EMAC_RX_FILL_TIMER_US;
+
+ if (!rx_fill_timer)
+ return;
+
+ if (hrtimer_is_queued(&priv->rx_timer))
+ return;
+
+ hrtimer_start(&priv->rx_timer,
+ ns_to_ktime(rx_fill_timer) * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart emac_rx_timer(struct hrtimer *t)
+{
+ struct emac_priv *priv = container_of(t, struct emac_priv, rx_timer);
+ struct napi_struct *napi = &priv->rx_napi;
+
+ if (likely(napi_schedule_prep(napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->intr_lock, flags);
+ emac_disable_interrupt(priv, 0);
+ spin_unlock_irqrestore(&priv->intr_lock, flags);
+ __napi_schedule(napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void emac_tx_timer_arm(struct emac_priv *priv)
+{
+ u32 tx_coal_timer = EMAC_TX_COAL_TIMER_US;
+
+ if (!tx_coal_timer)
+ return;
+
+ if (hrtimer_is_queued(&priv->tx_timer))
+ return;
+
+ hrtimer_start(&priv->tx_timer,
+ ns_to_ktime(tx_coal_timer) * NSEC_PER_USEC,
+ HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart emac_tx_timer(struct hrtimer *t)
+{
+ struct emac_priv *priv = container_of(t, struct emac_priv, tx_timer);
+ struct napi_struct *napi = &priv->tx_napi;
+
+ if (priv->tso) {
+ emac_dma_start_transmit(priv);
+ return HRTIMER_NORESTART;
+ }
+
+ if (likely(napi_schedule_prep(napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->intr_lock, flags);
+ emac_disable_interrupt(priv, 1);
+ spin_unlock_irqrestore(&priv->intr_lock, flags);
+ __napi_schedule(napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+
/* emac_tso_config - reset and program the TSO/checksum-offload engine.
 * @priv: driver private data
 *
 * Points the offload block at the existing TX/RX descriptor rings,
 * allocates a header staging area (0x80 bytes per TX descriptor,
 * freed in emac_down()), then enables the engine and its interrupts.
 *
 * Returns 0 on success or -ENOMEM if the header area cannot be
 * allocated (the engine is left partially configured in that case).
 */
static int emac_tso_config(struct emac_priv *priv)
{
	struct emac_desc_ring * tx_ring = &priv->tx_ring;
	u32 val = 0;

	/* reset the offload block, then release it */
	emac_wr_tso(priv, TSO_CONFIG, TSO_CONFIG_RST);
	mdelay(1);
	emac_wr_tso(priv, TSO_CONFIG, 0x0);

	emac_wr_tso(priv, TSO_DMA_CONFIG, 0x2 << 8);

	/* rx */
	/* set the transmit base address */
	/* NOTE(review): base addresses are programmed shifted right by one;
	 * hardware-specific encoding — confirm with the TSO block datasheet.
	 */
	val = (u32)(priv->rx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_RX_DESC_BA, val >> 1);
	emac_wr_tso(priv, TSO_RX_AUTO_POLL_CNT, 0x0);

	/* tx */
	val = (u32)(priv->tx_ring.desc_dma_addr);
	emac_wr_tso(priv, TSO_TX_DESC_BA, val >> 1);

	/* one 0x80-byte header slot per TX descriptor */
	priv->tso_hdr = dma_alloc_coherent(&priv->pdev->dev,
					   tx_ring->total_cnt * 0x80,
					   &priv->tso_hdr_addr,
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->tso_hdr) {
		pr_err("Memory allocation failed for tso_hdr\n");
		return -ENOMEM;
	}

	val = (u32)(priv->tso_hdr_addr);
	emac_wr_tso(priv, TSO_TX_HDR_BA, val >> 1);
	emac_wr_tso(priv, TSO_TX_HDR_CTR, tx_ring->total_cnt);
	emac_wr_tso(priv, TSO_TX_AUTO_POLL_CNT, 0x0);

	/* enable tx/rx tso/coe */
	emac_wr_tso(priv, TSO_CONFIG,
		    TSO_CONFIG_RX_EN | TSO_CONFIG_TX_EN | TSO_CONFIG_RX_CSUM_EN);

	/* enable tx/rx/err interrupt */
	emac_wr_tso(priv, TSO_ERR_INTR_ENA, 0xF0007);
	emac_wr_tso(priv, TSO_AP_RX_INTR_ENA,
		    TSO_AP_RX_INTR_ENA_CSUM_DONE | TSO_AP_RX_INTR_ENA_CSUM_ERR);
#if 1
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA,
		    TSO_AP_TX_INTR_ENA_TSO_DONE | TSO_AP_TX_INTR_ENA_CSUM_DONE);
#else
	emac_wr_tso(priv, TSO_AP_TX_INTR_ENA, 0x0);
#endif
	return 0;
}
+
+/* Name emac_up
+ * Arguments priv : pointer to driver private data structure
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description This function is called from emac_open and
+ * performs the things when net interface is about to up.
+ * It configues the Tx and Rx unit of the device and
+ * registers interrupt handler.
+ * It also starts one watchdog timer to monitor
+ * the net interface link status.
+ */
+int emac_up(struct emac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int ret, val;
+#ifdef WAN_LAN_AUTO_ADAPT
+ u32 phy_id;
+#endif
+
+ priv->hw_stats->tx_tso_pkts = 0;
+ priv->hw_stats->tx_tso_bytes = 0;
+
+ ret = emac_phy_connect(ndev);
+ if (ret) {
+ pr_err("%s phy_connet failed\n", __func__);
+ return ret;
+ }
+
+ if (!priv->en_suspend)
+ pm_stay_awake(&priv->pdev->dev);
+ pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
+
+ clk_phase_set(priv, TX_PHASE);
+ clk_phase_set(priv, RX_PHASE);
+
+ /* init hardware */
+ emac_init_hw(priv);
+
+ emac_ptp_init(priv);
+
+ emac_set_mac_addr(priv, ndev->dev_addr);
+
+ emac_set_fc_source_addr(priv, ndev->dev_addr);
+
+ /* configure transmit unit */
+ emac_configure_tx(priv);
+ /* configure rx unit */
+ emac_configure_rx(priv);
+
+ /* allocate buffers for receive descriptors */
+ emac_alloc_rx_desc_buffers(priv);
+
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
+
+ /* allocates interrupt resources and
+ * enables the interrupt line and IRQ handling
+ */
+ ret = request_irq(priv->irq, emac_interrupt_handler,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret) {
+ pr_err("request_irq failed, ret=%d\n", ret);
+ goto request_irq_failed;
+ }
+
+ if (priv->irq_wakeup) {
+ ret = request_irq(priv->irq_wakeup, emac_wakeup_handler,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret) {
+ pr_err("request wakeup_irq failed, ret=%d\\n", ret);
+ goto request_wakeup_irq_failed;
+ }
+ }
+
+ if (priv->irq_tso) {
+ ret = request_irq(priv->irq_tso, emac_irq_tso,
+ IRQF_SHARED, "emac_tso", ndev);
+ if (ret) {
+ pr_err("request tso failed, ret=%d\\n", ret);
+ goto request_tso_irq_failed;
+ }
+ }
+
+ if (priv->fix_link)
+ emac_set_speed_duplex(priv);
+
+ clear_bit(EMAC_DOWN, &priv->state);
+
+ /* enable mac interrupt */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
+
+ /* both rx tx */
+ val = MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE;
+#if 0
+ val |= MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE;
+#endif
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+ napi_enable(&priv->rx_napi);
+ napi_enable(&priv->tx_napi);
+#endif
+
+ if (priv->fix_link && !netif_carrier_ok(ndev))
+ netif_carrier_on(ndev);
+
+#ifdef WAN_LAN_AUTO_ADAPT
+ phy_id = ndev->phydev->phy_id;
+ if(phy_id == IP175D_PHY_ID)
+ emac_sig_workq(CARRIER_UP_IP175D, 0);
+ else
+ emac_sig_workq(CARRIER_UP, 0);
+#endif
+
+ hrtimer_init(&priv->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->tx_timer.function = emac_tx_timer;
+ hrtimer_init(&priv->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->rx_timer.function = emac_rx_timer;
+
+ if (priv->tso)
+ emac_tso_config(priv);
+
+ netif_tx_start_all_queues(ndev);
+ return 0;
+
+request_tso_irq_failed:
+ if (priv->irq_wakeup)
+ free_irq(priv->irq_wakeup, ndev);
+
+request_wakeup_irq_failed:
+ free_irq(priv->irq, ndev);
+
+request_irq_failed:
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ }
+
+ return ret;
+}
+
/* Name		emac_down
 * Arguments	priv : pointer to driver private data structure
 * Return	Status: 0 - Success; non-zero - Fail
 * Description	This function is called from emac_close and
 *		performs the things when net interface is about to down.
 *		It frees the irq, removes the various timers.
 *		It sets the net interface off and
 *		resets the hardware. Cleans the Tx and Rx
 *		ring descriptor.
 */
int emac_down(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
#ifdef WAN_LAN_AUTO_ADAPT
	u32 phy_id;

	priv->dhcp = 0;
	priv->vlan_port = -1;
	priv->link = 0;
	/* Saved before phy_disconnect() below tears down ndev->phydev */
	phy_id = ndev->phydev->phy_id;
	if (priv->dhcp_delaywork) {
		cancel_delayed_work(&priv->dhcp_work);
		priv->dhcp_delaywork = 0;
	}
#endif
	/* Mark down first so the IRQ handler only acks from here on */
	set_bit(EMAC_DOWN, &priv->state);

	netif_tx_disable(ndev);

	hrtimer_cancel(&priv->tx_timer);
	hrtimer_cancel(&priv->rx_timer);
	/* Stop and disconnect the PHY */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	if (!priv->fix_link) {
		priv->duplex = DUPLEX_UNKNOWN;
		priv->speed = SPEED_UNKNOWN;
	}

#ifdef CONFIG_ASR_EMAC_NAPI
	napi_disable(&priv->rx_napi);
	napi_disable(&priv->tx_napi);
#endif
	/* Mask all MAC and DMA interrupt sources */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0000);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0000);

	free_irq(priv->irq, ndev);
	if (priv->irq_wakeup)
		free_irq(priv->irq_wakeup, ndev);

	emac_ptp_deinit(priv);

	emac_reset_hw(priv);
	netif_carrier_off(ndev);

#ifdef WAN_LAN_AUTO_ADAPT
	if (phy_id == IP175D_PHY_ID)
		emac_sig_workq(CARRIER_DOWN_IP175D, 0);
	else
		emac_sig_workq(CARRIER_DOWN, 0);
#endif

#ifdef CONFIG_ASR_EMAC_DDR_QOS
	flush_work(&priv->qos_work);
	pm_qos_update_request(&priv->clk_scaling.ddr_qos, PM_QOS_DEFAULT_VALUE);
#endif
	/* Drop the cpuidle constraint and the wakeup reference taken in
	 * emac_up()
	 */
	pm_qos_update_request(&priv->pm_qos_req,
			      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

	if (!priv->en_suspend)
		pm_relax(&priv->pdev->dev);

	/* Release the TSO header staging area allocated in
	 * emac_tso_config()
	 */
	if (priv->tso) {
		dma_free_coherent(&priv->pdev->dev,
				  priv->tx_ring.total_cnt * 0x80,
				  priv->tso_hdr,
				  priv->tso_hdr_addr);
	}

	return 0;
}
+
+/* Name emac_alloc_tx_resources
+ * Arguments priv : pointer to driver private data structure
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description Allocates TX resources and getting virtual & physical address.
+ */
+int emac_alloc_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct platform_device *pdev = priv->pdev;
+ u32 size;
+
+ size = sizeof(struct emac_desc_buffer) * tx_ring->total_cnt;
+
+ /* allocate memory */
+ tx_ring->desc_buf = kzalloc(size, GFP_KERNEL);
+ if (!tx_ring->desc_buf) {
+ pr_err("Memory allocation failed for the Transmit descriptor buffer\n");
+ return -ENOMEM;
+ }
+
+ memset(tx_ring->desc_buf, 0, size);
+
+ tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_tx_desc);
+
+ EMAC_ROUNDUP(tx_ring->total_size, 1024);
+
+ if (priv->sram_pool) {
+ tx_ring->desc_addr =
+ (void *)gen_pool_dma_alloc(
+ priv->sram_pool, tx_ring->total_size,
+ &tx_ring->desc_dma_addr);
+ tx_ring->in_sram = true;
+ }
+
+ if (!tx_ring->desc_addr) {
+ tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
+ tx_ring->total_size,
+ &tx_ring->desc_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tx_ring->desc_addr) {
+ pr_err("Memory allocation failed for the Transmit descriptor ring\n");
+ kfree(tx_ring->desc_buf);
+ return -ENOMEM;
+ }
+
+ if (priv->sram_pool) {
+ pr_err("sram pool left size not enough, tx fallback\n");
+ tx_ring->in_sram = false;
+ }
+ }
+
+ memset(tx_ring->desc_addr, 0, tx_ring->total_size);
+
+ tx_ring->nxt_use = 0;
+ tx_ring->nxt_clean = 0;
+
+ return 0;
+}
+
+/* Name emac_alloc_rx_resources
+ * Arguments priv : pointer to driver private data structure
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description Allocates RX resources and getting virtual & physical address.
+ */
+int emac_alloc_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct platform_device *pdev = priv->pdev;
+ u32 buf_len;
+
+ buf_len = sizeof(struct emac_desc_buffer) * rx_ring->total_cnt;
+
+ rx_ring->desc_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!rx_ring->desc_buf) {
+ pr_err("Memory allocation failed for the Receive descriptor buffer\n");
+ return -ENOMEM;
+ }
+
+ memset(rx_ring->desc_buf, 0, buf_len);
+
+ /* round up to nearest 4K */
+ rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_rx_desc);
+
+ EMAC_ROUNDUP(rx_ring->total_size, 1024);
+
+ if (priv->sram_pool) {
+ rx_ring->desc_addr =
+ (void *)gen_pool_dma_alloc(
+ priv->sram_pool, rx_ring->total_size,
+ &rx_ring->desc_dma_addr);
+ rx_ring->in_sram = true;
+ }
+
+ if (!rx_ring->desc_addr) {
+ rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev,
+ rx_ring->total_size,
+ &rx_ring->desc_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!rx_ring->desc_addr) {
+ pr_err("Memory allocation failed for the Receive descriptor ring\n");
+ kfree(rx_ring->desc_buf);
+ return -ENOMEM;
+ }
+
+ if (priv->sram_pool) {
+ pr_err("sram pool left size not enough, rx fallback\n");
+ rx_ring->in_sram = false;
+ }
+ }
+
+ memset(rx_ring->desc_addr, 0, rx_ring->total_size);
+
+ rx_ring->nxt_use = 0;
+ rx_ring->nxt_clean = 0;
+
+ return 0;
+}
+
+/* Name emac_free_tx_resources
+ * Arguments priv : pointer to driver private data structure
+ * Return none
+ * Description Frees the Tx resources allocated
+ */
+void emac_free_tx_resources(struct emac_priv *priv)
+{
+ emac_clean_tx_desc_ring(priv);
+ kfree(priv->tx_ring.desc_buf);
+ priv->tx_ring.desc_buf = NULL;
+ if (priv->tx_ring.in_sram)
+ gen_pool_free(priv->sram_pool,
+ (unsigned long) priv->tx_ring.desc_addr,
+ priv->tx_ring.total_size);
+ else
+ dma_free_coherent(&priv->pdev->dev, priv->tx_ring.total_size,
+ priv->tx_ring.desc_addr,
+ priv->tx_ring.desc_dma_addr);
+ priv->tx_ring.desc_addr = NULL;
+}
+
+/* Name emac_free_rx_resources
+ * Arguments priv : pointer to driver private data structure
+ * Return none
+ * Description Frees the Rx resources allocated
+ */
+void emac_free_rx_resources(struct emac_priv *priv)
+{
+ emac_clean_rx_desc_ring(priv);
+ kfree(priv->rx_ring.desc_buf);
+ priv->rx_ring.desc_buf = NULL;
+ if (priv->rx_ring.in_sram)
+ gen_pool_free(priv->sram_pool,
+ (unsigned long) priv->rx_ring.desc_addr,
+ priv->rx_ring.total_size);
+ else
+ dma_free_coherent(&priv->pdev->dev, priv->rx_ring.total_size,
+ priv->rx_ring.desc_addr,
+ priv->rx_ring.desc_dma_addr);
+ priv->rx_ring.desc_addr = NULL;
+}
+
/* emac_open - ndo_open callback: allocate the rings and bring the MAC up.
 * @ndev: net device being opened
 *
 * Sets up the TX and RX descriptor resources and then calls emac_up().
 * On any failure everything acquired so far is released and the
 * hardware is reset.  Returns 0 on success or a negative errno.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ret;

	ret = emac_alloc_tx_resources(priv);
	if (ret) {
		pr_err("Error in setting up the Tx resources\n");
		goto err_reset;
	}

	ret = emac_alloc_rx_resources(priv);
	if (ret) {
		pr_err("Error in setting up the Rx resources\n");
		goto err_free_tx;
	}

	ret = emac_up(priv);
	if (ret) {
		pr_err("Error in making the net intrface up\n");
		goto err_free_rx;
	}

	return 0;

err_free_rx:
	emac_free_rx_resources(priv);
err_free_tx:
	emac_free_tx_resources(priv);
err_reset:
	emac_reset_hw(priv);
	return ret;
}
+
/* emac_close - ndo_stop callback: stop the MAC and release ring memory.
 * @ndev: net device being closed
 *
 * Quiesces the hardware first, then frees the TX and RX descriptor
 * resources.  Always returns 0.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	emac_down(priv);
	emac_free_tx_resources(priv);
	emac_free_rx_resources(priv);

	return 0;
}
+
/* Name		emac_tx_clean_desc
 * Arguments	priv : pointer to driver private data structure
 *		budget : (NAPI builds only) max packets to reclaim
 * Return	number of packets reclaimed
 * Description	Walks completed TX packets from nxt_clean towards nxt_use.
 *		Each packet may span several descriptors; nxt_watch on the
 *		first descriptor's buffer records the index of the packet's
 *		last descriptor.  A packet is complete once the DMA clears
 *		OWN on that last descriptor (and, with TSO, once the
 *		offload engine clears the tso/coe bits).  Wakes the TX
 *		queue when enough descriptors become free.
 */
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_tx_clean_desc(struct emac_priv *priv, int budget)
#else
static int emac_tx_clean_desc(struct emac_priv *priv)
#endif
{
	struct emac_desc_ring *tx_ring;
	struct emac_tx_desc *tx_desc, *end_desc;
	struct emac_desc_buffer *tx_buf;
	struct net_device *ndev = priv->ndev;
	u32 i, u32LastIndex;
	u8 u8Cleaned;
	unsigned int count = 0;

	tx_ring = &priv->tx_ring;
	i = tx_ring->nxt_clean;
	do {
		/* Ring empty: nothing between clean and use pointers */
		if (i == tx_ring->nxt_use)
			break;

		/* Check the packet's LAST descriptor; the DMA clears OWN
		 * there only after the whole packet went out.
		 */
		u32LastIndex = tx_ring->desc_buf[i].nxt_watch;
		end_desc = emac_get_tx_desc(priv, u32LastIndex);
		if (end_desc->OWN == 1 ||
		    (priv->tso && (end_desc->tso || end_desc->coe)))
			break;

		/* Reclaim every descriptor of this packet */
		u8Cleaned = false;
		for ( ; !u8Cleaned; count++) {
			tx_desc = emac_get_tx_desc(priv, i);
			tx_buf = &tx_ring->desc_buf[i];

			emac_get_tx_hwtstamp(priv, tx_buf->skb);

			/* own bit will be reset to 0 by dma
			 * once packet is transmitted
			 */
			if (tx_buf->dma_addr) {
				dma_unmap_page(&priv->pdev->dev,
					       tx_buf->dma_addr,
					       tx_buf->dma_len,
					       DMA_TO_DEVICE);
				tx_buf->dma_addr = 0;
			}
			if (tx_buf->skb) {
				dev_kfree_skb_any(tx_buf->skb);
				tx_buf->skb = NULL;
			}
			if (tx_buf->buff_addr)
				tx_buf->buff_addr = NULL;

			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
			u8Cleaned = (i == u32LastIndex);
			if (++i == tx_ring->total_cnt)
				i = 0;
		}

#ifdef CONFIG_ASR_EMAC_NAPI
		/* NOTE(review): budget is checked between packets, so the
		 * reported count is clamped rather than exact.
		 */
		if (count >= budget) {
			count = budget;
			break;
		}
#endif
	} while (1);
	tx_ring->nxt_clean = i;

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_lock(&priv->spTxLock);
#endif
	/* Restart the queue once enough descriptors are free again */
	if (unlikely(count && netif_queue_stopped(ndev) &&
	    netif_carrier_ok(ndev) &&
	    EMAC_DESC_UNUSED(tx_ring) >= EMAC_TX_WAKE_THRESHOLD))
		netif_wake_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock(&priv->spTxLock);
#endif
	return count;
}
+
+static int emac_rx_frame_status(struct emac_priv *priv, struct emac_rx_desc *dsc)
+{
+ /* if last descritpor isn't set, so we drop it*/
+ if (!dsc->LastDescriptor) {
+ netdev_dbg(priv->ndev, "rx LD bit isn't set, drop it.\n");
+ return frame_discard;
+ }
+
+ /*
+ * A Frame that is less than 64-bytes (from DA thru the FCS field)
+ * is considered as Runt Frame.
+ * Most of the Runt Frames happen because of collisions.
+ */
+ if (dsc->ApplicationStatus & EMAC_RX_FRAME_RUNT) {
+ netdev_dbg(priv->ndev, "rx frame less than 64.\n");
+ return frame_discard;
+ }
+
+ /*
+ * When the frame fails the CRC check,
+ * the frame is assumed to have the CRC error
+ */
+ if (dsc->ApplicationStatus & EMAC_RX_FRAME_CRC_ERR) {
+ netdev_dbg(priv->ndev, "rx frame crc error\n");
+ return frame_discard;
+ }
+
+ if (priv->tso && dsc->csum_res == EMAC_CSUM_FAIL) {
+ netdev_dbg(priv->ndev, "COE: rx frame checksum error\n");
+ return frame_discard;
+ }
+
+ /*
+ * When the length of the frame exceeds
+ * the Programmed Max Frame Length
+ */
+ if (dsc->ApplicationStatus & EMAC_RX_FRAME_MAX_LEN_ERR) {
+ netdev_dbg(priv->ndev, "rx frame too long\n");
+ return frame_discard;
+ }
+
+ /*
+ * frame reception is truncated at that point and
+ * frame is considered to have Jabber Error
+ */
+ if (dsc->ApplicationStatus & EMAC_RX_FRAME_JABBER_ERR) {
+ netdev_dbg(priv->ndev, "rx frame has been truncated\n");
+ return frame_discard;
+ }
+
+ /* this bit is only for 802.3 Type Frames */
+ if (dsc->ApplicationStatus & EMAC_RX_FRAME_LENGTH_ERR) {
+ netdev_dbg(priv->ndev, "rx frame length err for 802.3\n");
+ return frame_discard;
+ }
+
+ if (dsc->FramePacketLength <= ETHERNET_FCS_SIZE ||
+ dsc->FramePacketLength > EMAC_RX_BUFFER_2048) {
+ netdev_dbg(priv->ndev, "rx frame len too small or too long\n");
+ return frame_discard;
+ }
+ return frame_ok;
+}
+
/* Name		emac_rx_clean_desc
 * Arguments	priv : pointer to driver private data structure
 *		budget : NAPI budget (CONFIG_ASR_EMAC_NAPI builds only)
 * Return	packets delivered to the stack (NAPI) or 0 (non-NAPI)
 * Description	Reap completed RX descriptors (OWN bit cleared by HW),
 *		validate each frame, queue good skbs on priv->rx_skb,
 *		refill the ring, then pass queued skbs up the stack.
 */
#ifdef CONFIG_ASR_EMAC_NAPI
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
#else
static int emac_rx_clean_desc(struct emac_priv *priv)
#endif
{
	struct emac_desc_ring *rx_ring;
	struct emac_desc_buffer *rx_buf;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc *rx_desc;
	struct sk_buff *skb = NULL;
	int status;
#ifdef CONFIG_ASR_EMAC_NAPI
	u32 receive_packet = 0;
#endif
	u32 i;
	u32 u32Len;
	u32 u32Size;
	u8 *pu8Data;
#ifdef WAN_LAN_AUTO_ADAPT
	int port = -1, vlan = -1;
	struct vlan_hdr *vhdr;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
#endif

	rx_ring = &priv->rx_ring;
	i = rx_ring->nxt_clean;
	rx_desc = emac_get_rx_desc(priv, i);
	u32Size = 0;

	/* Manual (non-autonegotiated) TX flow control: emit a pause
	 * frame if the ring is running low on free descriptors. */
	if (priv->pause.tx_pause && !priv->pause.fc_auto)
		emac_check_ring_and_send_pause(priv);

	/* OWN == 0 means the hardware has finished with this descriptor. */
	while (rx_desc->OWN == 0) {
		/* With the TSO/COE engine, wait until the checksum unit
		 * has also stamped the descriptor before consuming it. */
		if (priv->tso && !rx_desc->csum_done)
			break;

		/* Bound the staging queue to one ring's worth of skbs. */
		if (skb_queue_len(&priv->rx_skb) > priv->rx_ring.total_cnt)
			break;

		rx_buf = &rx_ring->desc_buf[i];
		if (!rx_buf->skb)
			break;

		emac_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				  rx_buf->dma_len, DMA_FROM_DEVICE);
		status = emac_rx_frame_status(priv, rx_desc);
		if (unlikely(status == frame_discard)) {
			ndev->stats.rx_dropped++;
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		} else {
			skb = rx_buf->skb;
			/* Strip the 4-byte FCS the MAC leaves in the buffer. */
			u32Len = rx_desc->FramePacketLength - ETHERNET_FCS_SIZE;

			pu8Data = skb_put(skb, u32Len);
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
			/* Copy mode: DMA landed in a separate bounce buffer. */
			memcpy(pu8Data, (u8 *)rx_buf->buff_addr, u32Len);
#endif
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			emac_get_rx_hwtstamp(priv, rx_desc, skb);

			skb->protocol = eth_type_trans(skb, ndev);
			/* HW validated L3/L4 checksum when COE is on. */
			if (priv->tso)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				skb->ip_summed = CHECKSUM_NONE;

#ifdef WAN_LAN_AUTO_ADAPT
			{/* Special tag format: DA-SA-0x81-xx-data.
			    Bit 7-3 Packet Information
			     - bit 4: Reserved
			     - bit 3: Reserved
			     - bit 2: Miss address table
			     - bit 1: Security violation
			     - bit 0: VLAN violation
			    Bit 2-0 Ingress Port number
			     - b000: Disabled
			     - b001: Port 0
			     - b010: Port 1
			     - b011: Port 2
			     - b100: Port 3
			     - b101: Port 4
			     - Other: Reserved */
			/* Switch-inserted ingress tag (0x81xx): recover the
			 * ingress port, then treat the frame as 802.1Q. */
			if(ntohs(skb->protocol)>>8 == 0x81) {
				port = ntohs(skb->protocol) & 0x7;
				if(port > 0 && port <= 0x5) {
					skb->protocol = htons(ETH_P_8021Q);
					port = port - 1;
				}
			}
			if (skb->protocol == htons(ETH_P_8021Q)) {
				vhdr = (struct vlan_hdr *) skb->data;
				vlan = ntohs(vhdr->h_vlan_TCI);
				iph = (struct iphdr *)(skb->data + VLAN_HLEN);
			} else if (skb->protocol == htons(ETH_P_IP))
				iph = (struct iphdr *)skb->data;

			/* Snoop DHCP server->client replies (sport 67, dport
			 * 68) to learn which switch port faces the WAN. */
			if (iph && iph->protocol == IPPROTO_UDP) {
				udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
				if ((htons(udph->dest) == 68 && htons(udph->source) == 67)) {
					u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
					/* offset 242: DHCP message-type option
					 * value in a standard BOOTP layout —
					 * assumes no preceding variable-length
					 * options; TODO confirm */
					u8 dhcp_type = *(udp_data + 242);
					if ((DHCP_ACK == dhcp_type || DHCP_OFFER == dhcp_type)
						&& (DHCP_SEND_REQ == priv->dhcp)) {
						priv->dhcp = DHCP_REC_RESP;
						if (ndev->phydev->phy_id == IP175D_PHY_ID)
							priv->vlan_port = port;
						else
							priv->vlan_port = -1;
					}
				}
			}
			}
#endif
			skb_queue_tail(&priv->rx_skb, skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;

		rx_desc = emac_get_rx_desc(priv, i);

		/* restart RX COE */
		if (priv->tso)
			emac_wr_tso(priv, TSO_RX_POLL_DEMAND, 0xFF);
	}

	rx_ring->nxt_clean = i;

	emac_alloc_rx_desc_buffers(priv);

	/*
	 * Since netif_rx may consume too much time, put this after
	 * emac_alloc_rx_desc_buffers so that RX DMA desc refill ASAP,
	 * reduce packet loss probability.
	 */
	while ((skb = skb_dequeue(&priv->rx_skb))) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += skb->len;
#ifdef CONFIG_ASR_EMAC_NAPI
		napi_gro_receive(&priv->rx_napi, skb);
#else
		netif_rx(skb);
#endif

#ifdef CONFIG_ASR_EMAC_NAPI
		receive_packet++;
		if (receive_packet >= budget)
			break;
#endif
	}

#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif

#ifdef CONFIG_ASR_EMAC_NAPI
	return receive_packet;
#else
	return 0;
#endif
}
+
/* Name		emac_alloc_rx_desc_buffers
 * Arguments	priv : pointer to driver private data structure
 * Return	none
 * Description	Refill empty RX ring slots: allocate an skb (and, in
 *		copy mode, a DMA bounce buffer) per slot, map it for
 *		DMA, rebuild the descriptor, and hand it back to the
 *		hardware by setting OWN. Sets priv->refill when an
 *		allocation fails so the caller can retry via a timer.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc_buffer *rx_buf;
	struct sk_buff *skb;
	struct emac_rx_desc *rx_desc;
	u32 i;
#ifndef CONFIG_ASR_EMAC_RX_NO_COPY
	void *buff;
#endif
	u32 buff_len;
	int fail_cnt = 0;

	i = rx_ring->nxt_use;
	rx_buf = &rx_ring->desc_buf[i];

	buff_len = priv->u32RxBufferLen;

	/* Walk forward until we hit a slot that still holds an skb. */
	while (!rx_buf->skb) {
		skb = emac_skbrb_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
		if (!skb) {
			/* Ring-buffer pool exhausted: fall back to the
			 * generic allocator, but only for the default
			 * ring size configuration. */
			if (priv->rx_ring.total_cnt == EMAC_RX_RING_SIZE)
				skb = dev_alloc_skb(EMAC_SKBRB_SLOT_SIZE);
			if (!skb) {
				fail_cnt++;
				pr_warn_ratelimited("emac sk_buff allocation failed\n");
				break;
			}
		}

		/* make buffer alignment */
		skb_reserve(skb, NET_IP_ALIGN + EMAC_EXTRA_ROOM);
		skb->dev = ndev;

#ifdef CONFIG_ASR_EMAC_RX_NO_COPY
		/* Zero-copy: DMA straight into the skb data area. */
		rx_buf->buff_addr = skb->data;
#else
		/* Copy mode: DMA into a reusable GFP_DMA bounce buffer
		 * (allocated once per slot, kept across refills). */
		if (!rx_buf->buff_addr) {
			buff = kmalloc(buff_len, GFP_ATOMIC | GFP_DMA);
			if (!buff) {
				pr_err("kmalloc failed\n");
				dev_kfree_skb(skb);
				break;
			}
			rx_buf->buff_addr = buff;
		}
#endif
		rx_buf->skb = skb;
		rx_buf->dma_len = buff_len;
		rx_buf->dma_addr = emac_map_single(&priv->pdev->dev,
						   rx_buf->buff_addr,
						   buff_len,
						   DMA_FROM_DEVICE);

		rx_desc = emac_get_rx_desc(priv, i);
		rx_desc->BufferAddr1 = rx_buf->dma_addr;
		rx_desc->BufferSize1 = rx_buf->dma_len;
		rx_desc->rx_timestamp = 0;
		rx_desc->ptp_pkt = 0;
		rx_desc->FirstDescriptor = 0;
		rx_desc->LastDescriptor = 0;
		rx_desc->FramePacketLength = 0;
		rx_desc->ApplicationStatus = 0;
		if (++i == rx_ring->total_cnt) {
			rx_desc->EndRing = 1;
			i = 0;
		}

		/* Ensure all descriptor fields are visible to the DMA
		 * engine before ownership is transferred via OWN. */
		wmb();
		rx_desc->OWN = 1;
		if (priv->tso)
			rx_desc->csum_done = 0;

		rx_buf = &rx_ring->desc_buf[i];
	}
	rx_ring->nxt_use = i;

	if (fail_cnt)
		priv->refill = 1;
	else
		priv->refill = 0;
	emac_dma_start_receive(priv);
}
+
+#ifdef CONFIG_ASR_EMAC_NAPI
+static int emac_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct emac_priv *priv = container_of(napi, struct emac_priv, rx_napi);
+ int work_done;
+
+ work_done = emac_rx_clean_desc(priv, budget);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->intr_lock, flags);
+ emac_enable_interrupt(priv, 0);
+ spin_unlock_irqrestore(&priv->intr_lock, flags);
+
+ if (priv->refill)
+ emac_rx_timer_arm(priv);
+ }
+
+ return work_done;
+}
+
+static int emac_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct emac_priv *priv = container_of(napi, struct emac_priv, tx_napi);
+ int work_done;
+
+ work_done = emac_tx_clean_desc(priv, budget);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->intr_lock, flags);
+ emac_enable_interrupt(priv, 1);
+ spin_unlock_irqrestore(&priv->intr_lock, flags);
+ }
+
+ return work_done;
+}
+#endif
+
/* Name		emac_tx_mem_map
 * Arguments	priv : pointer to driver private data structure
 *		skb : pointer to sk_buff structure passed by upper layer
 *		max_tx_len : max data len per descriptor buffer
 *		frag_num : number of page fragments in the packet
 *		ioc : request an interrupt-on-completion for this packet
 * Return	number of descriptors consumed for the packet
 * Description	Map the linear part and each page fragment of @skb into
 *		TX descriptors. Each descriptor carries two buffers
 *		(Addr1/Addr2), tracked via use_buf2; OWN is withheld on
 *		the first descriptor until all others are set up, then
 *		flipped last so DMA never sees a half-built chain.
 */
static int emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb,
			   u32 max_tx_len, u32 frag_num, int ioc)
{
	struct emac_desc_ring *tx_ring;
	struct emac_desc_buffer *tx_buf;
	struct emac_tx_desc *tx_desc, *first_desc;
	u32 skb_len;
	u32 u32Offset, u32Size, i;
	u32 use_desc_cnt;
	u32 f;
	void *pvPtr;
	u32 cur_desc_addr;
	u32 cur_desc_idx;
	u8 do_tx_timestamp = 0;
	bool use_buf2 = 0;

	u32Offset = 0;
	use_desc_cnt = 0;

	skb_tx_timestamp(skb);
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		do_tx_timestamp = 1;
	}

	tx_ring = &priv->tx_ring;
	/* Length of the linear (non-paged) portion of the skb. */
	skb_len = skb->len - skb->data_len;
	i = cur_desc_idx = tx_ring->nxt_use;
	cur_desc_addr = emac_rd(priv, DMA_TRANSMIT_BASE_ADDRESS);
	/* NOTE(review): if skb_len were 0 here (all data in frags),
	 * first_desc would be used uninitialized below — presumably
	 * callers guarantee a non-empty linear area; verify. */
	while (skb_len > 0) {
		u32Size = min(skb_len, max_tx_len);
		skb_len -= u32Size;

		tx_buf = &tx_ring->desc_buf[i];
		tx_buf->dma_len = u32Size;
		pvPtr = skb->data + u32Offset;
		tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pvPtr,
						   u32Size, DMA_TO_DEVICE);
		tx_buf->buff_addr = pvPtr;
		tx_buf->ulTimeStamp = jiffies;

		tx_desc = emac_get_tx_desc(priv, i);

		/* Alternate between buffer 2 (same descriptor) and a
		 * fresh descriptor's buffer 1. */
		if (use_buf2) {
			tx_desc->BufferAddr2 = tx_buf->dma_addr;
			tx_desc->BufferSize2 = tx_buf->dma_len;
			i++;
			use_buf2 = 0;
		} else {
			memset(tx_desc, 0, sizeof(struct emac_tx_desc));
			tx_desc->BufferAddr1 = tx_buf->dma_addr;
			tx_desc->BufferSize1 = tx_buf->dma_len;
			use_buf2 = 1;
		}

		if (use_desc_cnt == 0) {
			first_desc = tx_desc;
			tx_desc->FirstSegment = 1;
			if (do_tx_timestamp)
				tx_desc->tx_timestamp = 1;
		}

		if (skb_len == 0 && frag_num == 0) {
			tx_desc->LastSegment = 1;
			tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
		}

		/* Wrap only after both buffers of a descriptor are used. */
		if (!use_buf2 && i == tx_ring->total_cnt) {
			tx_desc->EndRing = 1;
			i = 0;
		}

		/* trigger first desc OWN bit later */
		use_desc_cnt++;
		if (use_desc_cnt > 2)
			tx_desc->OWN = 1;

		u32Offset += u32Size;
	}

	/* if the data is fragmented */
	for (f = 0; f < frag_num; f++) {
		skb_frag_t *frag;

		frag = &(skb_shinfo(skb)->frags[f]);
		skb_len = skb_frag_size(frag);
		u32Offset = skb_frag_off(frag);

		while (skb_len) {
			u32Size = min(skb_len, max_tx_len);
			skb_len -= u32Size;

			tx_buf = &tx_ring->desc_buf[i];
			tx_buf->dma_len = u32Size;
			tx_buf->dma_addr =
				dma_map_page(&priv->pdev->dev,
					     skb_frag_page(frag),
					     u32Offset,
					     u32Size,
					     DMA_TO_DEVICE);
			tx_buf->ulTimeStamp = jiffies;

			tx_desc = emac_get_tx_desc(priv, i);
			if (use_buf2) {
				tx_desc->BufferAddr2 = tx_buf->dma_addr;
				tx_desc->BufferSize2 = tx_buf->dma_len;
				i++;
				use_buf2 = 0;
			} else {
				memset(tx_desc, 0, sizeof(struct emac_tx_desc));
				tx_desc->BufferAddr1 = tx_buf->dma_addr;
				tx_desc->BufferSize1 = tx_buf->dma_len;
				use_buf2 = 1;
			}

			if (skb_len == 0 && f == (frag_num - 1)) {
				tx_desc->LastSegment = 1;
				tx_desc->InterruptOnCompletion = ioc ? 1 : 0;
			}

			if (!use_buf2 && i == tx_ring->total_cnt) {
				tx_desc->EndRing = 1;
				i = 0;
			}

			/* trigger first desc OWN bit later */
			use_desc_cnt++;
			if (use_desc_cnt > 2)
				tx_desc->OWN = 1;

			u32Offset += u32Size;
		}
	}

	/* Last descriptor ended on buffer 1 only: advance and wrap. */
	if (use_buf2 && ++i == tx_ring->total_cnt) {
		tx_desc->EndRing = 1;
		i = 0;
	}

	/* Record the skb and the index of the packet's final
	 * descriptor so the TX-clean path can reclaim the chain. */
	tx_ring->desc_buf[cur_desc_idx].skb = skb;
	tx_ring->desc_buf[cur_desc_idx].nxt_watch =
		(i == 0 ? tx_ring->total_cnt : 0) + i - 1;

	/* All descriptors must be in memory before OWN is granted. */
	wmb();

	first_desc->OWN = 1;

	emac_dma_start_transmit(priv);

	tx_ring->nxt_use = i;
	return use_desc_cnt;
}
+
/*
 * emac_prepare_tso_desc - fill one TX descriptor (plus its TSO
 * continuation descriptors) for a mapped buffer.
 *
 * @priv:    driver private data
 * @idx:     index of the descriptor to fill
 * @tso:     hardware TCP segmentation offload requested
 * @coe:     checksum offload only (no segmentation)
 * @addr:    DMA address of the mapped payload
 * @payload: length of the mapped payload
 * @hlen:    L2+L3+L4 header length (first segment of a TSO packet)
 * @mss:     TCP maximum segment size (TSO only)
 * @fst:     this is the packet's first descriptor
 * @last:    this is the packet's last descriptor
 * @ioc:     raise interrupt on completion of the last descriptor
 * @ts:      request hardware TX timestamp
 * @cnt:     in/out count of descriptors consumed
 *
 * Returns the index of the next free descriptor. For TSO, the
 * hardware advances one descriptor per MSS-sized segment, so the
 * trailing loop reserves and flags one descriptor per segment.
 */
static int emac_prepare_tso_desc(struct emac_priv *priv, int idx,
				 bool tso, bool coe,
				 u32 addr, int payload, u8 hlen, int mss,
				 bool fst, bool last, bool ioc, bool ts,
				 u32 *cnt)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_tx_desc *pdesc;

	pdesc = emac_get_tx_desc(priv, idx);
	if (tso) {
		/* First TSO descriptor: buffer 1 points at the headers
		 * (length written later by emac_tx_update_fst_desc),
		 * buffer 2 at the start of the TCP payload. */
		if (fst && hlen) {
			emac_set_buf1_addr_len(pdesc, addr, 0);
			payload -= hlen;
			addr += hlen;
		}
		emac_set_buf2_addr_len(pdesc, addr, payload);
	} else {
		emac_set_buf1_addr_len(pdesc, addr, payload);
	}

	if (fst) {
		emac_tx_desc_set_fd(pdesc);
	} else {
		if (tso)
			emac_tx_desc_set_offload(pdesc, 1, 1, 1);
		else if (coe)
			emac_tx_desc_set_offload(pdesc, 0, 1, 0);
		else
			emac_tx_desc_set_offload(pdesc, 1, 0, 0);
	}

	if (ts)
		emac_tx_desc_set_ts(pdesc);

	if (last) {
		/* last segment */
		emac_tx_desc_set_ld(pdesc);
		if (ioc)
			emac_tx_desc_set_ioc(pdesc);
	}

	print_desc((void *)pdesc, 16);
	/* Header-only first descriptor: nothing more to reserve. */
	if (payload <= 0)
		return idx;

	do {
		(*cnt)++;

		if (++idx == tx_ring->total_cnt) {
			emac_tx_desc_set_ring_end(pdesc);
			idx = 0;
		}

		if (!tso)
			break;

		/* One extra descriptor per additional MSS segment. */
		payload -= mss;
		if (payload <= 0)
			break;

		pdesc = emac_get_tx_desc(priv, idx);
		emac_tx_desc_set_offload(pdesc, 1, 1, 0);

		print_desc((void *)pdesc, 16);
	} while (1);

	return idx;
}
+
+static int emac_tso_xmit(struct sk_buff *skb, struct net_device *ndev,
+ bool tso, bool coe)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct emac_desc_buffer *tx_buf;
+ struct emac_tx_desc *pdesc;
+ skb_frag_t *frag;
+ u32 desc_cnt, frag_num, f, mss, fst;
+ u32 offset, i;
+ u8 hlen;
+ int skb_len, payload;
+ void *pbuf;
+ int ioc;
+ u8 timestamp = 0;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ skb_len = skb->len - skb->data_len;
+ if (tso) {
+ hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ desc_cnt = (skb_len / mss) + 1;
+ for (f = 0; f < frag_num; f++) {
+ frag = &skb_shinfo(skb)->frags[f];
+ desc_cnt += (skb_frag_size(frag) / mss) + 1;
+ }
+ } else {
+ hlen = 0;
+ mss = 0;
+ desc_cnt = EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);
+ for (i = 0; i < frag_num; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ desc_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
+ MAX_DATA_PWR_TX_DES);
+ }
+ }
+
+ emac_print("%s: skb=0x%x, skb->len=%d skb_len=%d mss=%d frag_num=%d hlen=%d\n",
+ __func__, (unsigned)skb, skb->len, skb_len, mss, frag_num, hlen);
+
+#ifdef EMAC_DEBUG
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, skb->data, skb_len, 0);
+#endif
+ /* disable hard interrupt on local CPUs */
+#ifndef CONFIG_ASR_EMAC_NAPI
+ local_irq_save(ulFlags);
+#endif
+ if (!spin_trylock(&priv->spTxLock)) {
+ pr_err("Collision detected\n");
+#ifndef CONFIG_ASR_EMAC_NAPI
+ local_irq_restore(ulFlags);
+#endif
+ return NETDEV_TX_BUSY;
+ }
+
+ /* check whether sufficient free descriptors are there */
+ if (EMAC_DESC_UNUSED(tx_ring) < (desc_cnt + 2)) {
+ pr_err_ratelimited("TSO Descriptors are not free\n");
+ netif_stop_queue(ndev);
+#ifndef CONFIG_ASR_EMAC_NAPI
+ spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+ spin_unlock(&priv->spTxLock);
+#endif
+ return NETDEV_TX_BUSY;
+ }
+
+ priv->tx_count_frames += desc_cnt;
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en))
+ ioc = 1;
+ else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
+ ioc = 1;
+ else
+ ioc = 0;
+
+ if (ioc)
+ priv->tx_count_frames = 0;
+
+ skb_tx_timestamp(skb);
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ timestamp = 1;
+ }
+
+ offset = 0;
+ desc_cnt = 0;
+ i = fst = tx_ring->nxt_use;
+ do {
+ payload = min(skb_len, TSO_MAX_SEG_SIZE);
+
+ tx_buf = &tx_ring->desc_buf[i];
+ tx_buf->dma_len = payload;
+ pbuf = skb->data + offset;
+ tx_buf->dma_addr = emac_map_single(&priv->pdev->dev, pbuf,
+ payload, DMA_TO_DEVICE);
+ tx_buf->buff_addr = pbuf;
+ tx_buf->ulTimeStamp = jiffies;
+
+ skb_len -= payload;
+ offset += payload;
+
+ i = emac_prepare_tso_desc(priv, i, tso, coe,
+ tx_buf->dma_addr, payload, hlen, mss,
+ (i == fst), (skb_len == 0 && frag_num == 0),
+ ioc, timestamp, &desc_cnt);
+ } while (skb_len > 0);
+
+ /* if the data is fragmented */
+ for (f = 0; f < frag_num; f++) {
+ frag = &(skb_shinfo(skb)->frags[f]);
+ skb_len = skb_frag_size(frag);
+ offset = skb_frag_off(frag);
+
+ emac_print("%s: frag %d len=%d\n", __func__, f, skb_len);
+#ifdef EMAC_DEBUG
+ {
+ u8 *vaddr;
+
+ vaddr = kmap_atomic(skb_frag_page(frag));
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
+ 32, 1, vaddr + offset, skb_len, 0);
+ kunmap_atomic(vaddr);
+ }
+#endif
+ do {
+ payload = min(skb_len, TSO_MAX_SEG_SIZE);
+
+ tx_buf = &tx_ring->desc_buf[i];
+ tx_buf->dma_len = payload;
+ //pbuf = skb->data + offset;
+ tx_buf->dma_addr = dma_map_page(&priv->pdev->dev,
+ skb_frag_page(frag),
+ offset, payload,
+ DMA_TO_DEVICE);
+ tx_buf->ulTimeStamp = jiffies;
+
+ skb_len -= payload;
+ offset += payload;
+
+ i = emac_prepare_tso_desc(priv, i, tso, coe,
+ tx_buf->dma_addr, payload, 0, mss,
+ (i == fst),
+ (skb_len == 0 && f == (frag_num - 1)),
+ ioc, timestamp, &desc_cnt);
+ } while (skb_len > 0);
+ }
+
+ tx_ring->desc_buf[fst].skb = skb;
+ tx_ring->desc_buf[fst].nxt_watch =
+ (i == 0 ? tx_ring->total_cnt : 0) + i - 1;
+
+ wmb();
+
+ /* set first descriptor for this packet */
+ pdesc = emac_get_tx_desc(priv, fst);
+ emac_tx_update_fst_desc(pdesc, hlen, mss, tso, coe);
+ print_desc((void *)pdesc, 16);
+
+ tx_ring->nxt_use = i;
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ if (tso) {
+ priv->hw_stats->tx_tso_pkts++;
+ priv->hw_stats->tx_tso_bytes += skb->len;
+ }
+
+ emac_wr_tso(priv, TSO_TX_POLL_DEMAND, 0xFF);
+ /* Make sure there is space in the ring for the next send. */
+ if (EMAC_DESC_UNUSED(tx_ring) < (MAX_SKB_FRAGS + 2)) {
+ pr_debug_ratelimited("TSO Descriptors not enough, stop\n");
+ netif_stop_queue(ndev);
+ }
+
+#ifndef CONFIG_ASR_EMAC_NAPI
+ spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
+#else
+ spin_unlock(&priv->spTxLock);
+#endif
+#ifdef CONFIG_ASR_EMAC_DDR_QOS
+ emac_ddr_clk_scaling(priv);
+#endif
+
+ if (!tso && !coe)
+ emac_tx_timer_arm(priv);
+
+ return NETDEV_TX_OK;
+}
+
/* Name		emac_start_xmit
 * Arguments	skb : pointer to sk_buff structure passed by upper layer
 *		ndev : pointer to net_device structure
 * Return	NETDEV_TX_OK on success, NETDEV_TX_BUSY when the TX lock
 *		is contended or the ring lacks free descriptors
 * Description	ndo_start_xmit handler. Routes the packet to the
 *		TSO/COE path when available, otherwise maps it onto the
 *		plain DMA descriptor ring under spTxLock.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ioc;
	u32 frag_num;
	u32 skb_len;
	u32 tx_des_cnt = 0;
	u32 i;
#ifndef CONFIG_ASR_EMAC_NAPI
	unsigned long ulFlags;
#endif
#ifdef WAN_LAN_AUTO_ADAPT
	int vlan = 0;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
	struct vlan_hdr *vhdr;

	/* Snoop outgoing DHCP client requests (sport 68, dport 67) to
	 * start the WAN/LAN port auto-detection handshake; the RX path
	 * completes it when the server's OFFER/ACK arrives. */
	{ struct ethhdr *myeth = (struct ethhdr *)skb->data;
	if (myeth->h_proto == htons(ETH_P_8021Q)) {
		vhdr = (struct vlan_hdr *)((u8 *)myeth + sizeof(struct ethhdr));
		vlan = ntohs(vhdr->h_vlan_TCI);
		iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr) + VLAN_HLEN);
	}
	else if (myeth->h_proto == htons(ETH_P_IP))
		iph = (struct iphdr *)((u8 *)myeth + sizeof(struct ethhdr));

	if (iph && iph->protocol == IPPROTO_UDP) {
		udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl<<2));
		if ((htons(udph->dest) == 67 && htons(udph->source) == 68)) {
			u8 *udp_data = (u8 *)((u8 *)udph + sizeof(struct udphdr));
			/* offset 242: DHCP message-type option value,
			 * assuming a fixed option layout — TODO confirm */
			u8 dhcp_type = *(udp_data + 242);
			if ((DHCP_DISCOVER == dhcp_type || DHCP_REQUEST == dhcp_type)
				&& (0 == priv->dhcp)) {
				priv->dhcp = DHCP_SEND_REQ;
				if (ndev->phydev->phy_id == IP175D_PHY_ID)
					priv->vlan_port = vlan;
				else
					priv->vlan_port = -1;
			}
		}
	}
	}
#endif

	/* pstSkb->len: is the full length of the data in the packet
	 * pstSkb->data_len: the number of bytes in skb fragments
	 * u16Len: length of the first fragment
	 */
	skb_len = skb->len - skb->data_len;

	if (skb->len <= 0) {
		pr_err("Packet length is zero\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* TSO/COE engine available: decide offload mode and hand off. */
	if (priv->tso) {
		bool tso = false, coe = false;

		if (skb_is_gso(skb) &&
		    (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			tso = true;
			coe = true;
		} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			coe = true;
		}

		/* WR: COE need skb->data to be 2 bytes alinged */
		if (coe && !IS_ALIGNED((unsigned long)skb->data, 2))
			pskb_expand_head(skb, 1, 0, GFP_ATOMIC);

		return emac_tso_xmit(skb, ndev, tso, coe);
	}

	/* increment the count if len exceeds MAX_DATA_LEN_TX_DES */
	tx_des_cnt += EMAC_TXD_COUNT(skb_len, MAX_DATA_PWR_TX_DES);

	frag_num = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < frag_num; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx_des_cnt += EMAC_TXD_COUNT(skb_frag_size(frag),
					     MAX_DATA_PWR_TX_DES);
	}

	/* disable hard interrupt on local CPUs */
#ifndef CONFIG_ASR_EMAC_NAPI
	local_irq_save(ulFlags);
#endif
	if (!spin_trylock(&priv->spTxLock)) {
		pr_err("Collision detected\n");
#ifndef CONFIG_ASR_EMAC_NAPI
		local_irq_restore(ulFlags);
#endif
		return NETDEV_TX_BUSY;
	}

	/* check whether sufficient free descriptors are there */
	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (tx_des_cnt + 2)) {
		pr_err_ratelimited("Descriptors are not free\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}

	/* IRQ coalescing: force an interrupt for HW-timestamped packets
	 * and at least every EMAC_TX_FRAMES frames. */
	priv->tx_count_frames += frag_num + 1;
	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en))
		ioc = 1;
	else if (priv->tx_count_frames >= EMAC_TX_FRAMES)
		ioc = 1;
	else
		ioc = 0;

	if (ioc)
		priv->tx_count_frames = 0;

	tx_des_cnt = emac_tx_mem_map(priv, skb, MAX_DATA_LEN_TX_DES, frag_num, ioc);
	if (tx_des_cnt == 0) {
		pr_err("Could not acquire memory from pool\n");
		netif_stop_queue(ndev);
#ifndef CONFIG_ASR_EMAC_NAPI
		spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
		spin_unlock(&priv->spTxLock);
#endif
		return NETDEV_TX_BUSY;
	}
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* Make sure there is space in the ring for the next send. */
	if (EMAC_DESC_UNUSED(&priv->tx_ring) < (MAX_SKB_FRAGS + 2))
		netif_stop_queue(ndev);

#ifndef CONFIG_ASR_EMAC_NAPI
	spin_unlock_irqrestore(&priv->spTxLock, ulFlags);
#else
	spin_unlock(&priv->spTxLock);
#endif
#ifdef CONFIG_ASR_EMAC_DDR_QOS
	emac_ddr_clk_scaling(priv);
#endif
	emac_tx_timer_arm(priv);
	return NETDEV_TX_OK;
}
+
+u32 ReadTxStatCounters(struct emac_priv *priv, u8 cnt)
+{
+ u32 val, tmp;
+
+ val = 0x8000 | cnt;
+ emac_wr(priv, MAC_TX_STATCTR_CONTROL, val);
+ val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
+
+ while (val & 0x8000)
+ val = emac_rd(priv, MAC_TX_STATCTR_CONTROL);
+
+ tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_HIGH);
+ val = tmp << 16;
+ tmp = emac_rd(priv, MAC_TX_STATCTR_DATA_LOW);
+ val |= tmp;
+
+ return val;
+}
+
+u32 ReadRxStatCounters(struct emac_priv *priv, u8 cnt)
+{
+ u32 val, tmp;
+
+ val = 0x8000 | cnt;
+ emac_wr(priv, MAC_RX_STATCTR_CONTROL, val);
+ val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
+
+ while (val & 0x8000)
+ val = emac_rd(priv, MAC_RX_STATCTR_CONTROL);
+
+ tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_HIGH);
+ val = tmp << 16;
+ tmp = emac_rd(priv, MAC_RX_STATCTR_DATA_LOW);
+ val |= tmp;
+ return val;
+}
+
+/* Name emac_set_mac_address
+ * Arguments pstNetdev : pointer to net_device structure
+ * addr : pointer to addr
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description It is called by upper layer to set the mac address.
+ */
+static int emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ emac_set_mac_addr(priv, ndev->dev_addr);
+
+ emac_set_fc_source_addr(priv, ndev->dev_addr);
+
+ return 0;
+}
+
+/* Name emac_change_mtu
+ * Arguments pstNetdev : pointer to net_device structure
+ * u32MTU : maximum transmit unit value
+ * Return Status: 0 - Success; non-zero - Fail
+ * Description It is called by upper layer to set the MTU value.
+ */
+static int emac_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 frame_len;
+
+ if (netif_running(ndev)) {
+ pr_err("must be stopped to change its MTU\n");
+ return -EBUSY;
+ }
+
+ frame_len = mtu + ETHERNET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+ if (frame_len < MINIMUM_ETHERNET_FRAME_SIZE ||
+ frame_len > EMAC_SKBRB_MAX_PAYLOAD) {
+ pr_err("Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ if (frame_len <= EMAC_RX_BUFFER_1024)
+ priv->u32RxBufferLen = EMAC_RX_BUFFER_1024;
+ else
+ priv->u32RxBufferLen = EMAC_SKBRB_MAX_PAYLOAD;
+
+ ndev->mtu = mtu;
+
+ return 0;
+}
+
/*
 * emac_reset - recover the controller after a TX timeout by bouncing
 * the interface (dev_close + dev_open) under the rtnl lock.
 *
 * Only runs when EMAC_RESET_REQUESTED is pending and the device is not
 * already down; EMAC_RESETING serializes concurrent reset attempts.
 */
static void emac_reset(struct emac_priv *priv)
{
	/* Consume the request flag; bail if none was pending. */
	if (!test_and_clear_bit(EMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(EMAC_DOWN, &priv->state))
		return;

	netdev_dbg(priv->ndev, "Reset controller.\n");

	rtnl_lock();
	//netif_trans_update(priv->ndev);
	/* Wait for any in-flight reset to finish before starting ours. */
	while (test_and_set_bit(EMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	dev_close(priv->ndev);
	dev_open(priv->ndev, NULL);
	clear_bit(EMAC_RESETING, &priv->state);
	rtnl_unlock();
}
+
/*
 * emac_tx_timeout_task - workqueue handler scheduled by
 * emac_tx_timeout(); performs the actual controller reset in process
 * context, then clears the scheduled flag so a new timeout can
 * schedule the work again.
 */
static void emac_tx_timeout_task(struct work_struct *work)
{
	struct emac_priv *priv = container_of(work,
			struct emac_priv, tx_timeout_task);
	emac_reset(priv);
	clear_bit(EMAC_TASK_SCHED, &priv->state);
}
+
/* Name		emac_tx_timeout
 * Arguments	ndev : pointer to net_device structure
 * Return	none
 * Description	ndo_tx_timeout handler: dump registers, drop the
 *		carrier, and request an asynchronous controller reset
 *		(performed by emac_tx_timeout_task in process context,
 *		since this runs in atomic context).
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);

	netdev_info(ndev, "TX timeout\n");
	register_dump(priv);

	netif_carrier_off(priv->ndev);
	set_bit(EMAC_RESET_REQUESTED, &priv->state);

	/* Schedule only if the device is up and no reset work is
	 * already queued (EMAC_TASK_SCHED acts as the guard). */
	if (!test_bit(EMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(EMAC_TASK_SCHED, &priv->state))
		schedule_work(&priv->tx_timeout_task);
}
+
+static int emac_set_axi_bus_clock(struct emac_priv *priv, bool enable)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem* apmu;
+ u32 val;
+
+ apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("error to ioremap APMU base\n");
+ return -ENOMEM;
+ }
+
+ val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+ if (enable) {
+ val |= 0x1;
+ } else {
+ val &= ~0x1;
+ }
+ writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+ iounmap(apmu);
+ return 0;
+}
+
+static int clk_phase_rgmii_set(struct emac_priv *priv, bool is_tx)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem* apmu;
+ u32 val, dline;
+ u8 phase, tmp;
+
+ apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("error to ioremap APMU base\n");
+ return -ENOMEM;
+ }
+
+ val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+ if (is_tx) {
+ if (regdata->rgmii_tx_clk_src_sel_shift > 0) {
+ phase = (priv->tx_clk_config >> 16) & 0x1;
+ val &= ~(0x1 << regdata->rgmii_tx_clk_src_sel_shift);
+ val |= phase << regdata->rgmii_tx_clk_src_sel_shift;
+ }
+
+ if (regdata->rgmii_tx_dline_reg_offset > 0) {
+ /* Set RGMIII TX DLINE */
+ dline = readl(apmu + regdata->rgmii_tx_dline_reg_offset);
+
+ /* delay code */
+ tmp = (priv->tx_clk_config >> 8) &
+ regdata->rgmii_tx_delay_code_mask;
+ dline &= ~(regdata->rgmii_tx_delay_code_mask <<
+ regdata->rgmii_tx_delay_code_shift);
+ dline |= tmp << regdata->rgmii_tx_delay_code_shift;
+
+ /* delay step */
+ tmp = priv->tx_clk_config &
+ regdata->rgmii_tx_delay_step_mask;
+ dline &= ~(regdata->rgmii_tx_delay_step_mask <<
+ regdata->rgmii_tx_delay_step_shift);
+ dline |= tmp << regdata->rgmii_tx_delay_step_shift;
+
+ /* delay line enable */
+ dline |= 1 << regdata->rgmii_tx_delay_enable_shift;
+ writel(dline, apmu + regdata->rgmii_tx_dline_reg_offset);
+ pr_info("===> emac set tx dline 0x%x 0x%x", dline,
+ readl(apmu + regdata->rgmii_tx_dline_reg_offset));
+ }
+ } else {
+ if (regdata->rgmii_rx_clk_src_sel_shift > 0) {
+ phase = (priv->rx_clk_config >> 16) & 0x1;
+ val &= ~(0x1 << regdata->rgmii_rx_clk_src_sel_shift);
+ val |= phase << regdata->rgmii_rx_clk_src_sel_shift;
+ }
+
+ /* Set RGMIII RX DLINE */
+ if (regdata->rgmii_rx_dline_reg_offset > 0) {
+ dline = readl(apmu + regdata->rgmii_rx_dline_reg_offset);
+
+ /* delay code */
+ tmp = (priv->rx_clk_config >> 8) &
+ regdata->rgmii_rx_delay_code_mask;
+ dline &= ~(regdata->rgmii_rx_delay_code_mask <<
+ regdata->rgmii_rx_delay_code_shift);
+ dline |= tmp << regdata->rgmii_rx_delay_code_shift;
+
+ /* delay step */
+ tmp = priv->rx_clk_config &
+ regdata->rgmii_rx_delay_step_mask;
+ dline &= ~(regdata->rgmii_rx_delay_step_mask <<
+ regdata->rgmii_rx_delay_step_shift);
+ dline |= tmp << regdata->rgmii_rx_delay_step_shift;
+
+ /* delay line enable */
+ dline |= 1 << regdata->rgmii_rx_delay_enable_shift;
+ writel(dline, apmu + regdata->rgmii_rx_dline_reg_offset);
+ pr_info("===> emac set rx dline 0x%x 0x%x", dline,
+ readl(apmu + regdata->rgmii_rx_dline_reg_offset));
+ }
+ }
+ writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+ pr_info("%s phase:%d direction:%s 0x%x 0x%x\n", __func__, phase,
+ is_tx ? "tx": "rx", val,
+ readl(apmu + regdata->clk_rst_ctrl_reg_offset));
+
+ iounmap(apmu);
+ return 0;
+}
+
+static int clk_phase_rmii_set(struct emac_priv *priv, bool is_tx)
+{
+ const struct emac_regdata *regdata = priv->regdata;
+ void __iomem* apmu;
+ u32 val;
+ u8 phase, tmp;
+
+ apmu = ioremap(AXI_PHYS_BASE + 0x82800, SZ_4K);
+ if (apmu == NULL) {
+ pr_err("error to ioremap APMU base\n");
+ return -ENOMEM;
+ }
+
+ val = readl(apmu + regdata->clk_rst_ctrl_reg_offset);
+ if (is_tx) {
+ /* rmii tx clock select */
+ if (regdata->rmii_tx_clk_sel_shift > 0) {
+ tmp = (priv->tx_clk_config >> 16) & 0x1;
+ val &= ~(0x1 << regdata->rmii_tx_clk_sel_shift);
+ val |= tmp << regdata->rmii_tx_clk_sel_shift;
+ }
+
+ /* rmii ref clock selct, 1 - from soc, 0 - from phy */
+ if (regdata->rmii_rx_clk_sel_shift) {
+ tmp = (priv->tx_clk_config >> 24) & 0x1;
+ val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
+ val |= tmp << regdata->rmii_ref_clk_sel_shift;
+ }
+ } else {
+ /* rmii rx clock select */
+ if (regdata->rmii_rx_clk_sel_shift > 0) {
+ tmp = (priv->rx_clk_config >> 16) & 0x1;
+ val &= ~(0x1 << regdata->rmii_rx_clk_sel_shift);
+ val |= tmp << regdata->rmii_rx_clk_sel_shift;
+ }
+
+ /* rmii ref clock selct, 1 - from soc, 0 - from phy */
+ if (regdata->rmii_rx_clk_sel_shift) {
+ tmp = (priv->tx_clk_config >> 24) & 0x1;
+ val &= ~(0x1 << regdata->rmii_ref_clk_sel_shift);
+ val |= tmp << regdata->rmii_ref_clk_sel_shift;
+ }
+ }
+
+ writel(val, apmu + regdata->clk_rst_ctrl_reg_offset);
+ pr_debug("%s phase:%d direction:%s\n", __func__, phase,
+ is_tx ? "tx": "rx");
+
+ iounmap(apmu);
+ return 0;
+}
+
+static int clk_phase_set(struct emac_priv *priv, bool is_tx)
+{
+ if (emac_is_rmii_interface(priv)) {
+ clk_phase_rmii_set(priv, is_tx);
+ } else {
+ clk_phase_rgmii_set(priv, is_tx);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int clk_phase_show(struct seq_file *s, void *data)
+{
+ struct emac_priv *priv = s->private;
+ bool rmii_intf;
+ rmii_intf = emac_is_rmii_interface(priv);
+
+ seq_printf(s, "Emac MII Interface : %s\n", rmii_intf ? "RMII" : "RGMII");
+ seq_printf(s, "Current rx clk config : %d\n", priv->rx_clk_config);
+ seq_printf(s, "Current tx clk config : %d\n", priv->tx_clk_config);
+ return 0;
+}
+
+static ssize_t clk_tuning_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct emac_priv *priv =
+ ((struct seq_file *)(file->private_data))->private;
+ int err;
+ int clk_phase;
+ char buff[TUNING_CMD_LEN] = { 0 };
+ char mode_str[20];
+
+ if (count > TUNING_CMD_LEN) {
+ pr_err("count must be less than 50.\n");
+ return count;
+ }
+ err = copy_from_user(buff, user_buf, count);
+ if (err)
+ return err;
+
+ err = sscanf(buff, "%s %d", (char *)&mode_str, &clk_phase);
+ if (err != 2) {
+ pr_err("debugfs para count error\n");
+ return count;
+ }
+ pr_info("input:%s %d\n", mode_str, clk_phase);
+
+ if (strcmp(mode_str, "tx") == 0) {
+ priv->tx_clk_config = clk_phase;
+ clk_phase_set(priv, TX_PHASE);
+ } else if (strcmp(mode_str, "rx") == 0) {
+ priv->rx_clk_config = clk_phase;
+ clk_phase_set(priv, RX_PHASE);
+ } else {
+ pr_err("command error\n");
+ pr_err("eg: echo rx 1 > clk_tuning\n");
+ return count;
+ }
+
+ return count;
+}
+
/* debugfs open: bind clk_phase_show to this file via single_open,
 * passing the emac_priv stashed in i_private as seq_file private. */
static int clk_tuning_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_phase_show, inode->i_private);
}
+
/* File operations for the "clk_tuning" debugfs entry: reads show the
 * current clock config (seq_file), writes apply "tx|rx <N>" tuning. */
const struct file_operations clk_tuning_fops = {
	.open = clk_tuning_open,
	.write = clk_tuning_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
+
+#endif
+
+/*
+ * Drive the reset and LDO GPIOs (when present) to the level selected by
+ * their active-low flags — presumably parking the PHY in its powered-off/
+ * reset state; confirm polarity against the board schematic.
+ */
+static int emac_power_down(struct emac_priv *priv)
+{
+	int level;
+
+	if (priv->rst_gpio >= 0) {
+		level = priv->low_active_rst ? 0 : 1;
+		gpio_direction_output(priv->rst_gpio, level);
+	}
+
+	if (priv->ldo_gpio >= 0) {
+		level = priv->low_active_ldo ? 0 : 1;
+		gpio_direction_output(priv->ldo_gpio, level);
+	}
+
+	return 0;
+}
+
+/*
+ * Walk a control GPIO through the three-step pulse described by the DT
+ * "*-delays-us" triplet: level A for delays[0] (if non-zero), the opposite
+ * level for delays[1], then back to level A for delays[2].  Delays are in
+ * microseconds in the DT and rounded up to whole milliseconds here.
+ */
+static void emac_power_gpio_seq(int gpio, int active_low, const u32 *delays)
+{
+	if (delays[0]) {
+		gpio_set_value(gpio, active_low ? 1 : 0);
+		msleep(DIV_ROUND_UP(delays[0], 1000));
+	}
+
+	gpio_set_value(gpio, active_low ? 0 : 1);
+	if (delays[1])
+		msleep(DIV_ROUND_UP(delays[1], 1000));
+
+	gpio_set_value(gpio, active_low ? 1 : 0);
+	if (delays[2])
+		msleep(DIV_ROUND_UP(delays[2], 1000));
+}
+
+/* Power-up sequencing: LDO pulse first, then the reset pulse. */
+static int emac_power_up(struct emac_priv *priv)
+{
+	if (priv->rst_gpio >= 0)
+		gpio_direction_output(priv->rst_gpio,
+				      priv->low_active_rst ? 0 : 1);
+
+	if (priv->ldo_gpio >= 0) {
+		gpio_direction_output(priv->ldo_gpio,
+				      priv->low_active_ldo ? 0 : 1);
+		emac_power_gpio_seq(priv->ldo_gpio, priv->low_active_ldo,
+				    priv->delays_ldo);
+	}
+
+	if (priv->rst_gpio >= 0)
+		emac_power_gpio_seq(priv->rst_gpio, priv->low_active_rst,
+				    priv->delays_rst);
+
+	return 0;
+}
+
+/*
+ * mii_bus reset hook: parse the optional reset/LDO GPIOs and their delay
+ * triplets from the DT, run the PHY power-up pulse sequence with the MDIO
+ * clock gated, then reset the MAC.  Always returns 0 so MDIO bus probing
+ * continues even when the GPIOs are absent or cannot be requested.
+ */
+static int emac_mii_reset(struct mii_bus *bus)
+{
+	struct emac_priv *priv = bus->priv;
+	struct device *dev = &priv->pdev->dev;
+	struct device_node *np = dev->of_node;
+	int rst_gpio, ldo_gpio;
+	int low_active_ldo, low_active_rst;
+	u32 *delays_ldo = priv->delays_ldo;
+	u32 *delays_rst = priv->delays_rst;
+
+	/* Negative means "GPIO not available" to the power helpers. */
+	priv->rst_gpio = -1;
+	priv->ldo_gpio = -1;
+
+	if (!np)
+		return 0;
+
+	rst_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	if (rst_gpio >= 0) {
+		low_active_rst = of_property_read_bool(np, "reset-active-low");
+		/* Delay triplet is optional; delays_rst keeps old contents on failure. */
+		of_property_read_u32_array(np, "reset-delays-us", delays_rst, 3);
+
+		if (gpio_request(rst_gpio, "mdio-reset")) {
+			printk("emac: reset-gpio=%d request failed\n",
+				rst_gpio);
+			return 0;
+		}
+		priv->rst_gpio = rst_gpio;
+		priv->low_active_rst = low_active_rst;
+	}
+
+	ldo_gpio = of_get_named_gpio(np, "ldo-gpio", 0);
+	if (ldo_gpio >= 0) {
+		low_active_ldo = of_property_read_bool(np, "ldo-active-low");
+		of_property_read_u32_array(np, "ldo-delays-us", delays_ldo, 3);
+
+		if (gpio_request(ldo_gpio, "mdio-ldo"))
+			return 0;
+
+		priv->ldo_gpio = ldo_gpio;
+		priv->low_active_ldo = low_active_ldo;
+	}
+
+	/*
+	 * Some device not allow MDC/MDIO operation during power on/reset,
+	 * disable AXI clock to shutdown mdio clock.
+	 */
+	clk_disable_unprepare(priv->clk);
+
+	emac_power_up(priv);
+
+	clk_prepare_enable(priv->clk);
+
+	emac_reset_hw(priv);
+
+	return 0;
+}
+
+/*
+ * MDIO read: program the C22 read command and poll for completion.
+ * Returns the 16-bit register value, or -EBUSY when the AXI clock is off
+ * or the transaction times out.
+ */
+static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+	struct emac_priv *priv = bus->priv;
+	u32 cmd = 0;
+	u32 val;
+	int ret;
+
+	if (!__clk_is_enabled(priv->clk))
+		return -EBUSY;
+
+	mutex_lock(&priv->mii_mutex);
+	cmd |= phy_addr & 0x1F;
+	cmd |= (regnum & 0x1F) << 5;
+	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
+
+	/*
+	 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
+	 * change during MDIO read/write
+	 */
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
+#endif
+	emac_wr(priv, MAC_MDIO_DATA, 0x0);
+	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+	/*
+	 * Single exit path: the qos boost and mii_mutex must be released
+	 * even on timeout, otherwise every later MDIO access deadlocks.
+	 */
+	if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+			       !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
+		ret = -EBUSY;
+	else
+		ret = emac_rd(priv, MAC_MDIO_DATA);
+
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+	mutex_unlock(&priv->mii_mutex);
+	return ret;
+}
+
+/*
+ * MDIO write: latch @value into the data register, issue the C22 write
+ * command and poll for completion.  Returns 0 on success, -EBUSY when the
+ * AXI clock is off or the transaction times out.
+ */
+static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
+			  u16 value)
+{
+	struct emac_priv *priv = bus->priv;
+	u32 cmd = 0;
+	u32 val;
+	int ret = 0;
+
+	if (!__clk_is_enabled(priv->clk))
+		return -EBUSY;
+
+	mutex_lock(&priv->mii_mutex);
+	emac_wr(priv, MAC_MDIO_DATA, value);
+
+	cmd |= phy_addr & 0x1F;
+	cmd |= (regnum & 0x1F) << 5;
+	cmd |= MREGBIT_START_MDIO_TRANS;
+
+	/*
+	 * MDC/MDIO clock is from AXI, add qos to avoid MDC frequency
+	 * change during MDIO read/write
+	 */
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, INT_MAX);
+#endif
+	emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+	/*
+	 * Single exit path: the qos boost and mii_mutex must be released
+	 * even on timeout, otherwise every later MDIO access deadlocks.
+	 */
+	if (readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+			       !(val & MREGBIT_START_MDIO_TRANS), 100, 100000))
+		ret = -EBUSY;
+
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_update_request(&priv->pm_ddr_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+
+	mutex_unlock(&priv->mii_mutex);
+	return ret;
+}
+
+/*
+ * phylib adjust_link callback: mirror the PHY's duplex/speed into the MAC
+ * global control register and (optionally) translate link transitions into
+ * hotplug uevents.  No-op for fixed-link setups.
+ */
+static void emac_adjust_link(struct net_device *dev)
+{
+	struct phy_device *phydev = dev->phydev;
+	struct emac_priv *priv = netdev_priv(dev);
+	u32 ctrl;
+#ifdef WAN_LAN_AUTO_ADAPT
+	int status_change = 0;
+	int addr = 0;
+	int i = 0;
+#endif
+	if (!phydev || priv->fix_link)
+		return;
+
+	if (phydev->link) {
+		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+
+		/* Now we make sure that we can be in full duplex mode
+		 * If not, we operate in half-duplex mode.
+		 */
+		if (phydev->duplex != priv->duplex) {
+			if (!phydev->duplex)
+				ctrl &= ~MREGBIT_FULL_DUPLEX_MODE;
+			else
+				ctrl |= MREGBIT_FULL_DUPLEX_MODE;
+			priv->duplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->speed) {
+			ctrl &= ~MREGBIT_SPEED;
+
+			switch (phydev->speed) {
+			case SPEED_1000:
+				ctrl |= MREGBIT_SPEED_1000M;
+				break;
+			case SPEED_100:
+				ctrl |= MREGBIT_SPEED_100M;
+				break;
+			case SPEED_10:
+				ctrl |= MREGBIT_SPEED_10M;
+				break;
+			default:
+				pr_err("broken speed: %d\n", phydev->speed);
+				phydev->speed = SPEED_UNKNOWN;
+				break;
+			}
+			/* Only cache speeds the MAC actually supports. */
+			if (phydev->speed != SPEED_UNKNOWN) {
+				priv->speed = phydev->speed;
+			}
+		}
+		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+		pr_info("%s link:%d speed:%dM duplex:%s\n", __func__,
+			phydev->link, phydev->speed,
+			phydev->duplex ? "Full": "Half");
+	}
+
+#ifdef WAN_LAN_AUTO_ADAPT
+	/*
+	 * For the IP175D switch, phydev->link is treated as a per-port
+	 * bitmask: diff it against the cached value and raise one uevent
+	 * per port that changed state.
+	 */
+	if(phydev->phy_id == IP175D_PHY_ID) {
+		if (phydev->link != priv->link) {
+			for (i=0; i<16; i++) {
+				if((priv->link & (1<<i)) != (phydev->link & (1<<i))) {
+					addr = i;
+					if (phydev->link & (1<<i)) {
+						/* link up */
+						printk("eth0 port%d link up\n", addr);
+						priv->dhcp = 0;
+						emac_sig_workq(CARRIER_UP_IP175D, addr);
+						/* Restart the pending DHCP-detect window. */
+						if(priv->dhcp_delaywork)
+							cancel_delayed_work(&priv->dhcp_work);
+						priv->dhcp_delaywork = 1;
+						schedule_delayed_work(&priv->dhcp_work, 25*HZ);
+					} else {
+						/* link down */
+						printk("eth0 port%d link down\n", addr);
+						priv->dhcp = 0;
+						if(priv->dhcp_delaywork)
+							cancel_delayed_work(&priv->dhcp_work);
+						priv->dhcp_delaywork = 0;
+						emac_sig_workq(CARRIER_DOWN_IP175D, addr);
+					}
+				}
+			}
+			priv->link = phydev->link;
+		}
+	} else {
+		/* Plain PHY: single carrier up/down uevent on change. */
+		if (phydev->link != priv->link) {
+			priv->link = phydev->link;
+			status_change = 1;
+		}
+
+		if (status_change) {
+			if (phydev->link) {
+				/* link up */
+				priv->dhcp = 0;
+				emac_sig_workq(CARRIER_UP, 0);
+				if(priv->dhcp_delaywork)
+					cancel_delayed_work(&priv->dhcp_work);
+				priv->dhcp_delaywork = 1;
+				schedule_delayed_work(&priv->dhcp_work, 25*HZ);
+
+			} else {
+				/* link down */
+				priv->dhcp = 0;
+				if(priv->dhcp_delaywork)
+					cancel_delayed_work(&priv->dhcp_work);
+				priv->dhcp_delaywork = 0;
+				emac_sig_workq(CARRIER_DOWN, 0);
+			}
+		}
+	}
+#endif
+}
+
+/*
+ * Attach the PHY described by the "phy-handle" DT phandle (or configure a
+ * fixed link when none exists).  Configures the MAC's MII interface mode
+ * and RGMII pinmux, and powers the PHY down again on connect failure.
+ */
+static int emac_phy_connect(struct net_device *dev)
+{
+	struct phy_device *phydev;
+	int phy_interface;
+	struct device_node *np;
+	struct emac_priv *priv = netdev_priv(dev);
+
+	np = of_parse_phandle(priv->pdev->dev.of_node, "phy-handle", 0);
+	if (!np) {
+		if (priv->fix_link) {
+			emac_phy_interface_config(priv, priv->interface);
+			if (priv->interface == PHY_INTERFACE_MODE_RGMII)
+				pinctrl_select_state(priv->pinctrl,
+						     priv->rgmii_pins);
+			emac_config_phy_interrupt(priv, 0);
+			return 0;
+		}
+		return -ENODEV;
+	}
+
+	printk("%s: %s\n",__func__, np->full_name);
+	phy_interface = of_get_phy_mode(np);
+	emac_phy_interface_config(priv, phy_interface);
+	if (phy_interface != PHY_INTERFACE_MODE_RMII)
+		pinctrl_select_state(priv->pinctrl, priv->rgmii_pins);
+
+	phydev = of_phy_connect(dev, np,
+				&emac_adjust_link, 0, phy_interface);
+	/* Drop the reference taken by of_parse_phandle() on every path. */
+	of_node_put(np);
+	if (IS_ERR_OR_NULL(phydev)) {
+		pr_err("Could not attach to PHY\n");
+		emac_power_down(priv);
+		if (!phydev)
+			return -ENODEV;
+		return PTR_ERR(phydev);
+	}
+
+	if (!phydev->phy_id || phydev->phy_id == 0xffffffff) {
+		pr_err("Not valid phy_id=0x%x\n", phydev->phy_id);
+		emac_power_down(priv);
+		return -ENODEV;
+	}
+
+	if(phy_interrupt_is_valid(phydev))
+		emac_config_phy_interrupt(priv, 1);
+	else
+		emac_config_phy_interrupt(priv, 0);
+
+	//phydev->supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full);
+	pr_info("%s: %s: attached to PHY (UID 0x%x)"
+		" Link = %d irq=%d\n", __func__,
+		dev->name, phydev->phy_id, phydev->link, phydev->irq);
+	dev->phydev = phydev;
+
+#ifdef WAN_LAN_AUTO_ADAPT
+	if(phydev->phy_id == IP175D_PHY_ID)
+		emac_sig_workq(PHY_IP175D_CONNECT, 0);
+#endif
+
+	return 0;
+}
+
+/*
+ * Allocate and register the MDIO bus described by the "mdio-bus" child
+ * node.  Returns 0 on success or a negative errno; on registration
+ * failure the allocated bus is freed so nothing leaks.
+ */
+static int emac_mdio_init(struct emac_priv *priv)
+{
+	struct device_node *mii_np;
+	struct device *dev = &priv->pdev->dev;
+	int ret;
+
+	mii_np = of_get_child_by_name(dev->of_node, "mdio-bus");
+	if (!mii_np) {
+		dev_err(dev, "no %s child node found", "mdio-bus");
+		return -ENODEV;
+	}
+
+	if (!of_device_is_available(mii_np)) {
+		ret = -ENODEV;
+		goto err_put_node;
+	}
+
+	priv->mii = mdiobus_alloc();//devm_mdiobus_alloc(dev);
+	if (!priv->mii) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
+	priv->mii->priv = priv;
+	//priv->mii->irq = priv->mdio_irqs;
+	priv->mii->name = "emac mii";
+	priv->mii->reset = emac_mii_reset;
+	priv->mii->read = emac_mii_read;
+	priv->mii->write = emac_mii_write;
+	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%pOFn",
+		 mii_np);
+	priv->mii->parent = dev;
+	priv->mii->phy_mask = 0xffffffff;
+	ret = of_mdiobus_register(priv->mii, mii_np);
+	if (ret) {
+		/* Registration failed: release the non-devm allocation. */
+		mdiobus_free(priv->mii);
+		priv->mii = NULL;
+	}
+
+err_put_node:
+	of_node_put(mii_np);
+	return ret;
+}
+
+/*
+ * Tear down the MDIO bus.  The bus comes from mdiobus_alloc() (not the
+ * devm variant), so it must be freed explicitly after unregistering.
+ */
+static int emac_mdio_deinit(struct emac_priv *priv)
+{
+	if (!priv->mii)
+		return 0;
+
+	mdiobus_unregister(priv->mii);
+	mdiobus_free(priv->mii);
+	priv->mii = NULL;
+	return 0;
+}
+
+/*
+ * ethtool get_ts_info: advertise hardware timestamping capabilities when
+ * PTP support is enabled, otherwise fall back to software timestamping.
+ */
+static int emac_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+
+	if (priv->ptp_support) {
+
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_TX_HARDWARE |
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_RX_HARDWARE |
+			SOF_TIMESTAMPING_SOFTWARE |
+			SOF_TIMESTAMPING_RAW_HARDWARE;
+
+		if (priv->ptp_clock)
+			info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+				    (1 << HWTSTAMP_FILTER_ALL));
+		/*
+		 * probe() continues with regdata == NULL when no OF match
+		 * data exists, so guard the dereference.
+		 */
+		if (priv->regdata && priv->regdata->ptp_rx_ts_all_events) {
+			info->rx_filters |=
+				(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+				(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+		}
+
+		return 0;
+	} else
+		return ethtool_op_get_ts_info(dev, info);
+}
+
+/* ethtool get_strings: one ETH_GSTRING_LEN slot per stats-table entry. */
+static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	size_t idx;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (idx = 0; idx < ARRAY_SIZE(emac_ethtool_stats); idx++)
+		memcpy(data + idx * ETH_GSTRING_LEN,
+		       emac_ethtool_stats[idx].str, ETH_GSTRING_LEN);
+}
+
+/* ethtool get_sset_count: only the statistics string set is implemented. */
+static int emac_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(emac_ethtool_stats);
+
+	return -EOPNOTSUPP;
+}
+
+/*
+ * Snapshot the hardware counters into priv->hw_stats.
+ * NOTE(review): this walks emac_hw_stats as a flat u32 array — it assumes
+ * the struct starts with MAX_TX_STATS_NUM tx counters, immediately
+ * followed by MAX_RX_STATS_NUM rx counters, the missed-frame counter and
+ * the two TSO counters, with no padding.  Confirm against the struct
+ * definition if fields are ever reordered.
+ */
+static void emac_stats_update(struct emac_priv *priv)
+{
+	struct emac_hw_stats *hwstats = priv->hw_stats;
+	int i;
+	u32 *p;
+
+	p = (u32 *)(hwstats);
+
+	for (i = 0; i < MAX_TX_STATS_NUM; i++)
+		*(p + i) = ReadTxStatCounters(priv, i);
+
+	/* Rx counters start right after the tx block. */
+	p = (u32 *)hwstats + MAX_TX_STATS_NUM;
+
+	for (i = 0; i < MAX_RX_STATS_NUM; i++)
+		*(p + i) = ReadRxStatCounters(priv, i);
+
+	*(p + i++) = emac_rd(priv, DMA_MISSED_FRAME_COUNTER);
+
+	/* Software-maintained TSO counters are copied in-place. */
+	*(p + i++) = hwstats->tx_tso_pkts;
+	*(p + i++) = hwstats->tx_tso_bytes;
+}
+
+/*
+ * ethtool get_ethtool_stats: refresh the hardware counters (best effort,
+ * never blocking on the stats lock) and widen each 32-bit counter into
+ * the caller's u64 buffer.
+ */
+static void emac_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct emac_hw_stats *hwstats = priv->hw_stats;
+	const u32 *src;
+	int i;
+
+	if (netif_running(dev) && netif_device_present(dev) &&
+	    spin_trylock_bh(&hwstats->stats_lock)) {
+		emac_stats_update(priv);
+		spin_unlock_bh(&hwstats->stats_lock);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(emac_ethtool_stats); i++) {
+		src = (u32 *)hwstats + emac_ethtool_stats[i].offset;
+		data[i] = (u64)(*src);
+	}
+}
+
+/* ethtool get_regs_len: size of the snapshot filled by get_regs below. */
+static int emac_ethtool_get_regs_len(struct net_device *dev)
+{
+	return EMAC_REG_SPACE_SIZE;
+}
+
+/*
+ * ethtool get_regs: snapshot the DMA and MAC register blocks.  The DMA
+ * block lands at the start of the buffer; the MAC block is placed at its
+ * natural register offset divided by the 4-byte register width.
+ */
+static void emac_ethtool_get_regs(struct net_device *dev,
+				  struct ethtool_regs *regs, void *space)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	u32 *out = space;
+	void __iomem *io = priv->iobase;
+	int i;
+
+	regs->version = 1;
+
+	memset(out, 0x0, EMAC_REG_SPACE_SIZE);
+
+	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
+		out[i] = readl(io + DMA_CONFIGURATION + i * 4);
+
+	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
+		out[MAC_GLOBAL_CONTROL / 4 + i] =
+			readl(io + MAC_GLOBAL_CONTROL + i * 4);
+}
+
+/* ethtool get_link_ksettings: delegate to phylib when a PHY is attached. */
+static int emac_get_link_ksettings(struct net_device *ndev,
+				   struct ethtool_link_ksettings *cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	phy_ethtool_ksettings_get(phydev, cmd);
+	return 0;
+}
+
+/* ethtool set_link_ksettings: delegate to phylib when a PHY is attached. */
+static int emac_set_link_ksettings(struct net_device *ndev,
+				   const struct ethtool_link_ksettings *cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	return phydev ? phy_ethtool_ksettings_set(phydev, cmd) : -ENODEV;
+}
+
+/* ethtool get_drvinfo: driver name plus the fixed statistics count. */
+static void emac_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	info->n_stats = ARRAY_SIZE(emac_ethtool_stats);
+	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+}
+
+/*
+ * ethtool get_pauseparam: autoneg state is read live from BMCR bit 12 of
+ * the PHY at address 0; rx/tx pause come from the cached configuration.
+ */
+static void emac_get_pauseparam(struct net_device *ndev,
+				struct ethtool_pauseparam *param)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	int bmcr = emac_mii_read(priv->mii, 0, 0);
+
+	param->autoneg = !!(bmcr & BIT(12));
+	param->rx_pause = priv->pause.rx_pause;
+	param->tx_pause = priv->pause.tx_pause;
+}
+
+/*
+ * ethtool set_pauseparam: program rx/tx flow-control enables in the MAC,
+ * set up the watermark thresholds on the very first enable, and mirror
+ * the autoneg bit into PHY 0's BMCR.
+ * NOTE(review): init_flag is function-static, i.e. shared across every
+ * EMAC instance — correct only for a single-controller system; confirm.
+ */
+static int emac_set_pauseparam(struct net_device *ndev,
+			       struct ethtool_pauseparam *param)
+{
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device *dev = &priv->pdev->dev;
+	struct device_node *np = dev->of_node;
+	int val;
+	int phyval;
+	u32 threshold[2];
+	static int init_flag = 1;
+
+	val = readl(priv->iobase + MAC_FC_CONTROL);
+	phyval = emac_mii_read(priv->mii, 0, 0);
+
+	if (param->rx_pause)
+		val |= MREGBIT_FC_DECODE_ENABLE;
+	else
+		val &= ~MREGBIT_FC_DECODE_ENABLE;
+
+	if (param->tx_pause)
+		val |= MREGBIT_FC_GENERATION_ENABLE;
+	else
+		val &= ~MREGBIT_FC_GENERATION_ENABLE;
+
+	/* One-time threshold setup the first time any pause is enabled. */
+	if (init_flag && (param->rx_pause | param->tx_pause)) {
+		val |= MREGBIT_MULTICAST_MODE;
+		priv->pause.pause_time_max = 0;
+		/* DT "flow-control-threshold" is <low high> in percent. */
+		if (0 != of_property_read_u32_array(np, "flow-control-threshold", threshold, 2)) {
+			threshold[0] = 60;
+			threshold[1] = 90;
+		}
+		threshold[0] = clamp(threshold[0], 0U, 99U);
+		threshold[1] = clamp(threshold[1], 1U, 100U);
+
+		if (cpu_is_asr18xx() || cpu_is_asr1903_z1()) {
+			/* Software-driven pause: watermarks in rx ring slots. */
+			priv->pause.low_water = priv->rx_ring.total_cnt * threshold[0] / 100;
+			priv->pause.high_water = priv->rx_ring.total_cnt * threshold[1] / 100 - 1;
+			priv->pause.fc_auto = 0;
+		} else {
+			/* Hardware auto pause generation with FIFO thresholds. */
+			priv->pause.low_water = 0;
+			priv->pause.high_water = 0;
+			priv->pause.fc_auto = 1;
+			val |= MREGBIT_AUTO_FC_GENERATION_ENABLE;
+			threshold[0] = 1024 * threshold[0] / 100;
+			threshold[1] = 1024 * threshold[1] / 100;
+			emac_wr(priv, MAC_FC_AUTO_HIGH_THRESHOLD, threshold[1]);
+			emac_wr(priv, MAC_FC_AUTO_LOW_THRESHOLD, threshold[0]);
+			emac_wr(priv, MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE, 0xffff);
+			emac_wr(priv, MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE, 0);
+		}
+		init_flag = 0;
+	}
+	emac_wr(priv, MAC_FC_CONTROL, val);
+
+	/* BIT(12) of BMCR is the PHY autoneg-enable flag. */
+	if (param->autoneg)
+		phyval |= BIT(12);
+	else
+		phyval &= ~BIT(12);
+
+	(void)emac_mii_write(priv->mii, 0, 0, (u16)phyval);
+
+	priv->pause.rx_pause = param->rx_pause;
+	priv->pause.tx_pause = param->tx_pause;
+	return 0;
+}
+
+/* ethtool get_wol: report supported/active WoL options when wake-capable. */
+static void emac_get_wol(struct net_device *dev,
+			 struct ethtool_wolinfo *wol)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+
+	if (!device_can_wakeup(&priv->pdev->dev))
+		return;
+
+	wol->supported = WAKE_MAGIC | WAKE_UCAST;
+	wol->wolopts = priv->wolopts;
+}
+
+/*
+ * ethtool set_wol: accept only the advertised options and toggle both the
+ * device wakeup flag and the wakeup interrupt accordingly.
+ */
+static int emac_set_wol(struct net_device *dev,
+			struct ethtool_wolinfo *wol)
+{
+	struct emac_priv *priv = netdev_priv(dev);
+	struct device *device = &priv->pdev->dev;
+	bool enable;
+
+	if (!device_can_wakeup(device) || !priv->en_suspend)
+		return -ENOTSUPP;
+
+	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
+		return -EINVAL;
+
+	priv->wolopts = wol->wolopts;
+	enable = wol->wolopts != 0;
+
+	device_set_wakeup_enable(device, enable);
+	if (enable)
+		enable_irq_wake(priv->irq_wakeup);
+	else
+		disable_irq_wake(priv->irq_wakeup);
+
+	return 0;
+}
+
+/* ethtool entry points; link settings and nway reset go through phylib. */
+static const struct ethtool_ops emac_ethtool_ops = {
+	.get_link_ksettings = emac_get_link_ksettings,
+	.set_link_ksettings = emac_set_link_ksettings,
+	.get_drvinfo = emac_get_drvinfo,
+	.nway_reset = phy_ethtool_nway_reset,
+	.get_link = ethtool_op_get_link,
+	.get_pauseparam = emac_get_pauseparam,
+	.set_pauseparam = emac_set_pauseparam,
+	.get_strings = emac_get_strings,
+	.get_sset_count = emac_get_sset_count,
+	.get_ethtool_stats = emac_get_ethtool_stats,
+	.get_regs = emac_ethtool_get_regs,
+	.get_regs_len = emac_ethtool_get_regs_len,
+	.get_ts_info = emac_get_ts_info,
+	.get_wol = emac_get_wol,
+	.set_wol = emac_set_wol,
+};
+
+/* Netdev entry points (open/stop/xmit etc. are defined earlier in the file). */
+static const struct net_device_ops emac_netdev_ops = {
+	.ndo_open = emac_open,
+	.ndo_stop = emac_close,
+	.ndo_start_xmit = emac_start_xmit,
+	.ndo_set_mac_address = emac_set_mac_address,
+	.ndo_do_ioctl = emac_ioctl,
+	.ndo_change_mtu = emac_change_mtu,
+	.ndo_tx_timeout = emac_tx_timeout,
+};
+
+#ifdef WAN_LAN_AUTO_ADAPT
+#define EMAC_SKB_SIZE 2048
+/*
+ * Format one "KEY=value" variable and append it (NUL-terminated) to the
+ * event's uevent skb.  Returns 0 on success, -ENOMEM when the formatted
+ * string does not fit the scratch buffer.
+ */
+static int emac_event_add_var(struct emac_event *event, int argv,
+			      const char *format, ...)
+{
+	/*
+	 * Stack buffer: the previous 'static' scratch buffer was shared by
+	 * every queued hotplug work and could race when two events were
+	 * formatted concurrently.
+	 */
+	char buf[128];
+	char *s;
+	va_list args;
+	int len;
+
+	if (argv)
+		return 0;
+
+	va_start(args, format);
+	len = vsnprintf(buf, sizeof(buf), format, args);
+	va_end(args);
+
+	/* Negative is a format error; >= size means truncated output. */
+	if (len < 0 || len >= (int)sizeof(buf)) {
+		printk("buffer size too small\n");
+		WARN_ON(1);
+		return -ENOMEM;
+	}
+
+	s = skb_put(event->skb, len + 1);
+	strcpy(s, buf);
+
+	return 0;
+}
+
+/*
+ * Append the standard hotplug environment variables to the event skb.
+ * Stops at the first failure and returns that error (0 on success).
+ */
+static int emac_hotplug_fill_event(struct emac_event *event)
+{
+	int ret;
+
+	ret = emac_event_add_var(event, 0, "HOME=%s", "/");
+	if (!ret)
+		ret = emac_event_add_var(event, 0, "PATH=%s",
+					 "/sbin:/bin:/usr/sbin:/usr/bin");
+	if (!ret)
+		ret = emac_event_add_var(event, 0, "SUBSYSTEM=%s", "ethernet");
+	if (!ret)
+		ret = emac_event_add_var(event, 0, "ACTION=%s", event->action);
+	if (!ret)
+		ret = emac_event_add_var(event, 0, "ETH=%s", event->name);
+	if (!ret)
+		ret = emac_event_add_var(event, 0, "PORT=%d", event->port);
+	if (!ret)
+		ret = emac_event_add_var(event, 0, "SEQNUM=%llu",
+					 uevent_next_seqnum());
+
+	return ret;
+}
+
+/*
+ * Deferred uevent broadcaster: build a netlink skb containing the
+ * "ACTION@" header plus the environment variables and broadcast it to
+ * uevent listeners.  The skb is consumed by broadcast_uevent() on
+ * success and freed here on failure; the event itself is always freed.
+ */
+static void emac_hotplug_work(struct work_struct *work)
+{
+	struct emac_event *event = container_of(work, struct emac_event, work);
+	int ret = 0;
+
+	event->skb = alloc_skb(EMAC_SKB_SIZE, GFP_KERNEL);
+	if (!event->skb)
+		goto out_free_event;
+
+	ret = emac_event_add_var(event, 0, "%s@", event->action);
+	if (ret)
+		goto out_free_skb;
+
+	ret = emac_hotplug_fill_event(event);
+	if (ret)
+		goto out_free_skb;
+
+	NETLINK_CB(event->skb).dst_group = 1;
+	broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);
+
+ out_free_skb:
+	/* Only free the skb when it was NOT handed to broadcast_uevent(). */
+	if (ret) {
+		printk("work error %d\n", ret);
+		kfree_skb(event->skb);
+	}
+ out_free_event:
+	kfree(event);
+}
+
+/*
+ * Queue a hotplug uevent describing a link/DHCP event on @port.
+ * Returns 0 on success, -ENOMEM on allocation failure and -EINVAL for an
+ * unknown event code (previously an unknown code queued a NULL action
+ * string, which the hotplug work would then dereference).
+ */
+static int emac_sig_workq(int event, int port)
+{
+	struct emac_event *u_event;
+
+	u_event = kzalloc(sizeof(*u_event), GFP_KERNEL);
+	if (!u_event)
+		return -ENOMEM;
+
+	u_event->name = DRIVER_NAME;
+	switch (event) {
+	case CARRIER_UP:
+		u_event->action = "LINKUP";
+		break;
+	case CARRIER_DOWN:
+		u_event->action = "LINKDW";
+		break;
+	case CARRIER_DOWN_IP175D:
+		u_event->action = "IP175D_LINKDW";
+		break;
+	case CARRIER_UP_IP175D:
+		u_event->action = "IP175D_LINKUP";
+		break;
+	case DHCP_EVENT_CLIENT:
+		u_event->action = "DHCPCLIENT";
+		break;
+	case DHCP_EVENT_SERVER:
+		u_event->action = "DHCPSERVER";
+		break;
+	case PHY_IP175D_CONNECT:
+		u_event->action = "PHY_CONNECT";
+		break;
+	default:
+		kfree(u_event);
+		return -EINVAL;
+	}
+
+	u_event->port = port;
+	/* emac_hotplug_work() already has the work_func_t signature. */
+	INIT_WORK(&u_event->work, emac_hotplug_work);
+	schedule_work(&u_event->work);
+
+	return 0;
+}
+
+/*
+ * Classify the DHCP traffic observed during the detection window and
+ * emit the matching uevent, then reset the detection state.
+ */
+static inline void __emac_dhcp_work_func(struct emac_priv *priv)
+{
+	int state = priv->dhcp;
+
+	if (state == DHCP_REC_RESP)
+		emac_sig_workq(DHCP_EVENT_CLIENT, priv->vlan_port);
+	else if (state == DHCP_SEND_REQ || state == 0)
+		emac_sig_workq(DHCP_EVENT_SERVER, priv->vlan_port);
+
+	priv->dhcp = 0;
+	if (priv->dhcp_delaywork) {
+		cancel_delayed_work(&priv->dhcp_work);
+		priv->dhcp_delaywork = 0;
+	}
+}
+
+/* Delayed-work trampoline into __emac_dhcp_work_func(). */
+static void emac_dhcp_work_func_t(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+
+	__emac_dhcp_work_func(container_of(dwork, struct emac_priv,
+					   dhcp_work));
+}
+#endif
+
+/*
+ * Platform probe: allocate the netdev, map resources, parse DT options,
+ * bring up the AXI clock, register the MDIO bus and the netdev, and
+ * optionally expose the debugfs clk-tuning knob.
+ *
+ * Error handling fixes vs the original: a failed "lpm-qos" read used to
+ * 'return ret' directly and leak the netdev, and a failed
+ * clk_prepare_enable() used to jump to clk_disable and unprepare a clock
+ * that was never enabled.
+ * NOTE(review): pm_qos requests added below are not removed on the error
+ * paths — pre-existing; consider a dedicated unwind label.
+ */
+static int emac_probe(struct platform_device *pdev)
+{
+	struct emac_priv *priv;
+	struct net_device *ndev = NULL;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	const unsigned char *mac_addr = NULL;
+	const struct of_device_id *match;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *emac_fs_dir = NULL;
+	struct dentry *emac_clk_tuning;
+#endif
+	int ret;
+
+	ndev = alloc_etherdev(sizeof(struct emac_priv));
+	if (!ndev) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+	priv->pdev = pdev;
+#ifdef WAN_LAN_AUTO_ADAPT
+	priv->dhcp = -1;
+	priv->vlan_port = -1;
+	priv->dhcp_delaywork = 0;
+#endif
+	platform_set_drvdata(pdev, priv);
+
+	match = of_match_device(of_match_ptr(emac_of_match), &pdev->dev);
+	if (match) {
+		priv->regdata = match->data;
+	} else {
+		pr_info("===> not match valid device\n");
+	}
+
+	emac_command_options(priv);
+	emac_skbrb_init(EMAC_SKBRB_SLOT_SIZE, priv->rx_ring.total_cnt * 2);
+
+	priv->hw_stats = devm_kzalloc(&pdev->dev,
+				      sizeof(*priv->hw_stats),
+				      GFP_KERNEL);
+	if (!priv->hw_stats) {
+		dev_err(&pdev->dev, "failed to allocate counter memory\n");
+		ret = -ENOMEM;
+		goto err_netdev;
+	}
+
+	spin_lock_init(&priv->hw_stats->stats_lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->iobase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->iobase)) {
+		ret = -ENOMEM;
+		goto err_netdev;
+	}
+
+	/* Second MEM resource (TSO engine) is optional. */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	priv->tso_base = devm_ioremap_resource(&pdev->dev, res);
+	if (!IS_ERR(priv->tso_base)) {
+		dev_info(&pdev->dev, "tso base=0x%x\n", (unsigned)priv->tso_base);
+	}
+
+	priv->irq = irq_of_parse_and_map(np, 0);
+	if (!priv->irq) {
+		ret = -ENXIO;
+		goto err_netdev;
+	}
+	priv->irq_wakeup = irq_of_parse_and_map(np, 1);
+	if (!priv->irq_wakeup)
+		dev_err(&pdev->dev, "wake_up irq not found\n");
+
+	priv->tso = of_property_read_bool(np, "tso-support");
+	/* TSO silicon is broken/absent on these revisions. */
+	if (cpu_is_asr1903_a0() || cpu_is_asr1903_z1())
+		priv->tso = false;
+	if (priv->tso) {
+		priv->irq_tso = irq_of_parse_and_map(np, 3);
+		if (!priv->irq_tso) {
+			dev_err(&pdev->dev, "tso irq not found\n");
+			priv->tso = false;
+		}
+	}
+
+	priv->sram_pool = of_gen_pool_get(dev->of_node, "eth,sram", 0);
+	if (priv->sram_pool) {
+		dev_notice(&pdev->dev, "use sram as tx desc\n");
+	}
+
+	ret = of_property_read_u32(np, "lpm-qos", &priv->pm_qos);
+	if (ret)
+		goto err_netdev;	/* was 'return ret': leaked ndev */
+
+	ret = of_property_read_u32(np, "3v3-enable", &priv->power_domain);
+	if (ret)
+		priv->power_domain = 0;
+
+	ret = of_property_read_u32(np, "mdio-clk-div", &priv->mdio_clk_div);
+	if (ret)
+		priv->mdio_clk_div = 0xfe;
+
+	if (of_property_read_bool(np, "enable-suspend"))
+		priv->en_suspend = 1;
+	else
+		priv->en_suspend = 0;
+
+	priv->wolopts = 0;
+	if (of_property_read_bool(np, "magic-packet-wakeup"))
+		priv->wolopts |= WAKE_MAGIC;
+
+	if (of_property_read_bool(np, "unicast-packet-wakeup"))
+		priv->wolopts |= WAKE_UCAST;
+
+	priv->dev_flags = 0;
+	if (of_property_read_bool(np, "suspend-not-keep-power")) {
+		priv->dev_flags |= EMAC_SUSPEND_POWER_DOWN_PHY;
+		priv->wolopts = 0;
+	}
+
+	priv->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(priv->pinctrl))
+		dev_err(dev, "could not get pinctrl handle\n");
+
+	priv->rgmii_pins = pinctrl_lookup_state(priv->pinctrl, "rgmii-pins");
+	if (IS_ERR(priv->rgmii_pins))
+		dev_err(dev, "could not get rgmii-pins pinstate\n");
+
+	emac_set_aib_power_domain(priv);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	priv->pm_qos_req.name = pdev->name;
+	pm_qos_add_request(&priv->pm_qos_req, PM_QOS_CPUIDLE_BLOCK,
+			   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_add_request(&priv->pm_ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
+			   PM_QOS_DEFAULT_VALUE);
+
+	priv->clk_scaling.polling_delay_ms = 1000; /* 1s window */
+	priv->clk_scaling.tx_up_threshold = 120; /* 120Mbps */
+	priv->clk_scaling.tx_down_threshold = 60;
+	priv->clk_scaling.rx_up_threshold = 60; /* 60Mbps */
+	priv->clk_scaling.rx_down_threshold = 20;
+	priv->clk_scaling.window_time = jiffies;
+	pm_qos_add_request(&priv->clk_scaling.ddr_qos, PM_QOS_DDR_DEVFREQ_MIN,
+			   PM_QOS_DEFAULT_VALUE);
+	INIT_WORK(&priv->qos_work, emac_ddr_qos_work);
+#endif
+	skb_queue_head_init(&priv->rx_skb);
+	ndev->watchdog_timeo = 5 * HZ;
+	ndev->base_addr = (unsigned long)priv->iobase;
+	ndev->irq = priv->irq;
+	/* set hw features */
+	ndev->features = NETIF_F_SG | NETIF_F_SOFT_FEATURES;
+	if (priv->tso) {
+		ndev->features |= NETIF_F_RXCSUM;
+		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		dev_info(&pdev->dev, "TSO feature enabled\n");
+	}
+	ndev->hw_features = ndev->features;
+	ndev->vlan_features = ndev->features;
+
+	ndev->ethtool_ops = &emac_ethtool_ops;
+	ndev->netdev_ops = &emac_netdev_ops;
+	if (pdev->dev.of_node)
+		mac_addr = of_get_mac_address(np);
+
+	if (!IS_ERR_OR_NULL(mac_addr)) {
+		//ether_addr_copy(ndev->dev_addr, mac_addr);
+		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+		if (!is_valid_ether_addr(ndev->dev_addr)) {
+			dev_info(&pdev->dev, "Using random mac address\n");
+			eth_hw_addr_random(ndev);
+		}
+	} else {
+		dev_info(&pdev->dev, "Using random mac address\n");
+		eth_hw_addr_random(ndev);
+	}
+
+	priv->hw_adj = of_property_read_bool(np, "hw-increment");
+	priv->ptp_support = of_property_read_bool(np, "ptp-support");
+	if (priv->ptp_support) {
+		pr_info("EMAC support IEEE1588 PTP Protocol\n");
+		if (of_property_read_u32(np, "ptp-clk-rate",
+					 &priv->ptp_clk_rate)) {
+			priv->ptp_clk_rate = 20000000;
+			pr_info("%s ptp_clk rate using default value:%d may inaccurate!!1\n",
+				__func__, priv->ptp_clk_rate);
+		}
+
+		priv->ptp_clk = devm_clk_get(&pdev->dev, "ptp-clk");
+		if (IS_ERR(priv->ptp_clk)) {
+			dev_err(&pdev->dev, "ptp clock not found.\n");
+			ret = PTR_ERR(priv->ptp_clk);
+			goto err_netdev;
+		}
+
+		clk_set_rate(priv->ptp_clk, priv->ptp_clk_rate);
+	}
+
+	priv->pps_info.enable_pps = 0;
+#ifdef CONFIG_PPS
+	ret = of_property_read_u32(np, "pps_source", &priv->pps_info.pps_source);
+	if (!ret) {
+		priv->irq_pps = irq_of_parse_and_map(np, 2);
+
+		if (priv->pps_info.pps_source < EMAC_PPS_MAX)
+			priv->pps_info.enable_pps = 1;
+		else
+			dev_err(&pdev->dev, "wrong PPS source!\n");
+	}
+#endif
+	priv->clk = devm_clk_get(&pdev->dev, "emac-clk");
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "emac clock not found.\n");
+		ret = PTR_ERR(priv->clk);
+		goto err_netdev;
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable emac clock: %d\n",
+			ret);
+		/* Clock is NOT enabled here, so do not goto clk_disable. */
+		goto err_netdev;
+	}
+
+	emac_sw_init(priv);
+	ret = emac_mdio_init(priv);
+	if (ret)
+		goto clk_disable;
+
+	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
+#ifdef WAN_LAN_AUTO_ADAPT
+	INIT_DELAYED_WORK(&priv->dhcp_work, emac_dhcp_work_func_t);
+#endif
+	if (of_phy_is_fixed_link(np)) {
+		if ((emac_set_fixed_link(np, priv) < 0)) {
+			ret = -ENODEV;
+			goto clk_disable;
+		}
+		dev_info(&pdev->dev, "find fixed link\n");
+		priv->fix_link = 1;
+	}
+
+	INIT_DELAYED_WORK(&priv->emac_pause_work, emac_pause_generate_work_fuc);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	strcpy(ndev->name, "eth%d");
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		pr_err("register_netdev failed\n");
+		goto err_mdio_deinit;
+	}
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+#ifdef CONFIG_ASR_EMAC_NAPI
+	netif_napi_add(ndev, &priv->rx_napi, emac_rx_poll, 32);
+	netif_tx_napi_add(ndev, &priv->tx_napi, emac_tx_poll, 32);
+#endif
+	priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
+	priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
+	priv->clk_tuning_enable = of_property_read_bool(np, "clk-tuning-enable");
+
+	if (priv->clk_tuning_enable) {
+		ret = of_property_read_u32(np, "tx-clk-config",
+					   &priv->tx_clk_config);
+		if (ret)
+			priv->tx_clk_config = TXCLK_PHASE_DEFAULT;
+
+		ret = of_property_read_u32(np, "rx-clk-config",
+					   &priv->rx_clk_config);
+		if (ret)
+			priv->rx_clk_config = RXCLK_PHASE_DEFAULT;
+#ifdef CONFIG_DEBUG_FS
+		if (!emac_fs_dir) {
+			emac_fs_dir = debugfs_create_dir(DRIVER_NAME, NULL);
+
+			if (!emac_fs_dir || IS_ERR(emac_fs_dir)) {
+				pr_err("emac debugfs create directory failed\n");
+			}else {
+				emac_clk_tuning = debugfs_create_file("clk_tuning", 0664,
+					emac_fs_dir, priv, &clk_tuning_fops);
+				if (!emac_clk_tuning) {
+					pr_err("emac debugfs create file failed\n");
+				}
+			}
+		}
+#endif
+	}
+	return 0;
+
+err_mdio_deinit:
+	emac_mdio_deinit(priv);
+clk_disable:
+	clk_disable_unprepare(priv->clk);
+err_netdev:
+	free_netdev(ndev);
+	emac_skbrb_release();
+	return ret;
+}
+
+/*
+ * Platform remove: unwind probe in reverse order.  free_netdev() releases
+ * priv itself (it lives in the netdev private area), so it must be the
+ * very last access to priv — the original called it before the MDIO/clk/
+ * qos teardown, a use-after-free.
+ */
+static int emac_remove(struct platform_device *pdev)
+{
+	struct emac_priv *priv = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	unregister_netdev(priv->ndev);
+	emac_reset_hw(priv);
+	emac_mdio_deinit(priv);
+	clk_disable_unprepare(priv->clk);
+	pm_qos_remove_request(&priv->pm_qos_req);
+	cancel_delayed_work_sync(&priv->emac_pause_work);
+#ifdef CONFIG_DDR_DEVFREQ
+	pm_qos_remove_request(&priv->pm_ddr_qos);
+	pm_qos_remove_request(&priv->clk_scaling.ddr_qos);
+#endif
+	free_netdev(priv->ndev);
+	emac_skbrb_release();
+	return 0;
+}
+
+/*
+ * Shutdown hook — intentionally empty?  NOTE(review): consider quiescing
+ * DMA here so the MAC cannot scribble over memory across kexec/reboot;
+ * confirm whether ndo_stop is guaranteed to have run first.
+ */
+static void emac_shutdown(struct platform_device *pdev)
+{
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System resume: when WoL was armed, re-attach the interface and clear
+ * the wakeup mode bits; otherwise re-enable the clock, restore PHY power
+ * and reopen the device.
+ *
+ * Fix vs original: the WoL option → hardware bit mapping was crossed
+ * (WAKE_MAGIC toggled MREGBIT_UNICAST_WAKEUP_MODE and vice versa) in
+ * both suspend and resume; both sides are corrected together here.
+ */
+static int emac_resume(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+	u32 ctrl, wake_mode = 0;
+
+	if (!priv->en_suspend)
+		return 0;
+
+	if (priv->wolopts) {
+		if (netif_running(ndev)) {
+			netif_device_attach(ndev);
+#ifdef CONFIG_ASR_EMAC_NAPI
+			napi_enable(&priv->rx_napi);
+			napi_enable(&priv->tx_napi);
+#endif
+		}
+
+		if (priv->wolopts & WAKE_MAGIC)
+			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
+		if (priv->wolopts & WAKE_UCAST)
+			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
+
+		disable_irq_wake(priv->irq_wakeup);
+		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+		ctrl &= ~wake_mode;
+		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+	} else {
+		clk_prepare_enable(priv->clk);
+
+		if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
+			emac_power_up(priv);
+
+		rtnl_lock();
+		dev_open(ndev, NULL);
+		rtnl_unlock();
+	}
+
+	return 0;
+}
+
+/*
+ * System suspend: when WoL is armed, detach the interface and program
+ * the wakeup mode bits (mapping fixed, see emac_resume above); otherwise
+ * close the device, optionally power the PHY down and gate the clock.
+ */
+static int emac_suspend(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->ndev;
+	u32 ctrl, wake_mode = 0;
+
+	if (!priv->en_suspend)
+		return 0;
+
+	if (priv->wolopts) {
+		if (netif_running(ndev)) {
+			netif_device_detach(ndev);
+#ifdef CONFIG_ASR_EMAC_NAPI
+			napi_disable(&priv->rx_napi);
+			napi_disable(&priv->tx_napi);
+#endif
+		}
+
+		if (priv->wolopts & WAKE_MAGIC)
+			wake_mode |= MREGBIT_MAGIC_PACKET_WAKEUP_MODE;
+		if (priv->wolopts & WAKE_UCAST)
+			wake_mode |= MREGBIT_UNICAST_WAKEUP_MODE;
+
+		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+		ctrl |= wake_mode;
+		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+		enable_irq_wake(priv->irq_wakeup);
+	} else {
+		rtnl_lock();
+		dev_close(ndev);
+		rtnl_unlock();
+
+		if (priv->dev_flags & EMAC_SUSPEND_POWER_DOWN_PHY)
+			emac_power_down(priv);
+
+		clk_disable_unprepare(priv->clk);
+	}
+
+	return 0;
+}
+
+/* noirq suspend: drop the cpuidle qos constraint while sleeping. */
+static int emac_suspend_noirq(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+
+	/* Nothing to relax when neither a PHY nor a fixed link is attached. */
+	if (!priv->ndev->phydev && !priv->fix_link)
+		return 0;
+
+	pr_pm_debug("==> enter emac_suspend_noirq\n");
+	pm_qos_update_request(&priv->pm_qos_req,
+			      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+	return 0;
+}
+
+/* noirq resume: re-apply the DT-configured cpuidle qos constraint. */
+static int emac_resume_noirq(struct device *dev)
+{
+	struct emac_priv *priv = dev_get_drvdata(dev);
+
+	if (!priv->ndev->phydev && !priv->fix_link)
+		return 0;
+
+	pr_pm_debug("==> enter emac_resume_noirq\n");
+	pm_qos_update_request(&priv->pm_qos_req, priv->pm_qos);
+	return 0;
+}
+
+/* Sleep callbacks plus noirq-phase qos handling (CONFIG_PM_SLEEP only). */
+static const struct dev_pm_ops emac_pm_ops = {
+	.suspend = emac_suspend,
+	.resume = emac_resume,
+	.suspend_noirq = emac_suspend_noirq,
+	.resume_noirq = emac_resume_noirq,
+};
+
+#define ASR_EMAC_PM_OPS (&emac_pm_ops)
+#else
+/* No PM sleep support: register the driver without pm callbacks. */
+#define ASR_EMAC_PM_OPS NULL
+#endif
+
+/* Platform driver glue; matched via the emac_of_match table. */
+static struct platform_driver emac_driver = {
+	.probe = emac_probe,
+	.remove = emac_remove,
+	.shutdown = emac_shutdown,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = of_match_ptr(emac_of_match),
+		.pm = ASR_EMAC_PM_OPS,
+	},
+};
+
+/* Standard module registration boilerplate. */
+module_platform_driver(emac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for ASR Emac");
+MODULE_ALIAS("platform:asr_eth");
diff --git a/marvell/linux/drivers/net/ethernet/asr/emac_eth.h b/marvell/linux/drivers/net/ethernet/asr/emac_eth.h
new file mode 100644
index 0000000..fb98e19
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/asr/emac_eth.h
@@ -0,0 +1,1053 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _EMAC_ETH_H_
+#define _EMAC_ETH_H_
+#include <linux/bitops.h>
+#include <linux/phy.h>
+#include <linux/mutex.h>
+#include <linux/timecounter.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
+#include <soc/asr/addr-map.h>
+
+//#define WAN_LAN_AUTO_ADAPT 1
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+/*
+ * NOTE(review): <linux/bits.h> already provides GENMASK/GENMASK_ULL with
+ * this exact replacement list.  An identical redefinition is legal C, but
+ * it will warn or break if the kernel copy ever changes — consider #ifndef
+ * guards or dropping these local copies.  TODO confirm against the kernel
+ * version this driver targets.
+ */
+#define GENMASK(h, l) \
+ (((~UL(0)) - (UL(1) << (l)) + 1) & \
+ (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~ULL(0)) - (ULL(1) << (l)) + 1) & \
+ (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#define ASR_EMAC_DDR_BOOST_FREQ (400000)
+#define CONFIG_ASR_EMAC_NAPI
+#define CONFIG_ASR_EMAC_RX_NO_COPY
+#define EMAC_TX_FIFO_DWORDS (4096/4)
+#define EMAC_RX_FIFO_DWORDS (4096/4)
+
+/* EMAC clock control */
+#define EMAC_CLK_CTL 0x160
+#define PHY_INTF_RGMII BIT(2)
+
+#define RMII_TX_PHASE_OFFSET (6)
+#define RMII_TX_PHASE_MASK GENMASK(6, 6)
+#define RMII_RX_PHASE_OFFSET (7)
+#define RMII_RX_PHASE_MASK GENMASK(7, 7)
+
+#define RGMII_TX_PHASE_OFFSET (5)
+#define RGMII_TX_PHASE_MASK GENMASK(5, 5)
+#define RGMII_RX_PHASE_OFFSET (4)
+#define RGMII_RX_PHASE_MASK GENMASK(4, 4)
+
+/* DMA register set */
+#define DMA_CONFIGURATION 0x0000
+#define DMA_CONTROL 0x0004
+#define DMA_STATUS_IRQ 0x0008
+#define DMA_INTERRUPT_ENABLE 0x000C
+
+#define DMA_TRANSMIT_AUTO_POLL_COUNTER 0x0010
+#define DMA_TRANSMIT_POLL_DEMAND 0x0014
+#define DMA_RECEIVE_POLL_DEMAND 0x0018
+
+#define DMA_TRANSMIT_BASE_ADDRESS 0x001C
+#define DMA_RECEIVE_BASE_ADDRESS 0x0020
+#define DMA_MISSED_FRAME_COUNTER 0x0024
+#define DMA_STOP_FLUSH_COUNTER 0x0028
+
+#define DMA_CURRENT_TRANSMIT_DESCRIPTOR_POINTER 0x0030
+#define DMA_CURRENT_TRANSMIT_BUFFER_POINTER 0x0034
+#define DMA_CURRENT_RECEIVE_DESCRIPTOR_POINTER 0x0038
+#define DMA_CURRENT_RECEIVE_BUFFER_POINTER 0x003C
+
+/* MAC Register set */
+#define MAC_GLOBAL_CONTROL 0x0100
+#define MAC_TRANSMIT_CONTROL 0x0104
+#define MAC_RECEIVE_CONTROL 0x0108
+#define MAC_MAXIMUM_FRAME_SIZE 0x010C
+#define MAC_TRANSMIT_JABBER_SIZE 0x0110
+#define MAC_RECEIVE_JABBER_SIZE 0x0114
+#define MAC_ADDRESS_CONTROL 0x0118
+#define MAC_MDIO_CLK_DIV 0x011C
+#define MAC_ADDRESS1_HIGH 0x0120
+#define MAC_ADDRESS1_MED 0x0124
+#define MAC_ADDRESS1_LOW 0x0128
+#define MAC_ADDRESS2_HIGH 0x012C
+#define MAC_ADDRESS2_MED 0x0130
+#define MAC_ADDRESS2_LOW 0x0134
+#define MAC_ADDRESS3_HIGH 0x0138
+#define MAC_ADDRESS3_MED 0x013C
+#define MAC_ADDRESS3_LOW 0x0140
+#define MAC_ADDRESS4_HIGH 0x0144
+#define MAC_ADDRESS4_MED 0x0148
+#define MAC_ADDRESS4_LOW 0x014C
+#define MAC_MULTICAST_HASH_TABLE1 0x0150
+#define MAC_MULTICAST_HASH_TABLE2 0x0154
+#define MAC_MULTICAST_HASH_TABLE3 0x0158
+#define MAC_MULTICAST_HASH_TABLE4 0x015C
+#define MAC_FC_CONTROL 0x0160
+#define MAC_FC_PAUSE_FRAME_GENERATE 0x0164
+#define MAC_FC_SOURCE_ADDRESS_HIGH 0x0168
+#define MAC_FC_SOURCE_ADDRESS_MED 0x016C
+#define MAC_FC_SOURCE_ADDRESS_LOW 0x0170
+#define MAC_FC_DESTINATION_ADDRESS_HIGH 0x0174
+#define MAC_FC_DESTINATION_ADDRESS_MED 0x0178
+#define MAC_FC_DESTINATION_ADDRESS_LOW 0x017C
+#define MAC_FC_PAUSE_TIME_VALUE 0x0180
+#define MAC_FC_AUTO_HIGH_PAUSE_TIME_VALUE 0x0184
+#define MAC_FC_AUTO_LOW_PAUSE_TIME_VALUE 0x0188
+#define MAC_FC_AUTO_HIGH_THRESHOLD 0x018C
+#define MAC_FC_AUTO_LOW_THRESHOLD 0x0190
+#define MAC_MDIO_CONTROL 0x01A0
+#define MAC_MDIO_DATA 0x01A4
+#define MAC_RX_STATCTR_CONTROL 0x01A8
+#define MAC_RX_STATCTR_DATA_HIGH 0x01AC
+#define MAC_RX_STATCTR_DATA_LOW 0x01B0
+#define MAC_TX_STATCTR_CONTROL 0x01B4
+#define MAC_TX_STATCTR_DATA_HIGH 0x01B8
+#define MAC_TX_STATCTR_DATA_LOW 0x01BC
+#define MAC_TRANSMIT_FIFO_ALMOST_FULL 0x01C0
+#define MAC_TRANSMIT_PACKET_START_THRESHOLD 0x01C4
+#define MAC_RECEIVE_PACKET_START_THRESHOLD 0x01C8
+#define MAC_STATUS_IRQ 0x01E0
+#define MAC_INTERRUPT_ENABLE 0x01E4
+
+/* DMA_CONFIGURATION (0x0000) register bit info
+ * 0-DMA controller in normal operation mode,
+ * 1-DMA controller reset to default state,
+ * clearing all internal state information
+ */
+#define MREGBIT_SOFTWARE_RESET BIT(0)
+#define MREGBIT_BURST_1WORD BIT(1)
+#define MREGBIT_BURST_2WORD BIT(2)
+#define MREGBIT_BURST_4WORD BIT(3)
+#define MREGBIT_BURST_8WORD BIT(4)
+#define MREGBIT_BURST_16WORD BIT(5)
+#define MREGBIT_BURST_32WORD BIT(6)
+#define MREGBIT_BURST_64WORD BIT(7)
+#define MREGBIT_BURST_LENGTH GENMASK(7, 1)
+#define MREGBIT_DESCRIPTOR_SKIP_LENGTH GENMASK(12, 8)
+/* For Receive and Transmit DMA operate in Big-Endian mode for Descriptors. */
+#define MREGBIT_DESCRIPTOR_BYTE_ORDERING BIT(13)
+#define MREGBIT_BIG_LITLE_ENDIAN BIT(14)
+#define MREGBIT_TX_RX_ARBITRATION BIT(15)
+#define MREGBIT_WAIT_FOR_DONE BIT(16)
+#define MREGBIT_STRICT_BURST BIT(17)
+#define MREGBIT_DMA_64BIT_MODE BIT(18)
+
+/* DMA_CONTROL (0x0004) register bit info */
+#define MREGBIT_START_STOP_TRANSMIT_DMA BIT(0)
+#define MREGBIT_START_STOP_RECEIVE_DMA BIT(1)
+
+/* DMA_STATUS_IRQ (0x0008) register bit info */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_IRQ BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_IRQ BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_IRQ BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_IRQ BIT(7)
+#define MREGBIT_MAC_IRQ BIT(8)
+#define MREGBIT_TRANSMIT_DMA_STATE GENMASK(18, 16)
+#define MREGBIT_RECEIVE_DMA_STATE GENMASK(23, 20)
+
+/* DMA_INTERRUPT_ENABLE ( 0x000C) register bit info */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_INTR_ENABLE BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE BIT(7)
+#define MREGBIT_MAC_INTR_ENABLE BIT(8)
+
+/* MAC_GLOBAL_CONTROL (0x0100) register bit info */
+#define MREGBIT_SPEED GENMASK(1, 0)
+#define MREGBIT_SPEED_10M 0x0
+#define MREGBIT_SPEED_100M BIT(0)
+#define MREGBIT_SPEED_1000M BIT(1)
+#define MREGBIT_FULL_DUPLEX_MODE BIT(2)
+#define MREGBIT_RESET_RX_STAT_COUNTERS BIT(3)
+#define MREGBIT_RESET_TX_STAT_COUNTERS BIT(4)
+#define MREGBIT_UNICAST_WAKEUP_MODE BIT(8)
+#define MREGBIT_MAGIC_PACKET_WAKEUP_MODE BIT(9)
+
+/* MAC_TRANSMIT_CONTROL (0x0104) register bit info */
+#define MREGBIT_TRANSMIT_ENABLE BIT(0)
+#define MREGBIT_INVERT_FCS BIT(1)
+#define MREGBIT_DISABLE_FCS_INSERT BIT(2)
+#define MREGBIT_TRANSMIT_AUTO_RETRY BIT(3)
+#define MREGBIT_IFG_LEN GENMASK(6, 4)
+#define MREGBIT_PREAMBLE_LENGTH GENMASK(9, 7)
+
+/* MAC_RECEIVE_CONTROL (0x0108) register bit info */
+#define MREGBIT_RECEIVE_ENABLE BIT(0)
+#define MREGBIT_DISABLE_FCS_CHECK BIT(1)
+#define MREGBIT_STRIP_FCS BIT(2)
+#define MREGBIT_STORE_FORWARD BIT(3)
+#define MREGBIT_STATUS_FIRST BIT(4)
+#define MREGBIT_PASS_BAD_FRAMES BIT(5)
+#define MREGBIT_ACOOUNT_VLAN BIT(6)
+
+/* MAC_MAXIMUM_FRAME_SIZE (0x010C) register bit info */
+#define MREGBIT_MAX_FRAME_SIZE GENMASK(13, 0)
+
+/* MAC_TRANSMIT_JABBER_SIZE (0x0110) register bit info */
+#define MREGBIT_TRANSMIT_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_RECEIVE_JABBER_SIZE (0x0114) register bit info */
+#define MREGBIT_RECEIVE_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_ADDRESS_CONTROL (0x0118) register bit info */
+#define MREGBIT_MAC_ADDRESS1_ENABLE BIT(0)
+#define MREGBIT_MAC_ADDRESS2_ENABLE BIT(1)
+#define MREGBIT_MAC_ADDRESS3_ENABLE BIT(2)
+#define MREGBIT_MAC_ADDRESS4_ENABLE BIT(3)
+#define MREGBIT_INVERSE_MAC_ADDRESS1_ENABLE BIT(4)
+#define MREGBIT_INVERSE_MAC_ADDRESS2_ENABLE BIT(5)
+#define MREGBIT_INVERSE_MAC_ADDRESS3_ENABLE BIT(6)
+#define MREGBIT_INVERSE_MAC_ADDRESS4_ENABLE BIT(7)
+#define MREGBIT_PROMISCUOUS_MODE BIT(8)
+
+/* MAC MDIO Clock Division Control (0x011C) register bit info */
+#define MREGBIT_MAC_MDIO_CLK_DIV BIT(0)
+#define MREGBIT_MAC_MDIO_CLK_DIV_MASK GENMASK(7, 0)
+
+/* MAC_ADDRESSx_HIGH (0x0120) register bit info */
+#define MREGBIT_MAC_ADDRESS1_01_BYTE GENMASK(7, 0)
+#define MREGBIT_MAC_ADDRESS1_02_BYTE GENMASK(15, 8)
+/* MAC_ADDRESSx_MED (0x0124) register bit info */
+#define MREGBIT_MAC_ADDRESS1_03_BYTE GENMASK(7, 0)
+#define MREGBIT_MAC_ADDRESS1_04_BYTE GENMASK(15, 8)
+/* MAC_ADDRESSx_LOW (0x0128) register bit info */
+#define MREGBIT_MAC_ADDRESS1_05_BYTE GENMASK(7, 0)
+#define MREGBIT_MAC_ADDRESS1_06_BYTE GENMASK(15, 8)
+
+/* MAC_FC_CONTROL (0x0160) register bit info */
+#define MREGBIT_FC_DECODE_ENABLE BIT(0)
+#define MREGBIT_FC_GENERATION_ENABLE BIT(1)
+#define MREGBIT_AUTO_FC_GENERATION_ENABLE BIT(2)
+#define MREGBIT_MULTICAST_MODE BIT(3)
+#define MREGBIT_BLOCK_PAUSE_FRAMES BIT(4)
+
+/* MAC_FC_PAUSE_FRAME_GENERATE (0x0164) register bit info */
+#define MREGBIT_GENERATE_PAUSE_FRAME BIT(0)
+
+/* MAC_FC_SRC/DST_ADDRESS_HIGH (0x0168) register bit info */
+#define MREGBIT_MAC_ADDRESS_01_BYTE GENMASK(7, 0)
+#define MREGBIT_MAC_ADDRESS_02_BYTE GENMASK(15, 8)
+/* MAC_FC_SRC/DST_ADDRESS_MED (0x016C) register bit info */
+#define MREGBIT_MAC_ADDRESS_03_BYTE GENMASK(7, 0)
+#define MREGBIT_MAC_ADDRESS_04_BYTE GENMASK(15, 8)
+/* MAC_FC_SRC/DST_ADDRESS_LOW (0x0170) register bit info */
+#define MREGBIT_MAC_ADDRESS_05_BYTE GENMASK(7, 0)
+#define MREGBIT_MAC_ADDRESS_06_BYTE GENMASK(15, 8)
+
+/* MAC_FC_PAUSE_TIME_VALUE (0x0180) register bit info */
+#define MREGBIT_MAC_FC_PAUSE_TIME GENMASK(15, 0)
+
+/* MAC_MDIO_CONTROL (0x01A0) register bit info */
+#define MREGBIT_PHY_ADDRESS GENMASK(4, 0)
+#define MREGBIT_REGISTER_ADDRESS GENMASK(9, 5)
+#define MREGBIT_MDIO_READ_WRITE BIT(10)
+#define MREGBIT_START_MDIO_TRANS BIT(15)
+
+/* MAC_MDIO_DATA (0x01A4) register bit info */
+#define MREGBIT_MDIO_DATA GENMASK(15, 0)
+
+/* MAC_RX_STATCTR_CONTROL (0x01A8) register bit info */
+#define MREGBIT_RX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_RX_COUNTER_READ BIT(15)
+
+/* MAC_RX_STATCTR_DATA_HIGH (0x01AC) register bit info */
+#define MREGBIT_RX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_RX_STATCTR_DATA_LOW (0x01B0) register bit info */
+#define MREGBIT_RX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TX_STATCTR_CONTROL (0x01B4) register bit info */
+#define MREGBIT_TX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_TX_COUNTER_READ BIT(15)
+
+/* MAC_TX_STATCTR_DATA_HIGH (0x01B8) register bit info */
+#define MREGBIT_TX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_TX_STATCTR_DATA_LOW (0x01BC) register bit info */
+#define MREGBIT_TX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TRANSMIT_FIFO_ALMOST_FULL (0x01C0) register bit info */
+#define MREGBIT_TX_FIFO_AF GENMASK(13, 0)
+
+/* MAC_TRANSMIT_PACKET_START_THRESHOLD (0x01C4) register bit info */
+#define MREGBIT_TX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_RECEIVE_PACKET_START_THRESHOLD (0x01C8) register bit info */
+#define MREGBIT_RX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_STATUS_IRQ (0x01E0) register bit info */
+#define MREGBIT_MAC_UNDERRUN_IRQ BIT(0)
+#define MREGBIT_MAC_JABBER_IRQ BIT(1)
+
+/* MAC_INTERRUPT_ENABLE (0x01E4) register bit info */
+#define MREGBIT_MAC_UNDERRUN_INTERRUPT_ENABLE BIT(0)
+#define MREGBIT_JABBER_INTERRUPT_ENABLE BIT(1)
+
+/* Receive Descriptors */
+/* MAC_RECEIVE_DESCRIPTOR0 () register bit info */
+#define MREGBIT_FRAME_LENGTH GENMASK(13, 0)
+#define MREGBIT_APPLICATION_STATUS GENMASK(28, 14)
+#define MREGBIT_LAST_DESCRIPTOR BIT(29)
+#define MREGBIT_FIRST_DESCRIPTOR BIT(30)
+#define MREGBIT_OWN_BIT BIT(31)
+
+/* MAC_RECEIVE_DESCRIPTOR1 () register bit info */
+#define MREGBIT_BUFFER1_SIZE GENMASK(11, 0)
+#define MREGBIT_BUFFER2_SIZE GENMASK(23, 12)
+#define MREGBIT_SECOND_ADDRESS_CHAINED BIT(25)
+#define MREGBIT_END_OF_RING BIT(26)
+
+/* MAC_RECEIVE_DESCRIPTOR2 () register bit info */
+#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0)
+
+/* MAC_RECEIVE_DESCRIPTOR3 () register bit info */
+#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0)
+
+/* Transmit Descriptors */
+/* TD_TRANSMIT_DESCRIPTOR0 () register bit info */
+#define MREGBIT_TX_PACKET_STATUS GENMASK(29, 0)
+#define MREGBIT_OWN_BIT BIT(31)
+
+/* TD_TRANSMIT_DESCRIPTOR1 () register bit info */
+#define MREGBIT_BUFFER1_SIZE GENMASK(11, 0)
+#define MREGBIT_BUFFER2_SIZE GENMASK(23, 12)
+#define MREGBIT_FORCE_EOP_ERROR BIT(24)
+#define MREGBIT_SECOND_ADDRESS_CHAINED BIT(25)
+#define MREGBIT_END_OF_RING BIT(26)
+#define MREGBIT_DISABLE_PADDING BIT(27)
+#define MREGBIT_ADD_CRC_DISABLE BIT(28)
+#define MREGBIT_FIRST_SEGMENT BIT(29)
+#define MREGBIT_LAST_SEGMENT BIT(30)
+#define MREGBIT_INTERRUPT_ON_COMPLETION BIT(31)
+
+/* TD_TRANSMIT_DESCRIPTOR2 () register bit info */
+#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0)
+
+/* TD_TRANSMIT_DESCRIPTOR3 () register bit info */
+#define MREGBIT_BUFFER_ADDRESS1 GENMASK(31, 0)
+
+/* RX frame status */
+#define EMAC_RX_FRAME_ALIGN_ERR BIT(0)
+#define EMAC_RX_FRAME_RUNT BIT(1)
+#define EMAC_RX_FRAME_ETHERNET_TYPE BIT(2)
+#define EMAC_RX_FRAME_VLAN BIT(3)
+#define EMAC_RX_FRAME_MULTICAST BIT(4)
+#define EMAC_RX_FRAME_BROADCAST BIT(5)
+#define EMAC_RX_FRAME_CRC_ERR BIT(6)
+#define EMAC_RX_FRAME_MAX_LEN_ERR BIT(7)
+#define EMAC_RX_FRAME_JABBER_ERR BIT(8)
+#define EMAC_RX_FRAME_LENGTH_ERR BIT(9)
+#define EMAC_RX_FRAME_MAC_ADDR1_MATCH BIT(10)
+#define EMAC_RX_FRAME_MAC_ADDR2_MATCH BIT(11)
+#define EMAC_RX_FRAME_MAC_ADDR3_MATCH BIT(12)
+#define EMAC_RX_FRAME_MAC_ADDR4_MATCH BIT(13)
+#define EMAC_RX_FRAME_PAUSE_CTRL BIT(14)
+
+/* emac ptp 1588 register */
+#define PTP_1588_CTRL (0x300)
+#define TX_TIMESTAMP_EN BIT(1)
+#define RX_TIMESTAMP_EN BIT(2)
+#define RX_PTP_PKT_TYPE_OFST 3
+#define RX_PTP_PKT_TYPE_MSK GENMASK(5, 3)
+#define PPS_MODE_ENABLE BIT(6)
+#define PPS_COUNTER_RESET BIT(7)
+
+#define PTP_INRC_ATTR (0x304)
+#define INRC_VAL_MSK GENMASK(23, 0)
+#define INCR_PERIOD_OFST 24
+#define INCR_PERIOD_MSK GENMASK(31, 24)
+
+#define PTP_ETH_TYPE (0x308)
+#define PTP_ETH_TYPE_MSK GENMASK(15, 0)
+
+#define PTP_MSG_ID (0x30c)
+
+#define PTP_UDP_PORT (0x310)
+#define PTP_UDP_PORT_MSK GENMASK(15, 0)
+
+/* read current system time from controller */
+#define SYS_TIME_GET_LOW (0x320)
+#define SYS_TIME_GET_HI (0x324)
+
+#define SYS_TIME_ADJ_LOW (0x328)
+#define SYS_TIME_LOW_MSK GENMASK(31, 0)
+#define SYS_TIME_ADJ_HI (0x32c)
+#define SYS_TIME_IS_NEG BIT(31)
+
+#define TX_TIMESTAMP_LOW (0x330)
+#define TX_TIMESTAMP_HI (0x334)
+
+#define RX_TIMESTAMP_LOW (0x340)
+#define RX_TIMESTAMP_HI (0x344)
+
+#define RX_PTP_PKT_ATTR_LOW (0x348)
+#define PTP_SEQ_ID_MSK GENMASK(15, 0)
+#define PTP_SRC_ID_LOW_OFST 16
+#define PTP_SRC_ID_LOW_MSK GENMASK(31, 16)
+
+#define RX_PTP_PKT_ATTR_MID (0x34c)
+#define PTP_SRC_ID_MID_MSK GENMASK(31, 0)
+
+#define RX_PTP_PKT_ATTR_HI (0x350)
+#define PTP_SRC_ID_HI_MSK GENMASK(31, 0)
+
+#define PTP_1588_IRQ_STS (0x360)
+#define PTP_1588_IRQ_EN (0x364)
+#define PTP_TX_TIMESTAMP BIT(0)
+#define PTP_RX_TIMESTAMP BIT(1)
+#define PTP_PPS_VALID BIT(2)
+
+#define PTP_PPS_TIME_L (0x368)
+#define PTP_PPS_TIME_H (0x36c)
+#define PTP_PPS_COUNTER (0x370)
+#define PTP_PPS_VALUE (0x374)
+
+/* emac TSO register */
+#define TSO_CONFIG (0x0000)
+#define TSO_CONFIG_RST BIT(0)
+#define TSO_CONFIG_RX_EN BIT(1)
+#define TSO_CONFIG_TX_EN BIT(2)
+#define TSO_CONFIG_RX_CSUM_EN BIT(4)
+
+#define TSO_DMA_CONFIG (0x0004)
+#define TSO_ERR_INTR_STS (0x0008)
+#define TSO_ERR_INTR_ENA (0x000C)
+
+#define TSO_AP_RX_INTR_STS (0x0010)
+#define TSO_AP_RX_INTR_CSUM_DONE BIT(0)
+#define TSO_AP_RX_INTR_CSUM_ERR BIT(1)
+#define TSO_AP_RX_INTR_UNAVAIL BIT(2)
+
+#define TSO_AP_RX_INTR_ENA (0x0014)
+#define TSO_AP_RX_INTR_ENA_CSUM_DONE BIT(0)
+#define TSO_AP_RX_INTR_ENA_CSUM_ERR BIT(1)
+#define TSO_AP_RX_INTR_ENA_UNAVAIL BIT(2)
+
+#define TSO_AP_TX_INTR_STS (0x0018)
+#define TSO_AP_TX_INTR_TSO_DONE BIT(0)
+#define TSO_AP_TX_INTR_CSUM_DONE BIT(1)
+#define TSO_AP_TX_INTR_UNAVAIL BIT(2)
+
+#define TSO_AP_TX_INTR_ENA (0x001C)
+#define TSO_AP_TX_INTR_ENA_TSO_DONE BIT(0)
+#define TSO_AP_TX_INTR_ENA_CSUM_DONE BIT(1)
+#define TSO_AP_TX_INTR_ENA_UNAVAIL BIT(2)
+
+#define TSO_RX_DESC_BA (0x0020)
+#define TSO_RX_AUTO_POLL_CNT (0x0024)
+#define TSO_RX_POLL_DEMAND (0x0028)
+#define TSO_TX_DESC_BA (0x002C)
+#define TSO_TX_HDR_BA (0x0030)
+#define TSO_TX_HDR_CTR (0x0034)
+#define TSO_TX_AUTO_POLL_CNT (0x0038)
+#define TSO_TX_POLL_DEMAND (0x003C)
+#define TSO_RX_CURR_DESC_ADDR (0x0040)
+#define TSO_TX_CURR_DESC_ADDR (0x0044)
+
+#define TSO_MAX_SEG_SIZE (SZ_64K - 1)
+
+/* Default descriptor ring sizes (entries per ring). */
+#define EMAC_TX_RING_SIZE 512
+#define EMAC_RX_RING_SIZE 512
+/* Smaller RX ring used on low-memory systems (see limit below). */
+#define EMAC_SMALL_RX_RING_SIZE 128
+/* System-memory threshold (bytes) below which the small RX ring is used:
+ * 64 MiB.  Fixed typo: was "64 * 1024 * 1204" (~75 MiB by accident).
+ */
+#define EMAC_SMALL_RING_MEM_LIMIT (64 * 1024 * 1024)
+/* Restart a stopped TX queue once at least this many descriptors are free. */
+#define EMAC_TX_WAKE_THRESHOLD 32
+
+#define EMAC_RX_BUFFER_1024 1024
+#define EMAC_RX_BUFFER_2048 2048
+#define EMAC_RX_BUFFER_4096 4096
+
+#define MAX_DATA_PWR_TX_DES 11
+#define MAX_DATA_LEN_TX_DES 2048 //2048=1<<11
+
+#define MAX_TX_STATS_NUM 12
+#define MAX_RX_STATS_NUM 25
+
+/* The sizes (in bytes) of a ethernet packet */
+#define ETHERNET_HEADER_SIZE 14
+#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 //With FCS
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 //With FCS
+#define ETHERNET_FCS_SIZE 4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+ (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+ (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+#define TX_STORE_FORWARD_MODE 0x5EE
+
+/* only works for sizes that are powers of 2 */
+/* NOTE(review): EMAC_ROUNDUP assigns back into its first argument — do not
+ * pass an expression with side effects as @i.
+ */
+#define EMAC_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
+
+/* number of descriptors are required for len */
+/* S = total length, X = per-descriptor size as a power-of-two shift. */
+#define EMAC_TXD_COUNT(S, X) (((S) >> (X)) + 1)
+
+/* Free descriptors in ring R: distance from nxt_use up to (but excluding)
+ * nxt_clean, wrapping at total_cnt; one slot is kept unused to tell a full
+ * ring from an empty one.  smp_load_acquire pairs with the producer's
+ * release store on nxt_clean.
+ */
+#define EMAC_DESC_UNUSED(R) \
+({ \
+ unsigned int clean = smp_load_acquire(&(R)->nxt_clean); \
+ unsigned int use = READ_ONCE((R)->nxt_use); \
+ (clean > use ? 0 : (R)->total_cnt) + clean - use - 1; \
+})
+
+typedef struct ifreq st_ifreq, *pst_ifreq;
+
+enum rx_frame_status {
+ frame_ok = 0,
+ frame_discard,
+ frame_max,
+};
+
+enum rx_ptp_type {
+ PTP_V2_L2_ONLY = 0x0,
+ PTP_V1_L4_ONLY = 0x1,
+ PTP_V2_L2_L4 = 0x2,
+};
+
+enum ptp_event_msg_id {
+ MSG_SYNC = 0x00,
+ MSG_DELAY_REQ = 0x01,
+ MSG_PDELAY_REQ = 0x02,
+ MSG_PDELAY_RESP = 0x03,
+ ALL_EVENTS = 0x03020100,
+};
+
+struct ClockIdentity {
+ u8 id[8];
+};
+struct PortIdentity {
+ struct ClockIdentity clockIdentity;
+ u16 portNumber;
+} __packed;
+struct ptp_header {
+ u8 tsmt; /* transportSpecific | messageType */
+ u8 ver; /* reserved | versionPTP */
+ u16 messageLength;
+ u8 domainNumber;
+ u8 reserved1;
+ u8 flagField[2];
+ s64 correction;
+ u32 reserved2;
+ struct PortIdentity sourcePortIdentity;
+ u16 sequenceId;
+ u8 control;
+ s8 logMessageInterval;
+} __packed;
+
+struct Timestamp {
+ u16 seconds_msb; /* 16 bits + */
+ u32 seconds_lsb; /* 32 bits = 48 bits*/
+ s32 nanoseconds;
+} __packed;
+
+struct pdelay_req_msg {
+ struct ptp_header hdr;
+ struct Timestamp originTimestamp;
+ struct PortIdentity reserved;
+} __packed;
+
+struct pdelay_resp_msg {
+ struct ptp_header hdr;
+ struct Timestamp requestReceiptTimestamp;
+ struct PortIdentity requestingPortIdentity;
+} __packed;
+
+enum emac_state {
+ EMAC_DOWN,
+ EMAC_RESET_REQUESTED,
+ EMAC_RESETING,
+ EMAC_TASK_SCHED,
+ EMAC_STATE_MAX,
+};
+
+enum emac_csum_res {
+ EMAC_CSUM_FAIL,
+ EMAC_CSUM_UNAVAIL,
+ EMAC_CSUM_TCP_UDP,
+ EMAC_CSUM_IP4_TCP_UDP,
+};
+
+/* Receive Descriptor structure
+ *
+ * Hardware layout of one RX DMA descriptor (4 x 32-bit words).  Bitfield
+ * positions must match the MREGBIT_* receive-descriptor masks defined
+ * above — do not reorder fields.
+ */
+struct emac_rx_desc {
+ u32 FramePacketLength:14; /* received frame length (MREGBIT_FRAME_LENGTH) */
+ u32 ApplicationStatus:15; /* EMAC_RX_FRAME_* status bits (bits 28:14) */
+ u32 LastDescriptor:1;
+ u32 FirstDescriptor:1;
+ u32 OWN:1;  /* 1 = owned by DMA, 0 = owned by driver */
+
+ u32 BufferSize1:12;
+ u32 BufferSize2:12;
+ u32 Reserved1:1;
+ u32 SecondAddressChained:1;
+ u32 EndRing:1;  /* marks the last descriptor of the ring */
+ u32 csum_res:2;  /* enum emac_csum_res result, valid when csum_done */
+ u32 csum_done:1;
+ u32 rx_timestamp:1; /* RX timestamp captured for this frame */
+ u32 ptp_pkt:1;  /* frame recognized as a PTP packet */
+
+ u32 BufferAddr1;
+ u32 BufferAddr2;
+};
+
+#define EMAC_TDES0_MSS GENMASK(11, 0)
+#define EMAC_TDES0_MSS_SHIFT 0
+#define EMAC_TDES0_HB2S GENMASK(15, 12)
+#define EMAC_TDES0_HB2S_SHIFT 12
+#define EMAC_TDES0_HB1S GENMASK(19, 16)
+#define EMAC_TDES0_HB1S_SHIFT 16
+#define EMAC_TDES0_HL GENMASK(26, 20)
+#define EMAC_TDES0_HL_SHIFT 20
+#define EMAC_TDES0_HSE BIT(27)
+#define EMAC_TDES0_TSO BIT(28)
+#define EMAC_TDES0_COE BIT(29)
+#define EMAC_TDES0_TS BIT(30)
+#define EMAC_TDES0_OWN BIT(31)
+
+#define EMAC_TDES1_BF1_SIZE GENMASK(11, 0)
+#define EMAC_TDES1_BF1_SIZE_SHIFT 0
+#define EMAC_TDES1_BF2_SIZE GENMASK(23, 12)
+#define EMAC_TDES1_BF2_SIZE_SHIFT 12
+#define EMAC_TDES1_ER BIT(26)
+#define EMAC_TDES1_FD BIT(29)
+#define EMAC_TDES1_LD BIT(30)
+#define EMAC_TDES1_IOC BIT(31)
+
+/* Transmit Descriptor
+ *
+ * Hardware layout of one TX DMA descriptor (4 x 32-bit words).  Word-0
+ * bitfields correspond to the EMAC_TDES0_* masks and word-1 to the
+ * EMAC_TDES1_*/MREGBIT_* masks above — do not reorder fields.
+ */
+struct emac_tx_desc {
+ u32 mss:12;  /* TSO maximum segment size (EMAC_TDES0_MSS) */
+ u32 hb2s:4;  /* high bits [15:12] of buffer-2 length */
+ u32 hb1s:4;  /* high bits [15:12] of buffer-1 length */
+ u32 hdr_len:7;  /* TSO header length (EMAC_TDES0_HL) */
+ u32 hse:1;  /* header already split/stored (EMAC_TDES0_HSE) */
+ u32 tso:1;  /* enable TCP segmentation offload */
+ u32 coe:1;  /* enable checksum offload */
+ u32 tx_timestamp:1; /* request a TX timestamp (EMAC_TDES0_TS) */
+ u32 OWN:1;  /* 1 = owned by DMA, 0 = owned by driver */
+
+ u32 BufferSize1:12; /* low 12 bits of buffer-1 length */
+ u32 BufferSize2:12; /* low 12 bits of buffer-2 length */
+ u32 ForceEOPError:1;
+ u32 SecondAddressChained:1;
+ u32 EndRing:1;  /* marks the last descriptor of the ring */
+ u32 DisablePadding:1;
+ u32 AddCRCDisable:1;
+ u32 FirstSegment:1;
+ u32 LastSegment:1;
+ u32 InterruptOnCompletion:1;
+
+ u32 BufferAddr1;
+ u32 BufferAddr2;
+};
+
+/* Descriptor buffer structure */
+struct emac_desc_buffer {
+ struct sk_buff *skb;
+ u64 dma_addr;
+ void *buff_addr;
+ unsigned long ulTimeStamp;
+ u16 dma_len;
+ u16 nxt_watch;
+};
+
+/* Descriptor ring structure */
+struct emac_desc_ring {
+ /* virtual memory address to the descriptor ring memory */
+ void *desc_addr;
+ /* physical address of the descriptor ring */
+ dma_addr_t desc_dma_addr;
+ /* length of descriptor ring in bytes */
+ u32 total_size;
+ /* number of descriptors in the ring */
+ u32 total_cnt;
+ bool in_sram;
+ /* next descriptor to associate a buffer with */
+ u32 nxt_use;
+ /* next descriptor to check for DD status bit */
+ u32 nxt_clean;
+ /* array of buffer information structs */
+ struct emac_desc_buffer *desc_buf;
+};
+
+struct emac_hw_stats {
+ u32 tx_ok_pkts;
+ u32 tx_total_pkts;
+ u32 tx_ok_bytes;
+ u32 tx_err_pkts;
+ u32 tx_singleclsn_pkts;
+ u32 tx_multiclsn_pkts;
+ u32 tx_lateclsn_pkts;
+ u32 tx_excessclsn_pkts;
+ u32 tx_unicast_pkts;
+ u32 tx_multicast_pkts;
+ u32 tx_broadcast_pkts;
+ u32 tx_pause_pkts;
+ u32 rx_ok_pkts;
+ u32 rx_total_pkts;
+ u32 rx_crc_err_pkts;
+ u32 rx_align_err_pkts;
+ u32 rx_err_total_pkts;
+ u32 rx_ok_bytes;
+ u32 rx_total_bytes;
+ u32 rx_unicast_pkts;
+ u32 rx_multicast_pkts;
+ u32 rx_broadcast_pkts;
+ u32 rx_pause_pkts;
+ u32 rx_len_err_pkts;
+ u32 rx_len_undersize_pkts;
+ u32 rx_len_oversize_pkts;
+ u32 rx_len_fragment_pkts;
+ u32 rx_len_jabber_pkts;
+ u32 rx_64_pkts;
+ u32 rx_65_127_pkts;
+ u32 rx_128_255_pkts;
+ u32 rx_256_511_pkts;
+ u32 rx_512_1023_pkts;
+ u32 rx_1024_1518_pkts;
+ u32 rx_1519_plus_pkts;
+ u32 rx_drp_fifo_full_pkts;
+ u32 rx_truncate_fifo_full_pkts;
+
+ u32 rx_dma_missed_frame_cnt;
+
+ u32 tx_tso_pkts;
+ u32 tx_tso_bytes;
+
+ spinlock_t stats_lock;
+};
+
+struct emac_regdata {
+ int support_dual_vol_power; /* 1.8v/ 3.3v power domain */
+ int ptp_rx_ts_all_events; /* hw timestamp all events */
+ int clk_rst_ctrl_reg_offset;
+
+ int axi_mst_single_id_shift;
+ int phy_intr_enable_shift;
+
+ int int_clk_src_sel_shift;
+ int rgmii_tx_clk_src_sel_shift;
+ int rgmii_rx_clk_src_sel_shift;
+ int rmii_rx_clk_sel_shift;
+ int rmii_tx_clk_sel_shift;
+ int rmii_ref_clk_sel_shift;
+ int mac_intf_sel_shift;
+
+ int rgmii_tx_dline_reg_offset; /*no dline if less than zero */
+ int rgmii_tx_delay_code_shift;
+ int rgmii_tx_delay_code_mask;
+ int rgmii_tx_delay_step_shift;
+ int rgmii_tx_delay_step_mask;
+ int rgmii_tx_delay_enable_shift;
+
+ int rgmii_rx_dline_reg_offset;
+ int rgmii_rx_delay_code_shift;
+ int rgmii_rx_delay_code_mask;
+ int rgmii_rx_delay_step_shift;
+ int rgmii_rx_delay_step_mask;
+ int rgmii_rx_delay_enable_shift;
+};
+
+struct emac_pause_str {
+ u32 rx_pause:1; // value 1 means enable
+ u32 tx_pause:1; // ditto
+ u32 pause_sending:1; // value 1 means emac is sending pause frame
+ u32 pause_time_max:1; // value 1 means pause time is 0xffff
+ u32 high_water:14;
+ u32 low_water:14;
+ u8 fc_auto;
+};
+
+/* for ptp event message , udp port is 319 */
+#define PTP_EVENT_PORT (0x13F)
+
+struct emac_priv;
+struct emac_hw_ptp {
+ void (*config_hw_tstamping) (struct emac_priv *priv, u32 enable,
+ u8 rx_ptp_type, u32 ptp_msg_id);
+ u32 (*config_systime_increment)(struct emac_priv *priv);
+ int (*init_systime) (struct emac_priv *priv, u64 set_ns);
+ u64 (*get_phc_time)(struct emac_priv *priv);
+ u64 (*get_tx_timestamp)(struct emac_priv *priv);
+ u64 (*get_rx_timestamp)(struct emac_priv *priv);
+};
+
+#define EMAC_SUSPEND_POWER_DOWN_PHY BIT(0)
+
+enum {
+ EMAC_PPS_BCODE = 0,
+ EMAC_PPS_GNSS,
+ EMAC_PPS_MAX,
+};
+
+struct emac_pps {
+ int enable_pps;
+ int pps_source;
+ u32 pps_cycle;
+ u32 ppscnt;
+ u64 ppstime;
+ u64 utc_ns;
+};
+
+struct emac_priv {
+ u32 u32RxBufferLen;
+ u32 wol;
+ struct mutex mii_mutex;
+ spinlock_t spStatsLock;
+ struct work_struct tx_timeout_task;
+ struct emac_desc_ring tx_ring;
+ struct emac_desc_ring rx_ring;
+ spinlock_t spTxLock;
+ struct net_device *ndev;
+ struct sk_buff_head rx_skb;
+ struct hrtimer tx_timer;
+ struct hrtimer rx_timer;
+ struct napi_struct rx_napi;
+ struct napi_struct tx_napi;
+ spinlock_t intr_lock;
+ int tx_count_frames;
+ int refill;
+ struct platform_device *pdev;
+ struct clk *clk;
+ struct clk *ptp_clk;
+ void __iomem *iobase;
+ void __iomem *tso_base;
+ void *tso_hdr;
+ dma_addr_t tso_hdr_addr;
+ bool tso;
+ int power_domain; /* 0 - 1.8v, 1 - 3.3v */
+ int fix_link;
+ int irq;
+ int irq_wakeup;
+ int irq_tso;
+ int irq_pps;
+ int duplex;
+ int speed;
+ int interface;
+ int mdio_clk_div;
+ int en_suspend;
+ int dev_flags;
+ int wolopts;
+ int rst_gpio, ldo_gpio;
+ int low_active_ldo, low_active_rst;
+ u32 delays_ldo[3];
+ u32 delays_rst[3];
+ struct mii_bus *mii;
+ int mdio_irqs[PHY_MAX_ADDR];
+ struct phy_device *phy;
+ struct emac_hw_stats *hw_stats;
+ const struct emac_regdata *regdata;
+ struct hwtstamp_config tstamp_config;
+ struct delayed_work systim_overflow_work;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_ops;
+ struct emac_pps pps_info;
+ unsigned int default_addend;
+ s64 addend_adj;
+ int hw_adj;
+ spinlock_t ptp_lock;
+ int ptp_support;
+ u32 ptp_clk_rate;
+ u32 ptp_clk_inc;
+ u32 frac_div;
+ struct PortIdentity sourcePortIdentity;
+ int hwts_tx_en;
+ int hwts_rx_en;
+ struct emac_hw_ptp *hwptp;
+
+ struct gen_pool *sram_pool;
+
+ /* clk-config(32bit)
+ *
+ * rmii_ref_clk(clk-config[31:24])
+ * 0 - from SOC
+ * 1 - from phy
+ * clk_sel(clk-config[23:16])
+ * RGMII:
+ * tx | clk_sel: 0 - from RX clock
+ * 1 - from SOC clock
+ * rx | clk_sel: not care
+ *
+ * RMII:
+ * tx | clk_sel: 0 - RMII clock
+ * 1 - Inverted RMII clock
+ * rx | clk_sel: 0 - RMII clock
+ * 1 - Inverted RMII clock
+ *
+ * delay_code(clk-config[15:8])
+ * 0 ~ 255
+ *
+ * delay_step(clk-config[7:0])
+ * 0b000 - 22ps
+ * 0b001 - 29ps
+ * 0b010 - 36ps
+ * 0b011 - 43ps
+ */
+ u32 tx_clk_config;
+ u32 rx_clk_config;
+
+ bool clk_tuning_enable;
+ unsigned long state;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *rgmii_pins;
+ struct pm_qos_request pm_qos_req;
+ struct pm_qos_request pm_ddr_qos;
+ s32 pm_qos;
+ struct emac_pause_str pause;
+ struct delayed_work emac_pause_work;
+#ifdef WAN_LAN_AUTO_ADAPT
+ int link; /* interface link status */
+ u8 dhcp;
+ u8 dhcp_delaywork;
+ struct delayed_work dhcp_work;
+ u32 vlan_port; /* dhcp_req: vlan, dhcp_resp: port */
+#endif
+
+#ifdef CONFIG_DDR_DEVFREQ
+#define CONFIG_ASR_EMAC_DDR_QOS 1
+#endif
+#ifdef CONFIG_ASR_EMAC_DDR_QOS
+ struct {
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long window_time;
+ unsigned long polling_delay_ms;
+ unsigned int tx_up_threshold;
+ unsigned int rx_up_threshold;
+ unsigned int tx_down_threshold;
+ unsigned int rx_down_threshold;
+ int qos_val;
+ struct pm_qos_request ddr_qos;
+ } clk_scaling;
+ struct work_struct qos_work;
+#endif
+};
+
+void emac_ptp_register(struct emac_priv *priv);
+void emac_ptp_unregister(struct emac_priv *priv);
+void emac_ptp_init(struct emac_priv *priv);
+void emac_ptp_deinit(struct emac_priv *priv);
+
+/* Write @val to MAC/DMA register @reg (byte offset from priv->iobase). */
+static inline void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
+{
+ writel(val, (priv->iobase + reg));
+}
+
+/* Read MAC/DMA register @reg.
+ * NOTE(review): returns int although readl() yields u32; harmless for the
+ * register widths used here, but a u32 return would be cleaner.
+ */
+static inline int emac_rd(struct emac_priv *priv, u32 reg)
+{
+ return readl(priv->iobase + reg);
+}
+
+/* Write @val to TSO-block register @reg (byte offset from priv->tso_base). */
+static inline void emac_wr_tso(struct emac_priv *priv, u32 reg, u32 val)
+{
+ writel((val), (priv->tso_base + reg));
+}
+
+/* Read TSO-block register @reg. */
+static inline int emac_rd_tso(struct emac_priv *priv, u32 reg)
+{
+ return readl(priv->tso_base + reg);
+}
+
+/* Return a pointer to TX descriptor @index in the TX ring.
+ * No bounds check: the caller must keep index < tx_ring.total_cnt.
+ */
+static inline struct emac_tx_desc *emac_get_tx_desc(struct emac_priv *priv,
+ int index)
+{
+ return &((struct emac_tx_desc *)priv->tx_ring.desc_addr)[index];
+}
+
+/* Return a pointer to RX descriptor @index in the RX ring.
+ * No bounds check: the caller must keep index < rx_ring.total_cnt.
+ */
+static inline struct emac_rx_desc *emac_get_rx_desc(struct emac_priv *priv,
+ int index)
+{
+ return &((struct emac_rx_desc *)priv->rx_ring.desc_addr)[index];
+}
+
+/* Set the per-descriptor offload flags (TSO, checksum, header-split). */
+static inline void emac_tx_desc_set_offload(struct emac_tx_desc *desc,
+ bool tso, bool csum, bool hse)
+{
+ desc->hse = hse;
+ desc->tso = tso;
+ desc->coe = csum;
+}
+
+/* Mark @desc as the first descriptor of a frame. */
+static inline void emac_tx_desc_set_fd(struct emac_tx_desc *desc)
+{
+ desc->FirstSegment = 1;
+}
+
+/* Request a hardware TX timestamp for the frame in @desc. */
+static inline void emac_tx_desc_set_ts(struct emac_tx_desc *desc)
+{
+ desc->tx_timestamp = 1;
+}
+
+/* Mark @desc as the last descriptor of the ring (wrap point). */
+static inline void emac_tx_desc_set_ring_end(struct emac_tx_desc *desc)
+{
+ desc->EndRing = 1;
+}
+
+/* Mark @desc as the last descriptor of a frame. */
+static inline void emac_tx_desc_set_ld(struct emac_tx_desc *desc)
+{
+ desc->LastSegment = 1;
+}
+
+/* Ask the DMA to raise an interrupt when this descriptor completes. */
+static inline void emac_tx_desc_set_ioc(struct emac_tx_desc *desc)
+{
+ desc->InterruptOnCompletion = 1;
+}
+
+/* Program buffer 1 of a TX descriptor: DMA address plus a 16-bit length
+ * split across BufferSize1 (low 12 bits) and hb1s (high 4 bits).
+ */
+static inline void emac_set_buf1_addr_len(struct emac_tx_desc *desc,
+ u32 addr, u32 len)
+{
+ desc->BufferAddr1 = addr;
+ desc->BufferSize1 = len & 0xfff;
+ desc->hb1s = (len >> 12) & 0xf;
+}
+
+/* Program buffer 2 of a TX descriptor: same 12+4 bit length split as
+ * buffer 1, using BufferSize2/hb2s.
+ */
+static inline void emac_set_buf2_addr_len(struct emac_tx_desc *desc,
+ u32 addr, u32 len)
+{
+ desc->BufferAddr2 = addr;
+ desc->BufferSize2 = len & 0xfff;
+ desc->hb2s = (len >> 12) & 0xf;
+}
+
+/* Patch word 0 of a frame's first TX descriptor with offload attributes.
+ * TSO frames get header length, MSS and the HSE/TSO/COE bits; plain
+ * checksum-offload frames get only COE.
+ *
+ * NOTE(review): the final else sets EMAC_TDES0_TSO even though neither TSO
+ * nor checksum offload was requested — verify against callers/hardware spec
+ * whether this branch is ever reached and whether TSO is really intended.
+ */
+static inline void emac_tx_update_fst_desc(void *txdesc, int hl, int mss,
+ bool tso, bool coe)
+{
+ u32 desc0 = *(u32 *)txdesc;
+
+ if (tso) {
+  desc0 |= hl << EMAC_TDES0_HL_SHIFT;
+  desc0 |= mss << EMAC_TDES0_MSS_SHIFT;
+  desc0 |= EMAC_TDES0_HSE | EMAC_TDES0_TSO | EMAC_TDES0_COE;
+ } else if (coe) {
+  desc0 |= EMAC_TDES0_COE;
+ } else {
+  desc0 |= EMAC_TDES0_TSO;
+ }
+
+ *(u32 *)txdesc = desc0;
+}
+
+//#define EMAC_DEBUG
+/* Debug logging: real printk when EMAC_DEBUG is defined, otherwise
+ * no_printk keeps format-string type checking with zero runtime cost.
+ */
+#if defined(EMAC_DEBUG)
+#define emac_print(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define emac_print(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+#endif //_EMAC_ETH_H_
diff --git a/marvell/linux/drivers/net/ethernet/asr/emac_ptp.c b/marvell/linux/drivers/net/ethernet/asr/emac_ptp.c
new file mode 100644
index 0000000..382c57b
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/asr/emac_ptp.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * emac ptp driver
+ *
+ * Copyright (C) 2023 ASR Micro Limited
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/pps_kernel.h>
+#include <linux/timer.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include "emac_eth.h"
+
+#define to_emacpriv(_ptp) container_of(_ptp, struct emac_priv, ptp_clock_ops)
+
+#define CIU_UTC_OUT_REG1 0x54
+#define CIU_UTC_OUT_REG2 0x58
+#define CIU_PPS_SOURCE 0xBC
+
+#define INCVALUE_100MHZ 10
+#define INCVALUE_SHIFT_HW 19
+#define INCVALUE_SHIFT_SW 23
+#define INCPERIOD 1
+
+#ifndef NS_PER_SEC
+#define NS_PER_SEC 1000000000ULL
+#endif
+
+//#define EMAC_PPS_DEBUG
+
+/* Another drawback of scaling the incvalue by a large factor is the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
+ * by simply reading the clock before it overflows.
+ *
+ * Clock ns bits Overflows after
+ * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~
+ * 100MHz (64-19)bit 2^45 / 10^9 / 3600 = 9.77 hrs
+ */
+#define EMAC_SYSTIM_OVERFLOW_CNT (1ULL << (64 - INCVALUE_SHIFT_HW))
+#define EMAC_SYSTIM_OVERFLOW_SEC ((unsigned long)((EMAC_SYSTIM_OVERFLOW_CNT/NS_PER_SEC)/2))
+#define EMAC_SYSTIM_OVERFLOW_PERIOD (HZ * EMAC_SYSTIM_OVERFLOW_SEC)
+
+/* Convert a raw cycle delta from the EMAC system timer into nanoseconds.
+ *
+ * @priv:        driver private data (holds timecounter and adjust mode)
+ * @cycle_delta: cycle difference to convert (masked to counter width)
+ * @frac:        in/out fractional remainder in (mult,shift) fixed point,
+ *               may be NULL; @backwards selects subtract vs add of it
+ * @backwards:   non-zero when converting a delta that goes back in time
+ *
+ * Two modes:
+ *  - hw_adj: the hardware addend already carries the frequency
+ *    adjustment, so only the fixed mult/shift conversion (plus the
+ *    frac_div fractional-ns correction for non-integer clock periods)
+ *    is applied.
+ *  - sw_adj: the frequency adjustment is applied in software via
+ *    priv->addend_adj, scaled by the number of increments
+ *    (nsec / ptp_clk_inc) and added/subtracted after the shift.
+ */
+static u64 emac_timer_cyc2ns(struct emac_priv *priv, u64 cycle_delta,
+ u64 *frac, int backwards)
+{
+ struct timecounter *tc = &priv->tc;
+ u64 delta = cycle_delta & tc->cc->mask;
+ u64 nsec, adj, to_adj;
+ int neg_adj;
+
+ if (!cycle_delta)
+ return 0;
+
+ if (priv->hw_adj) {
+ nsec = delta * tc->cc->mult;
+
+ /* frac_div corrects for clocks whose period is not a whole ns */
+ if (priv->frac_div > 0)
+ nsec += div_u64(nsec, priv->frac_div);
+
+ if (frac) {
+ if (backwards)
+ nsec -= *frac;
+ else
+ nsec += *frac;
+ *frac = nsec & tc->mask;
+ }
+
+ nsec >>= tc->cc->shift;
+ } else {
+ nsec = delta * tc->cc->mult;
+
+ if (priv->addend_adj < 0) {
+ neg_adj = 1;
+ adj = -priv->addend_adj;
+ } else {
+ neg_adj = 0;
+ adj = priv->addend_adj;
+ }
+
+ /* software ppb adjustment: adj per clock increment */
+ to_adj = div_u64(nsec, priv->ptp_clk_inc) * adj;
+ if (priv->frac_div > 0)
+ nsec += div_u64(nsec, priv->frac_div);
+
+ if (neg_adj)
+ nsec -= to_adj >> tc->cc->shift;
+ else
+ nsec += to_adj >> tc->cc->shift;
+
+ /* no fractional carry is tracked in sw-adjust mode */
+ if (frac)
+ *frac = 0;
+ }
+
+ return nsec;
+}
+
+/* (Re)base the timecounter at @ns; when PPS is enabled the UTC anchor
+ * used by emac_timer_cyc2time() is rebased as well.
+ */
+static void emac_timecounter_init(struct emac_priv *priv, u64 ns)
+{
+ timecounter_init(&priv->tc, &priv->cc, ns);
+
+ if (!priv->pps_info.enable_pps)
+ return;
+
+ priv->pps_info.utc_ns = ns;
+}
+
+/* Advance the timecounter to "now" and return the current time in ns.
+ * Open-coded variant of timecounter_read() that uses the driver's own
+ * emac_timer_cyc2ns() conversion (needed for the sw/hw adjust modes).
+ * Caller must hold ptp_lock or otherwise serialise against adjustments.
+ */
+static u64 emac_timecounter_read(struct emac_priv *priv)
+{
+ struct timecounter *tc = &priv->tc;
+ u64 cycle_now, cycle_delta;
+ u64 ns_offset;
+
+ /* read cycle counter: */
+ cycle_now = tc->cc->read(tc->cc);
+
+ /* calculate the delta since the last timecounter_read_delta(): */
+ cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
+
+ /* convert to nanoseconds: */
+ ns_offset = emac_timer_cyc2ns(priv, cycle_delta, &tc->frac, 0);
+
+ /* update time stamp of timecounter_read_delta() call: */
+ tc->cycle_last = cycle_now;
+
+ ns_offset += tc->nsec;
+ tc->nsec = ns_offset;
+ return ns_offset;
+}
+
+/* Translate a raw hardware cycle timestamp into wall-clock nanoseconds.
+ * The reference point is either the last PPS edge (PPS mode: utc_ns /
+ * ppstime) or the timecounter state. A delta larger than half the
+ * counter mask is interpreted as a timestamp *before* the reference
+ * (counter wrap handling).
+ */
+u64 emac_timer_cyc2time(struct emac_priv *priv, u64 cycle_tstamp)
+{
+ struct timecounter *tc = &priv->tc;
+ u64 nsec, cycle_last, delta, frac;
+ int backwards = 0;
+ u64 delta_ns;
+
+ if (priv->pps_info.enable_pps) {
+ nsec = priv->pps_info.utc_ns;
+ frac = 0;
+ cycle_last = priv->pps_info.ppstime;
+ } else {
+ nsec = tc->nsec;
+ frac = tc->frac;
+ cycle_last = tc->cycle_last;
+ }
+
+ delta = (cycle_tstamp - cycle_last) & tc->cc->mask;
+ if (delta > tc->cc->mask / 2) {
+ delta = (cycle_last - cycle_tstamp) & tc->cc->mask;
+ backwards = 1;
+ }
+
+ delta_ns = emac_timer_cyc2ns(priv, delta, &frac, backwards);
+ if (backwards)
+ nsec -= delta_ns;
+ else
+ nsec += delta_ns;
+
+ return nsec;
+}
+
+/* Enable or disable hardware TX/RX timestamping.
+ *
+ * @enable:      non-zero to enable, zero to disable both directions
+ * @rx_ptp_type: RX PTP packet-type filter written to PTP_1588_CTRL
+ * @ptp_msg_id:  PTP message-id filter written to PTP_MSG_ID
+ *
+ * On enable this also programs the PTP ethertype and UDP event port so
+ * the MAC can recognise PTP event frames.
+ */
+void emac_hw_timestamp_config(struct emac_priv *priv, u32 enable,
+ u8 rx_ptp_type, u32 ptp_msg_id)
+{
+ void __iomem *ioaddr = priv->iobase;
+ u32 val;
+
+ if (enable) {
+ /*
+ * enable tx/rx timestamp and config rx ptp type
+ */
+ val = emac_rd(priv, PTP_1588_CTRL);
+ val |= TX_TIMESTAMP_EN | RX_TIMESTAMP_EN;
+ val &= ~RX_PTP_PKT_TYPE_MSK;
+ val |= (rx_ptp_type << RX_PTP_PKT_TYPE_OFST) &
+ RX_PTP_PKT_TYPE_MSK;
+ writel(val, ioaddr + PTP_1588_CTRL);
+
+ /* config ptp message id */
+ writel(ptp_msg_id, ioaddr + PTP_MSG_ID);
+
+ /* config ptp ethernet type */
+ writel(ETH_P_1588, ioaddr + PTP_ETH_TYPE);
+
+ /* config ptp udp port */
+ writel(PTP_EVENT_PORT, ioaddr + PTP_UDP_PORT);
+
+ } else {
+ val = emac_rd(priv, PTP_1588_CTRL);
+ val &= ~(TX_TIMESTAMP_EN | RX_TIMESTAMP_EN);
+ writel(val, ioaddr + PTP_1588_CTRL);
+ }
+}
+
+/* Program the system-timer increment register from the PTP clock rate
+ * and precompute the software conversion constants:
+ *  - ptp_clk_inc:  ns added per increment period
+ *  - frac_div:     divisor for the fractional-ns correction when the
+ *                  clock period is not a whole number of ns
+ *  - default_addend: baseline addend (pre-shifted for hw or sw adjust)
+ * Always returns 0.
+ */
+u32 emac_hw_config_systime_increment(struct emac_priv *priv)
+{
+ void __iomem *ioaddr = priv->iobase;
+ u32 ptp_clock = priv->ptp_clk_rate;
+ u32 cycle_inc, cycle_mod;
+ u32 val;
+
+ /*
+ * set system time counter resolution as ns if ptp clock is 100Mhz,
+ * 10ns per clock cycle, so increment value should be 10, increment
+ * period should be 1
+ */
+ cycle_inc = (NS_PER_SEC / ptp_clock) * INCPERIOD;
+ cycle_mod = NS_PER_SEC % ptp_clock;
+ priv->ptp_clk_inc = cycle_inc;
+
+ /*
+ * Assume ptp_clk_rate= 38.4M
+ * Tns = 1000000000/38400000 = 26 + 1/24
+ * nsec = (delta/26) * Tns = (delta/26) * (26 + 1/24)
+ * nsec = delta + delta/(26*24)
+ * frag = delta/div, div = 26*24
+ */
+ if (cycle_mod > 0)
+ priv->frac_div = (priv->ptp_clk_rate * cycle_inc / cycle_mod);
+ else
+ priv->frac_div = 0;
+
+ if (priv->hw_adj) {
+ priv->default_addend = cycle_inc << INCVALUE_SHIFT_HW;
+ val = (priv->default_addend | (INCPERIOD << INCR_PERIOD_OFST));
+ } else {
+ priv->default_addend = cycle_inc << INCVALUE_SHIFT_SW;
+ priv->addend_adj = 0;
+ val = (cycle_inc | (INCPERIOD << INCR_PERIOD_OFST));
+ }
+
+ writel(val, ioaddr + PTP_INRC_ATTR);
+ pr_info("default_addend=%d cycle_mod=%d cycle_inc=%d frac_div=%d\n",
+ priv->default_addend, cycle_mod, cycle_inc, priv->frac_div);
+ return 0;
+}
+
+/* Read the 64-bit raw system-time counter (low word first, as required
+ * by the hardware latch). IRQs are disabled so the two 32-bit reads and
+ * the workaround read form one atomic snapshot on this CPU.
+ */
+u64 emac_hw_get_systime(struct emac_priv *priv)
+{
+ void __iomem *ioaddr = priv->iobase;
+ unsigned long flags;
+ u64 systimel, systimeh;
+ u64 systim;
+
+ local_irq_save(flags);
+
+ /* first read system time low register */
+ systimel = readl(ioaddr + SYS_TIME_GET_LOW);
+ systimeh = readl(ioaddr + SYS_TIME_GET_HI);
+ systim = (systimeh << 32) | systimel;
+
+ /* Add a dummy read to WA systimer jump issue */
+ systimel = emac_rd(priv, SYS_TIME_ADJ_LOW);
+
+ local_irq_restore(flags);
+
+ return systim;
+}
+
+/* Read the 64-bit raw cycle timestamp latched by hardware at the last
+ * PPS edge.
+ */
+u64 emac_hw_get_ppstime(struct emac_priv *priv)
+{
+ void __iomem *ioaddr = priv->iobase;
+ u64 ppstimel, ppstimeh;
+ u64 ppstime;
+
+ ppstimel = readl(ioaddr + PTP_PPS_TIME_L);
+ ppstimeh = readl(ioaddr + PTP_PPS_TIME_H);
+ ppstime = (ppstimeh << 32) | ppstimel;
+
+ return ppstime;
+}
+
+/* Return the current PHC time in nanoseconds (raw counter read plus
+ * cycle-to-time conversion) under ptp_lock.
+ */
+u64 emac_hw_get_phc_time(struct emac_priv *priv)
+{
+ unsigned long flags;
+ u64 cycles, ns;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ cycles = emac_hw_get_systime(priv);
+ ns = emac_timer_cyc2time(priv, cycles);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return ns;
+}
+
+/* Read the latched TX hardware timestamp and convert it to ns. The
+ * register reads are done outside the lock; only the conversion (which
+ * touches shared timecounter state) is serialised.
+ */
+u64 emac_hw_get_tx_timestamp(struct emac_priv *priv)
+{
+ void __iomem *ioaddr = priv->iobase;
+ unsigned long flags;
+ u64 systimel, systimeh;
+ u64 systim;
+ u64 ns;
+
+ /* first read system time low register */
+ systimel = readl(ioaddr + TX_TIMESTAMP_LOW);
+ systimeh = readl(ioaddr + TX_TIMESTAMP_HI);
+ systim = (systimeh << 32) | systimel;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ ns = emac_timer_cyc2time(priv, systim);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return ns;
+}
+
+/* Read the latched RX hardware timestamp and convert it to ns.
+ * Mirrors emac_hw_get_tx_timestamp() but for the RX latch registers.
+ */
+u64 emac_hw_get_rx_timestamp(struct emac_priv *priv)
+{
+ void __iomem *ioaddr = priv->iobase;
+ unsigned long flags;
+ u64 systimel, systimeh;
+ u64 systim;
+ u64 ns;
+
+ /* first read system time low register */
+ systimel = readl(ioaddr + RX_TIMESTAMP_LOW);
+ systimeh = readl(ioaddr + RX_TIMESTAMP_HI);
+ systim = (systimeh << 32) | systimel;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ ns = emac_timer_cyc2time(priv, systim);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return ns;
+}
+
+/**
+ * emac_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static u64 emac_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct emac_priv *priv = container_of(cc, struct emac_priv, cc);
+
+ return emac_hw_get_systime(priv);
+}
+
+/* Rebase the timecounter (and PPS UTC anchor) to @set_ns under
+ * ptp_lock. Always returns 0.
+ */
+int emac_hw_init_systime(struct emac_priv *priv, u64 set_ns)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ emac_timecounter_init(priv, set_ns);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+struct emac_hw_ptp emac_hwptp = {
+ .config_hw_tstamping = emac_hw_timestamp_config,
+ .config_systime_increment = emac_hw_config_systime_increment,
+ .init_systime = emac_hw_init_systime,
+ .get_phc_time = emac_hw_get_phc_time,
+ .get_tx_timestamp = emac_hw_get_tx_timestamp,
+ .get_rx_timestamp = emac_hw_get_rx_timestamp,
+};
+
+/**
+ * emac_adjust_freq
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ppb: desired period change in parts ber billion
+ *
+ * Description: this function will adjust the frequency of hardware clock.
+ */
+static int emac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct emac_priv *priv = to_emacpriv(ptp);
+ void __iomem *ioaddr = priv->iobase;
+ unsigned long flags;
+ int neg_adj = 0;
+ u64 incvalue, adj;
+ u32 addend;
+
+ /* ppb means delta time each sample cycle, in nano second */
+ if ((ppb > ptp->max_adj) || (ppb <= -1000000000))
+ return -EINVAL;
+
+ if (!ppb)
+ return 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ /* scale the baseline addend by ppb/1e9 */
+ incvalue = priv->default_addend;
+ adj = incvalue;
+ adj *= ppb;
+ adj = div_u64(adj, 1000000000);
+ if (priv->hw_adj) {
+ /* hardware mode: write the adjusted addend to the MAC */
+ addend = neg_adj ? (incvalue - adj) : (incvalue + adj);
+ addend = (addend | (INCPERIOD << INCR_PERIOD_OFST));
+ writel(addend, ioaddr + PTP_INRC_ATTR);
+ priv->addend_adj = 0;
+ } else {
+ /* update tc->cycle_last since adj to be changed */
+ emac_timecounter_read(priv);
+ priv->addend_adj = neg_adj ? - adj : adj;
+ }
+#ifdef EMAC_PPS_DEBUG
+ pr_info("emac_adjust_freq: ppb=%d adj=%s%lld\n", ppb,
+ neg_adj ? "-" : "+" , adj);
+#endif
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ return 0;
+}
+
+/**
+ * emac_adjust_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @delta: desired change in nanoseconds
+ *
+ * Description: this function will shift/adjust the hardware clock time.
+ */
+static int emac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct emac_priv *priv = to_emacpriv(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ /* in PPS mode the time base is the UTC anchor, not the timecounter */
+ if (priv->pps_info.enable_pps)
+ priv->pps_info.utc_ns += delta;
+ else
+ timecounter_adjtime(&priv->tc, delta);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * emac_phc_get_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: pointer to hold time/result
+ *
+ * Description: this function will read the current time from the
+ * hardware clock and store it in @ts.
+ */
+static int emac_phc_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct emac_priv *priv = to_emacpriv(ptp);
+ unsigned long flags;
+ u64 cycles, ns;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ cycles = emac_hw_get_systime(priv);
+ ns = emac_timer_cyc2time(priv, cycles);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ /* convert outside the lock; ns_to_timespec64 is pure */
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+/**
+ * emac_phc_set_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: time value to set
+ *
+ * Description: this function will set the current time on the
+ * hardware clock.
+ */
+static int emac_phc_set_time(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct emac_priv *priv = to_emacpriv(ptp);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ /* with a GNSS PPS source, refuse to set a time computed against a
+ * PPS edge that has already been superseded by a newer one
+ */
+ if (priv->pps_info.enable_pps &&
+ priv->pps_info.pps_source == EMAC_PPS_GNSS) {
+ u64 ppstime;
+
+ ppstime = emac_hw_get_ppstime(priv);
+ if (ppstime != priv->pps_info.ppstime) {
+ pr_warn("New PPS come, the time is out-of-date\n");
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ return 0;
+ }
+ }
+
+ emac_timecounter_init(priv, ns);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info emac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "emac_ptp_clock",
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = emac_adjust_freq,
+ .adjtime = emac_adjust_time,
+ .gettime64 = emac_phc_get_time,
+ .settime64 = emac_phc_set_time,
+};
+
+#ifdef CONFIG_PPS
+/* Decode the UTC time published by bcode (time-code) hardware in the
+ * CIU UTC output registers and return it as nanoseconds since epoch.
+ *
+ * NOTE(review): mon is hard-coded to 0 and the day field is 9 bits wide
+ * (0..511), which suggests a day-of-year encoding; mktime64() expects a
+ * 1-based month and day-of-month, so mon=0 shifts the result into the
+ * previous December. Confirm the intended register semantics against
+ * the CIU documentation.
+ */
+static s64 emac_read_bcode_utc(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ struct timespec64 utc;
+ struct tm result;
+ s64 utc_ns;
+ u32 val;
+
+ val = readl(CIU_VIRT_BASE + CIU_UTC_OUT_REG2);
+ year = 2000 + ((val >> 16) & 0x7F);
+ mon = 0;
+ day = val & 0x1FF;
+
+ val = readl(CIU_VIRT_BASE + CIU_UTC_OUT_REG1);
+ hour = (val >> 16) & 0x1F;
+ min = (val >> 8) & 0x3F;
+ sec = val & 0x3F;
+
+ utc.tv_sec = mktime64(year, mon, day, hour, min, sec);
+ utc.tv_nsec = 0;
+ utc_ns = timespec64_to_ns(&utc);
+ time64_to_tm(utc.tv_sec, 0, &result);
+#ifdef EMAC_PPS_DEBUG
+ pr_info("UTC from bcode: %lldns\n", utc_ns);
+ pr_info("%d-%d-%d %d:%d:%d ==> %ld-%d-%d %d:%d:%d\n",
+ year, mon, day, hour, min, sec,
+ (1900 + result.tm_year), (result.tm_mon + 1), result.tm_mday,
+ result.tm_hour, result.tm_min, result.tm_sec);
+#endif
+ return utc_ns;
+}
+
+/* Detect missed PPS interrupts. @interval is the ns elapsed since the
+ * last serviced PPS edge. Returns 1 (and the whole number of lost
+ * seconds, rounded to the nearest second, in *ns) when the interval
+ * exceeds 1.5 PPS cycles; returns 0 with *ns = 0 otherwise.
+ */
+static inline int emac_lost_pps_interrupt(struct emac_priv *priv, u64 *ns,
+ u64 interval)
+{
+ s64 offset = 0;
+
+ *ns = 0;
+ if (interval < (priv->pps_info.pps_cycle * NS_PER_SEC * 3)/2)
+ return 0;
+
+ /* round interval to the nearest whole second */
+ *ns = div_u64(interval, NS_PER_SEC);
+ *ns *= NS_PER_SEC;
+ offset = interval - *ns;
+ if (offset > NS_PER_SEC / 2)
+ *ns += NS_PER_SEC;
+
+ pr_warn("Lost PPS signal about %lldns\n", interval);
+ return 1;
+}
+
+/* Fill in the pps_event_time for a PPS event. In PPS mode the real time
+ * is the tracked UTC anchor plus @pps_ns, and the anchor is advanced to
+ * the next expected edge (from bcode hardware, or by whole PPS cycles
+ * with lost-interrupt compensation). Otherwise system real time is used.
+ */
+static inline void emac_pps_get_ts(struct emac_priv *priv,
+ struct pps_event_time *ts, u64 pps_ns)
+{
+ struct emac_pps *info = &priv->pps_info;
+ struct system_time_snapshot snap;
+ u64 ns;
+
+ ktime_get_snapshot(&snap);
+ if (priv->pps_info.enable_pps) {
+ ts->ts_real = ns_to_timespec64(info->utc_ns + pps_ns);
+ if (info->pps_source == EMAC_PPS_BCODE) {
+ info->utc_ns = emac_read_bcode_utc();
+ } else {
+ if (emac_lost_pps_interrupt(priv, &ns, pps_ns))
+ info->utc_ns += ns;
+ else
+ info->utc_ns += info->pps_cycle * NS_PER_SEC;
+ }
+ } else {
+ ts->ts_real = ktime_to_timespec64(snap.real);
+ }
+
+#ifdef CONFIG_NTP_PPS
+ ts->ts_raw = ktime_to_timespec64(snap.raw);
+#endif
+}
+
+/* PPS interrupt handler: reads the latched PPS/system timestamps,
+ * reports a PPS_CAPTUREASSERT event to the PPS subsystem, updates the
+ * stored PPS edge/counter state and acks the interrupt.
+ * Returns IRQ_NONE when the PPS-valid status bit is not set (shared
+ * interrupt line).
+ */
+static irqreturn_t emac_pps_irq(int irq, void *dev_id)
+{
+ struct emac_priv *priv = (struct emac_priv *)dev_id;
+ struct ptp_clock *ptp = priv->ptp_clock;
+ struct pps_device *pps;
+ struct pps_event_time ts;
+ u32 pps_cycle, pps_cnt;
+ u64 ppstime, systime;
+ u64 ppstime_ns;
+ u64 pps_interval_ns, pps_interval, delay_ns;
+ u32 status;
+
+ status = emac_rd(priv, PTP_1588_IRQ_STS);
+ if (!(status & PTP_PPS_VALID))
+ return IRQ_NONE;
+
+ ppstime = emac_hw_get_ppstime(priv);
+ systime = emac_hw_get_systime(priv);
+ pps_cnt = emac_rd(priv, PTP_PPS_COUNTER);
+ pps_cycle = emac_rd(priv, PTP_PPS_VALUE);
+
+ /* latency between PPS edge and interrupt servicing (debug info) */
+ delay_ns = emac_timer_cyc2ns(priv, systime - ppstime, NULL, 0);
+ pps_interval = ppstime - priv->pps_info.ppstime;
+ pps_interval_ns = emac_timer_cyc2ns(priv, pps_interval, NULL, 0);
+ ppstime_ns = emac_timer_cyc2ns(priv, ppstime, NULL, 0);
+
+#ifdef EMAC_PPS_DEBUG
+ pr_info("emac_pps_irq: pps_cnt=%d interrupt_delay=%lldns\n",
+ pps_cnt, delay_ns);
+ pr_info("emac_pps_irq: ppstime_ns=%lld interval=%lldns ppstime=%lld systime=%lld\n",
+ ppstime_ns, pps_interval_ns, ppstime, systime);
+#endif
+ /* report pps event */
+ pps = ptp_pps_device(ptp);
+ if (pps) {
+#ifdef EMAC_PPS_DEBUG
+ s64 diff;
+
+ diff = NS_PER_SEC * pps_cycle * (pps_cnt - priv->pps_info.ppscnt);
+ diff = pps_interval_ns - diff;
+
+ /*
+ * Stop when |diff| < cycle, show higher accuracy for
+ * frequency synchronization via PPS
+ */
+ if (abs(diff) < priv->ptp_clk_inc)
+ pps_interval_ns = NS_PER_SEC * pps_cycle;
+ pr_info("emac_pps_irq: pps width diff %lldns\n", diff);
+#endif
+ emac_pps_get_ts(priv, &ts, pps_interval_ns);
+ pps_event(pps, &ts, PPS_CAPTUREASSERT, dev_id);
+ }
+
+ priv->pps_info.ppstime = ppstime;
+ priv->pps_info.ppscnt = pps_cnt;
+
+ /* write-1-to-clear the PPS-valid interrupt status */
+ emac_wr(priv, PTP_1588_IRQ_STS, PTP_PPS_VALID);
+ return IRQ_HANDLED;
+}
+
+/* Configure and arm PPS support: request the (shared) PPS interrupt,
+ * select the PPS source in the CIU, reset the hardware PPS counter,
+ * program the PPS cycle, seed the software PPS state from the current
+ * hardware latch and bcode UTC, then enable the PPS interrupt.
+ * Returns 0 on success or the request_irq() error code.
+ */
+static int emac_ptp_config_pps(struct emac_priv *priv)
+{
+ unsigned long timeo = jiffies + msecs_to_jiffies(500);
+ u64 ppstime;
+ u32 pps_cnt;
+ u32 reg;
+ int ret;
+
+ if (priv->irq_pps) {
+ ret = request_irq(priv->irq_pps, emac_pps_irq,
+ IRQF_SHARED, "emac_pps", priv);
+ if (ret) {
+ /* fixed: was "\\n" which printed a literal backslash */
+ pr_err("request irq_pps failed, ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->ptp_clock_ops.pps = 1;
+ priv->pps_info.pps_cycle = 1;
+
+ /* Config PPS source */
+ reg = readl(CIU_VIRT_BASE + CIU_PPS_SOURCE);
+ if (priv->pps_info.pps_source == EMAC_PPS_BCODE)
+ reg &= ~BIT(0);
+ else
+ reg |= BIT(0);
+ writel(reg, CIU_VIRT_BASE + CIU_PPS_SOURCE);
+
+ /* Clear PPS register; hardware clears the self-resetting bit when
+ * done, poll for up to 500ms
+ */
+ pr_info("reset emac PPS\n");
+ reg = emac_rd(priv, PTP_1588_CTRL);
+ reg |= PPS_COUNTER_RESET;
+ emac_wr(priv, PTP_1588_CTRL, reg);
+ do {
+ reg = emac_rd(priv, PTP_1588_CTRL);
+ if (!(reg & PPS_COUNTER_RESET))
+ break;
+ } while (time_before(jiffies, timeo));
+
+ if (reg & PPS_COUNTER_RESET)
+ pr_err("reset PPS failed\n");
+
+ /* Config PPS interrupt cycle */
+ emac_wr(priv, PTP_PPS_VALUE, priv->pps_info.pps_cycle);
+
+ /* Disable PPS interrupt while seeding the software state */
+ reg = emac_rd(priv, PTP_1588_IRQ_EN);
+ reg &= ~PTP_PPS_VALID;
+ emac_wr(priv, PTP_1588_IRQ_EN, reg);
+
+ reg = emac_rd(priv, PTP_1588_CTRL);
+ reg |= PPS_MODE_ENABLE | TX_TIMESTAMP_EN | RX_TIMESTAMP_EN;
+ emac_wr(priv, PTP_1588_CTRL, reg);
+
+ ppstime = emac_hw_get_ppstime(priv);
+ pps_cnt = emac_rd(priv, PTP_PPS_COUNTER);
+ pr_info("ppstime=%lld pps_cnt=%d\n", ppstime, pps_cnt);
+
+ priv->pps_info.ppscnt = pps_cnt;
+ priv->pps_info.ppstime = ppstime;
+ priv->pps_info.utc_ns = emac_read_bcode_utc();
+
+ /* Enable PPS interrupt */
+ reg = emac_rd(priv, PTP_1588_IRQ_EN);
+ reg |= PTP_PPS_VALID;
+ emac_wr(priv, PTP_1588_IRQ_EN, reg);
+ return 0;
+}
+#endif
+
+/* Periodic worker that reads the timecounter well before the scaled
+ * 64-bit SYSTIM register can wrap (see EMAC_SYSTIM_OVERFLOW_*). In PPS
+ * mode it also compensates the UTC anchor and stored PPS edge when PPS
+ * interrupts were lost. Re-arms itself every EMAC_SYSTIM_OVERFLOW_PERIOD.
+ */
+static void emac_systim_overflow_work(struct work_struct *work)
+{
+ struct emac_priv *priv = container_of(work, struct emac_priv,
+ systim_overflow_work.work);
+ struct timespec64 ts;
+ u64 ns;
+
+ /* Update the timecounter */
+ ns = emac_timecounter_read(priv);
+
+ ts = ns_to_timespec64(ns);
+ pr_debug("SYSTIM overflow check at %lld.%09lu\n",
+ (long long) ts.tv_sec, ts.tv_nsec);
+
+#ifdef CONFIG_PPS
+ if (priv->pps_info.enable_pps) {
+ u64 systime, ppstime, interval_ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ systime = emac_hw_get_systime(priv);
+ ppstime = priv->pps_info.ppstime;
+ interval_ns = emac_timer_cyc2ns(priv, systime - ppstime, NULL, 0);
+ if (emac_lost_pps_interrupt(priv, &ns, interval_ns)) {
+ struct timecounter *tc = &priv->tc;
+
+ /* advance anchor by the lost seconds; convert the same
+ * span back to cycles for the stored PPS edge
+ */
+ priv->pps_info.utc_ns += ns;
+ priv->pps_info.ppstime +=
+ div_u64(ns << tc->cc->shift, tc->cc->mult);
+ pr_warn("PPS overflow: add %lld seconds to UTC\n", ns);
+ }
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ }
+#endif
+ schedule_delayed_work(&priv->systim_overflow_work,
+ EMAC_SYSTIM_OVERFLOW_PERIOD);
+}
+
+/**
+ * emac_ptp_register
+ * @priv: driver private structure
+ * Description: this function will register the ptp clock driver
+ * to kernel. It also does some house keeping work.
+ */
+void emac_ptp_register(struct emac_priv *priv)
+{
+ unsigned long flags;
+ u64 max_adj;
+
+ priv->cc.read = emac_cyclecounter_read;
+ priv->cc.mask = CYCLECOUNTER_MASK(64);
+ priv->cc.mult = 1;
+ if (priv->hw_adj)
+ priv->cc.shift = INCVALUE_SHIFT_HW;
+ else
+ priv->cc.shift = INCVALUE_SHIFT_SW;
+
+ spin_lock_init(&priv->ptp_lock);
+ priv->ptp_clock_ops = emac_ptp_clock_ops;
+
+ /*
+ * Compute the largest ppb adjustment the hardware addend can hold.
+ * Done in 64-bit: the old "int" computation overflowed (UB) for a
+ * 100MHz ptp clock (22 * 100e6 > INT_MAX) and relied on the wrapped
+ * negative value. Clamp to 1e9 ppb either way.
+ */
+ max_adj = (u64)((1 << (24 - INCVALUE_SHIFT_HW)) - 10) *
+ priv->ptp_clk_rate * INCPERIOD;
+ if (max_adj > 1000000000ULL) {
+ netdev_err(priv->ndev, "ptp increment too large\n");
+ max_adj = 1000000000ULL;
+ }
+ priv->ptp_clock_ops.max_adj = max_adj;
+
+ INIT_DELAYED_WORK(&priv->systim_overflow_work,
+ emac_systim_overflow_work);
+ schedule_delayed_work(&priv->systim_overflow_work,
+ EMAC_SYSTIM_OVERFLOW_PERIOD);
+#ifdef CONFIG_PPS
+ if (priv->pps_info.enable_pps) {
+ emac_hw_config_systime_increment(priv);
+ emac_ptp_config_pps(priv);
+ netdev_info(priv->ndev, "PPS enabled\n");
+ } else {
+ priv->ptp_clock_ops.pps = 0;
+ }
+#endif
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
+ NULL);
+ if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->ndev, "ptp_clock_register failed\n");
+ priv->ptp_clock = NULL;
+ } else if (priv->ptp_clock)
+ netdev_info(priv->ndev, "registered PTP clock\n");
+ else
+ netdev_info(priv->ndev, "PTP_1588_CLOCK maybe not enabled\n");
+
+ /* seed the PHC from system real time */
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+ emac_timecounter_init(priv, ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ priv->hwptp = &emac_hwptp;
+ pr_info("ptp max_adj:%u, overflow timeout:%ld minutes\n",
+ priv->ptp_clock_ops.max_adj, EMAC_SYSTIM_OVERFLOW_SEC / 60);
+}
+
+/**
+ * emac_ptp_unregister
+ * @priv: driver private structure
+ * Description: this function will remove/unregister the ptp clock driver
+ * from the kernel.
+ */
+void emac_ptp_unregister(struct emac_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->systim_overflow_work);
+
+ if (priv->ptp_clock) {
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+ pr_debug("Removed PTP HW clock successfully on %s\n",
+ priv->ndev->name);
+ }
+
+ if (priv->irq_pps)
+ free_irq(priv->irq_pps, priv);
+
+ priv->hwptp = NULL;
+}