ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/Kconfig b/marvell/linux/drivers/net/ethernet/qualcomm/Kconfig
new file mode 100644
index 0000000..09a678a
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/Kconfig
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Qualcomm network device configuration
+#
+
+config NET_VENDOR_QUALCOMM
+	bool "Qualcomm devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Qualcomm cards. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_QUALCOMM
+
+config QCA7000
+	tristate
+	help
+	  This enables support for the Qualcomm Atheros QCA7000.
+
+config QCA7000_SPI
+	tristate "Qualcomm Atheros QCA7000 SPI support"
+	select QCA7000
+	depends on SPI_MASTER && OF
+	---help---
+	  This SPI protocol driver supports the Qualcomm Atheros QCA7000.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called qcaspi.
+
+config QCA7000_UART
+	tristate "Qualcomm Atheros QCA7000 UART support"
+	select QCA7000
+	depends on SERIAL_DEV_BUS && OF
+	---help---
+	  This UART protocol driver supports the Qualcomm Atheros QCA7000.
+
+	  Currently the driver assumes these device UART settings:
+	    Data bits: 8
+	    Parity: None
+	    Stop bits: 1
+	    Flow control: None
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called qcauart.
+
+config QCOM_EMAC
+	tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
+	depends on HAS_DMA && HAS_IOMEM
+	select CRC32
+	select PHYLIB
+	---help---
+	  This driver supports the Qualcomm Technologies, Inc. Gigabit
+	  Ethernet Media Access Controller (EMAC). The controller
+	  supports IEEE 802.3-2002, half-duplex mode at 10/100 Mb/s,
+	  full-duplex mode at 10/100/1000 Mb/s, Wake On LAN (WOL) for
+	  low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+	  Precision Clock Synchronization Protocol.
+
+source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+
+endif # NET_VENDOR_QUALCOMM
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/Makefile b/marvell/linux/drivers/net/ethernet/qualcomm/Makefile
new file mode 100644
index 0000000..9250976
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Qualcomm network device drivers.
+#
+
+obj-$(CONFIG_QCA7000) += qca_7k_common.o
+obj-$(CONFIG_QCA7000_SPI) += qcaspi.o
+qcaspi-objs := qca_7k.o qca_debug.o qca_spi.o
+obj-$(CONFIG_QCA7000_UART) += qcauart.o
+qcauart-objs := qca_uart.o
+
+obj-y += emac/
+
+obj-$(CONFIG_RMNET) += rmnet/
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/Makefile b/marvell/linux/drivers/net/ethernet/qualcomm/emac/Makefile
new file mode 100644
index 0000000..61d15e0
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Qualcomm Technologies, Inc. EMAC Gigabit Ethernet driver
+#
+
+obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
+
+qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \
+		  emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \
+		  emac-sgmii-qdf2400.o
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
new file mode 100644
index 0000000..79e5007
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include "emac.h"
+
+static const char * const emac_ethtool_stat_strings[] = {
+	"rx_ok",
+	"rx_bcast",
+	"rx_mcast",
+	"rx_pause",
+	"rx_ctrl",
+	"rx_fcs_err",
+	"rx_len_err",
+	"rx_byte_cnt",
+	"rx_runt",
+	"rx_frag",
+	"rx_sz_64",
+	"rx_sz_65_127",
+	"rx_sz_128_255",
+	"rx_sz_256_511",
+	"rx_sz_512_1023",
+	"rx_sz_1024_1518",
+	"rx_sz_1519_max",
+	"rx_sz_ov",
+	"rx_rxf_ov",
+	"rx_align_err",
+	"rx_bcast_byte_cnt",
+	"rx_mcast_byte_cnt",
+	"rx_err_addr",
+	"rx_crc_align",
+	"rx_jabbers",
+	"tx_ok",
+	"tx_bcast",
+	"tx_mcast",
+	"tx_pause",
+	"tx_exc_defer",
+	"tx_ctrl",
+	"tx_defer",
+	"tx_byte_cnt",
+	"tx_sz_64",
+	"tx_sz_65_127",
+	"tx_sz_128_255",
+	"tx_sz_256_511",
+	"tx_sz_512_1023",
+	"tx_sz_1024_1518",
+	"tx_sz_1519_max",
+	"tx_1_col",
+	"tx_2_col",
+	"tx_late_col",
+	"tx_abort_col",
+	"tx_underrun",
+	"tx_rd_eop",
+	"tx_len_err",
+	"tx_trunc",
+	"tx_bcast_byte",
+	"tx_mcast_byte",
+	"tx_col",
+};
+
+#define EMAC_STATS_LEN	ARRAY_SIZE(emac_ethtool_stat_strings)
+
+static u32 emac_get_msglevel(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	return adpt->msg_enable;
+}
+
+static void emac_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	adpt->msg_enable = data;
+}
+
+static int emac_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_PRIV_FLAGS:
+		return 1;
+	case ETH_SS_STATS:
+		return EMAC_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	unsigned int i;
+
+	switch (stringset) {
+	case ETH_SS_PRIV_FLAGS:
+		strcpy(data, "single-pause-mode");
+		break;
+
+	case ETH_SS_STATS:
+		for (i = 0; i < EMAC_STATS_LEN; i++) {
+			strlcpy(data, emac_ethtool_stat_strings[i],
+				ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static void emac_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats,
+				   u64 *data)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	spin_lock(&adpt->stats.lock);
+
+	emac_update_hw_stats(adpt);
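+	/* This assumes the leading u64 counters in struct emac_stats are in
+	 * the same order as emac_ethtool_stat_strings[] above.
+	 */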
+	memcpy(data, &adpt->stats, EMAC_STATS_LEN * sizeof(u64));
+
+	spin_unlock(&adpt->stats.lock);
+}
+
+static int emac_nway_reset(struct net_device *netdev)
+{
+	struct phy_device *phydev = netdev->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return genphy_restart_aneg(phydev);
+}
+
+static void emac_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	ring->rx_max_pending = EMAC_MAX_RX_DESCS;
+	ring->tx_max_pending = EMAC_MAX_TX_DESCS;
+	ring->rx_pending = adpt->rx_desc_cnt;
+	ring->tx_pending = adpt->tx_desc_cnt;
+}
+
+static int emac_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	/* We don't have separate queues/rings for small/large frames, so
+	 * reject any attempt to specify those values separately.
+	 */
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+		return -EINVAL;
+
+	adpt->tx_desc_cnt =
+		clamp_val(ring->tx_pending, EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS);
+
+	adpt->rx_desc_cnt =
+		clamp_val(ring->rx_pending, EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS);
+
+	if (netif_running(netdev))
+		return emac_reinit_locked(adpt);
+
+	return 0;
+}
+
+static void emac_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	pause->autoneg = adpt->automatic ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	pause->rx_pause = adpt->rx_flow_control ? 1 : 0;
+	pause->tx_pause = adpt->tx_flow_control ? 1 : 0;
+}
+
+static int emac_set_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *pause)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	adpt->automatic = pause->autoneg == AUTONEG_ENABLE;
+	adpt->rx_flow_control = pause->rx_pause != 0;
+	adpt->tx_flow_control = pause->tx_pause != 0;
+
+	if (netif_running(netdev))
+		return emac_reinit_locked(adpt);
+
+	return 0;
+}
+
+/* Selected registers that one might want to track during runtime. */
+static const u16 emac_regs[] = {
+	EMAC_DMA_MAS_CTRL,
+	EMAC_MAC_CTRL,
+	EMAC_TXQ_CTRL_0,
+	EMAC_RXQ_CTRL_0,
+	EMAC_DMA_CTRL,
+	EMAC_INT_MASK,
+	EMAC_AXI_MAST_CTRL,
+	EMAC_CORE_HW_VERSION,
+	EMAC_MISC_CTRL,
+};
+
+/* Every time emac_regs[] above is changed, increase this version number. */
+#define EMAC_REGS_VERSION	0
+
+#define EMAC_MAX_REG_SIZE	ARRAY_SIZE(emac_regs)
+
+static void emac_get_regs(struct net_device *netdev,
+			  struct ethtool_regs *regs, void *buff)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+	u32 *val = buff;
+	unsigned int i;
+
+	regs->version = EMAC_REGS_VERSION;
+	regs->len = EMAC_MAX_REG_SIZE * sizeof(u32);
+
+	for (i = 0; i < EMAC_MAX_REG_SIZE; i++)
+		val[i] = readl(adpt->base + emac_regs[i]);
+}
+
+static int emac_get_regs_len(struct net_device *netdev)
+{
+	return EMAC_MAX_REG_SIZE * sizeof(u32);
+}
+
+#define EMAC_PRIV_ENABLE_SINGLE_PAUSE	BIT(0)
+
+static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
+
+	if (netif_running(netdev))
+		return emac_reinit_locked(adpt);
+
+	return 0;
+}
+
+static u32 emac_get_priv_flags(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+
+	.get_msglevel    = emac_get_msglevel,
+	.set_msglevel    = emac_set_msglevel,
+
+	.get_sset_count  = emac_get_sset_count,
+	.get_strings = emac_get_strings,
+	.get_ethtool_stats = emac_get_ethtool_stats,
+
+	.get_ringparam = emac_get_ringparam,
+	.set_ringparam = emac_set_ringparam,
+
+	.get_pauseparam = emac_get_pauseparam,
+	.set_pauseparam = emac_set_pauseparam,
+
+	.nway_reset = emac_nway_reset,
+
+	.get_link = ethtool_op_get_link,
+
+	.get_regs_len    = emac_get_regs_len,
+	.get_regs        = emac_get_regs,
+
+	.set_priv_flags = emac_set_priv_flags,
+	.get_priv_flags = emac_get_priv_flags,
+};
+
+void emac_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &emac_ethtool_ops;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-mac.c
new file mode 100644
index 0000000..ab1b4da
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -0,0 +1,1515 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC Ethernet Controller MAC layer support
+ */
+
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <net/ip6_checksum.h>
+#include "emac.h"
+#include "emac-sgmii.h"
+
+/* EMAC_MAC_CTRL */
+#define SINGLE_PAUSE_MODE               0x10000000
+#define DEBUG_MODE                      0x08000000
+#define BROAD_EN                        0x04000000
+#define MULTI_ALL                       0x02000000
+#define RX_CHKSUM_EN                    0x01000000
+#define HUGE                            0x00800000
+#define SPEED(x)			(((x) & 0x3) << 20)
+#define SPEED_MASK			SPEED(0x3)
+#define SIMR                            0x00080000
+#define TPAUSE                          0x00010000
+#define PROM_MODE                       0x00008000
+#define VLAN_STRIP                      0x00004000
+#define PRLEN_BMSK                      0x00003c00
+#define PRLEN_SHFT                      10
+#define HUGEN                           0x00000200
+#define FLCHK                           0x00000100
+#define PCRCE                           0x00000080
+#define CRCE                            0x00000040
+#define FULLD                           0x00000020
+#define MAC_LP_EN                       0x00000010
+#define RXFC                            0x00000008
+#define TXFC                            0x00000004
+#define RXEN                            0x00000002
+#define TXEN                            0x00000001
+
+/* EMAC_DESC_CTRL_3 */
+#define RFD_RING_SIZE_BMSK                                       0xfff
+
+/* EMAC_DESC_CTRL_4 */
+#define RX_BUFFER_SIZE_BMSK                                     0xffff
+
+/* EMAC_DESC_CTRL_6 */
+#define RRD_RING_SIZE_BMSK                                       0xfff
+
+/* EMAC_DESC_CTRL_9 */
+#define TPD_RING_SIZE_BMSK                                      0xffff
+
+/* EMAC_TXQ_CTRL_0 */
+#define NUM_TXF_BURST_PREF_BMSK                             0xffff0000
+#define NUM_TXF_BURST_PREF_SHFT                                     16
+#define LS_8023_SP                                                0x80
+#define TXQ_MODE                                                  0x40
+#define TXQ_EN                                                    0x20
+#define IP_OP_SP                                                  0x10
+#define NUM_TPD_BURST_PREF_BMSK                                    0xf
+#define NUM_TPD_BURST_PREF_SHFT                                      0
+
+/* EMAC_TXQ_CTRL_1 */
+#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK                        0x7ff
+
+/* EMAC_TXQ_CTRL_2 */
+#define TXF_HWM_BMSK                                         0xfff0000
+#define TXF_LWM_BMSK                                             0xfff
+
+/* EMAC_RXQ_CTRL_0 */
+#define RXQ_EN                                                 BIT(31)
+#define CUT_THRU_EN                                            BIT(30)
+#define RSS_HASH_EN                                            BIT(29)
+#define NUM_RFD_BURST_PREF_BMSK                              0x3f00000
+#define NUM_RFD_BURST_PREF_SHFT                                     20
+#define IDT_TABLE_SIZE_BMSK                                    0x1ff00
+#define IDT_TABLE_SIZE_SHFT                                          8
+#define SP_IPV6                                                   0x80
+
+/* EMAC_RXQ_CTRL_1 */
+#define JUMBO_1KAH_BMSK                                         0xf000
+#define JUMBO_1KAH_SHFT                                             12
+#define RFD_PREF_LOW_TH                                           0x10
+#define RFD_PREF_LOW_THRESHOLD_BMSK                              0xfc0
+#define RFD_PREF_LOW_THRESHOLD_SHFT                                  6
+#define RFD_PREF_UP_TH                                            0x10
+#define RFD_PREF_UP_THRESHOLD_BMSK                                0x3f
+#define RFD_PREF_UP_THRESHOLD_SHFT                                   0
+
+/* EMAC_RXQ_CTRL_2 */
+#define RXF_DOF_THRESHOLD                                        0x1a0
+#define RXF_DOF_THRESHOLD_BMSK                               0xfff0000
+#define RXF_DOF_THRESHOLD_SHFT                                      16
+#define RXF_UOF_THRESHOLD                                         0xbe
+#define RXF_UOF_THRESHOLD_BMSK                                   0xfff
+#define RXF_UOF_THRESHOLD_SHFT                                       0
+
+/* EMAC_RXQ_CTRL_3 */
+#define RXD_TIMER_BMSK                                      0xffff0000
+#define RXD_THRESHOLD_BMSK                                       0xfff
+#define RXD_THRESHOLD_SHFT                                           0
+
+/* EMAC_DMA_CTRL */
+#define DMAW_DLY_CNT_BMSK                                      0xf0000
+#define DMAW_DLY_CNT_SHFT                                           16
+#define DMAR_DLY_CNT_BMSK                                       0xf800
+#define DMAR_DLY_CNT_SHFT                                           11
+#define DMAR_REQ_PRI                                             0x400
+#define REGWRBLEN_BMSK                                           0x380
+#define REGWRBLEN_SHFT                                               7
+#define REGRDBLEN_BMSK                                            0x70
+#define REGRDBLEN_SHFT                                               4
+#define OUT_ORDER_MODE                                             0x4
+#define ENH_ORDER_MODE                                             0x2
+#define IN_ORDER_MODE                                              0x1
+
+/* EMAC_MAILBOX_13 */
+#define RFD3_PROC_IDX_BMSK                                   0xfff0000
+#define RFD3_PROC_IDX_SHFT                                          16
+#define RFD3_PROD_IDX_BMSK                                       0xfff
+#define RFD3_PROD_IDX_SHFT                                           0
+
+/* EMAC_MAILBOX_2 */
+#define NTPD_CONS_IDX_BMSK                                  0xffff0000
+#define NTPD_CONS_IDX_SHFT                                          16
+
+/* EMAC_MAILBOX_3 */
+#define RFD0_CONS_IDX_BMSK                                       0xfff
+#define RFD0_CONS_IDX_SHFT                                           0
+
+/* EMAC_MAILBOX_11 */
+#define H3TPD_PROD_IDX_BMSK                                 0xffff0000
+#define H3TPD_PROD_IDX_SHFT                                         16
+
+/* EMAC_AXI_MAST_CTRL */
+#define DATA_BYTE_SWAP                                             0x8
+#define MAX_BOUND                                                  0x2
+#define MAX_BTYPE                                                  0x1
+
+/* EMAC_MAILBOX_12 */
+#define H3TPD_CONS_IDX_BMSK                                 0xffff0000
+#define H3TPD_CONS_IDX_SHFT                                         16
+
+/* EMAC_MAILBOX_9 */
+#define H2TPD_PROD_IDX_BMSK                                     0xffff
+#define H2TPD_PROD_IDX_SHFT                                          0
+
+/* EMAC_MAILBOX_10 */
+#define H1TPD_CONS_IDX_BMSK                                 0xffff0000
+#define H1TPD_CONS_IDX_SHFT                                         16
+#define H2TPD_CONS_IDX_BMSK                                     0xffff
+#define H2TPD_CONS_IDX_SHFT                                          0
+
+/* EMAC_ATHR_HEADER_CTRL */
+#define HEADER_CNT_EN                                              0x2
+#define HEADER_ENABLE                                              0x1
+
+/* EMAC_MAILBOX_0 */
+#define RFD0_PROC_IDX_BMSK                                   0xfff0000
+#define RFD0_PROC_IDX_SHFT                                          16
+#define RFD0_PROD_IDX_BMSK                                       0xfff
+#define RFD0_PROD_IDX_SHFT                                           0
+
+/* EMAC_MAILBOX_5 */
+#define RFD1_PROC_IDX_BMSK                                   0xfff0000
+#define RFD1_PROC_IDX_SHFT                                          16
+#define RFD1_PROD_IDX_BMSK                                       0xfff
+#define RFD1_PROD_IDX_SHFT                                           0
+
+/* EMAC_MISC_CTRL */
+#define RX_UNCPL_INT_EN                                            0x1
+
+/* EMAC_MAILBOX_7 */
+#define RFD2_CONS_IDX_BMSK                                   0xfff0000
+#define RFD2_CONS_IDX_SHFT                                          16
+#define RFD1_CONS_IDX_BMSK                                       0xfff
+#define RFD1_CONS_IDX_SHFT                                           0
+
+/* EMAC_MAILBOX_8 */
+#define RFD3_CONS_IDX_BMSK                                       0xfff
+#define RFD3_CONS_IDX_SHFT                                           0
+
+/* EMAC_MAILBOX_15 */
+#define NTPD_PROD_IDX_BMSK                                      0xffff
+#define NTPD_PROD_IDX_SHFT                                           0
+
+/* EMAC_MAILBOX_16 */
+#define H1TPD_PROD_IDX_BMSK                                     0xffff
+#define H1TPD_PROD_IDX_SHFT                                          0
+
+#define RXQ0_RSS_HSTYP_IPV6_TCP_EN                                0x20
+#define RXQ0_RSS_HSTYP_IPV6_EN                                    0x10
+#define RXQ0_RSS_HSTYP_IPV4_TCP_EN                                 0x8
+#define RXQ0_RSS_HSTYP_IPV4_EN                                     0x4
+
+/* EMAC_EMAC_WRAPPER_TX_TS_INX */
+#define EMAC_WRAPPER_TX_TS_EMPTY                               BIT(31)
+#define EMAC_WRAPPER_TX_TS_INX_BMSK                             0xffff
+
+struct emac_skb_cb {
+	u32           tpd_idx;
+	unsigned long jiffies;
+};
+
+#define EMAC_SKB_CB(skb)	((struct emac_skb_cb *)(skb)->cb)
+#define EMAC_RSS_IDT_SIZE	256
+#define JUMBO_1KAH		0x4
+#define RXD_TH			0x100
+#define EMAC_TPD_LAST_FRAGMENT	0x80000000
+#define EMAC_TPD_TSTAMP_SAVE	0x80000000
+
+/* EMAC Errors in emac_rrd.word[3] */
+#define EMAC_RRD_L4F		BIT(14)
+#define EMAC_RRD_IPF		BIT(15)
+#define EMAC_RRD_CRC		BIT(21)
+#define EMAC_RRD_FAE		BIT(22)
+#define EMAC_RRD_TRN		BIT(23)
+#define EMAC_RRD_RNT		BIT(24)
+#define EMAC_RRD_INC		BIT(25)
+#define EMAC_RRD_FOV		BIT(29)
+#define EMAC_RRD_LEN		BIT(30)
+
+/* Error bits that will result in a received frame being discarded */
+#define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \
+			EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \
+			EMAC_RRD_FOV | EMAC_RRD_LEN)
+#define EMAC_RRD_STATS_DW_IDX 3
+
+#define EMAC_RRD(RXQ, SIZE, IDX)	((RXQ)->rrd.v_addr + (SIZE * (IDX)))
+#define EMAC_RFD(RXQ, SIZE, IDX)	((RXQ)->rfd.v_addr + (SIZE * (IDX)))
+#define EMAC_TPD(TXQ, SIZE, IDX)	((TXQ)->tpd.v_addr + (SIZE * (IDX)))
+
+#define GET_RFD_BUFFER(RXQ, IDX)	(&((RXQ)->rfd.rfbuff[(IDX)]))
+#define GET_TPD_BUFFER(RTQ, IDX)	(&((RTQ)->tpd.tpbuff[(IDX)]))
+
+#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD	8
+
+#define ISR_RX_PKT      (\
+	RX_PKT_INT0     |\
+	RX_PKT_INT1     |\
+	RX_PKT_INT2     |\
+	RX_PKT_INT3)
+
+void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
+{
+	u32 crc32, bit, reg, mta;
+
+	/* Calculate the CRC of the MAC address */
+	crc32 = ether_crc(ETH_ALEN, addr);
+
+	/* The HASH Table is an array of 2 32-bit registers. It is
+	 * treated like an array of 64 bits (BitArray[hash_value]).
+	 * Use the upper 6 bits of the above CRC as the hash value.
+	 */
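+	/* Bit 31 of the CRC selects one of the two registers; bits 30:26
+	 * select the bit within that register.
+	 */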
+	reg = (crc32 >> 31) & 0x1;
+	bit = (crc32 >> 26) & 0x1F;
+
+	mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
+	mta |= BIT(bit);
+	writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
+}
+
+void emac_mac_multicast_addr_clear(struct emac_adapter *adpt)
+{
+	writel(0, adpt->base + EMAC_HASH_TAB_REG0);
+	writel(0, adpt->base + EMAC_HASH_TAB_REG1);
+}
+
+/* definitions for RSS */
+#define EMAC_RSS_KEY(_i, _type) \
+		(EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
+#define EMAC_RSS_TBL(_i, _type) \
+		(EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))
+
+/* Config MAC modes */
+void emac_mac_mode_config(struct emac_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	u32 mac;
+
+	mac = readl(adpt->base + EMAC_MAC_CTRL);
+	mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN);
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		mac |= VLAN_STRIP;
+
+	if (netdev->flags & IFF_PROMISC)
+		mac |= PROM_MODE;
+
+	if (netdev->flags & IFF_ALLMULTI)
+		mac |= MULTI_ALL;
+
+	writel(mac, adpt->base + EMAC_MAC_CTRL);
+}
+
+/* Config descriptor rings */
+static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
+{
+	/* TPD (Transmit Packet Descriptor) */
+	writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
+	       adpt->base + EMAC_DESC_CTRL_1);
+
+	writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
+	       adpt->base + EMAC_DESC_CTRL_8);
+
+	writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
+	       adpt->base + EMAC_DESC_CTRL_9);
+
+	/* RFD (Receive Free Descriptor) & RRD (Receive Return Descriptor) */
+	writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
+	       adpt->base + EMAC_DESC_CTRL_0);
+
+	writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
+	       adpt->base + EMAC_DESC_CTRL_2);
+	writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
+	       adpt->base + EMAC_DESC_CTRL_5);
+
+	writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
+	       adpt->base + EMAC_DESC_CTRL_3);
+	writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK,
+	       adpt->base + EMAC_DESC_CTRL_6);
+
+	writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK,
+	       adpt->base + EMAC_DESC_CTRL_4);
+
+	writel(0, adpt->base + EMAC_DESC_CTRL_11);
+
+	/* Trigger the HW to load all of the base addresses programmed above;
+	 * the write also ensures the earlier ring-pointer writes are flushed.
+	 */
+	writel(1, adpt->base + EMAC_INTER_SRAM_PART9);
+}
+
+/* Config transmit parameters */
+static void emac_mac_tx_config(struct emac_adapter *adpt)
+{
+	u32 val;
+
+	writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) &
+	       JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1);
+
+	val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
+	       NUM_TPD_BURST_PREF_BMSK;
+
+	val |= TXQ_MODE | LS_8023_SP;
+	val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
+		NUM_TXF_BURST_PREF_BMSK;
+
+	writel(val, adpt->base + EMAC_TXQ_CTRL_0);
+	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2,
+			  (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
+}
+
+/* Config receive parameters */
+static void emac_mac_rx_config(struct emac_adapter *adpt)
+{
+	u32 val;
+
+	val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
+	       NUM_RFD_BURST_PREF_BMSK;
+	val |= (SP_IPV6 | CUT_THRU_EN);
+
+	writel(val, adpt->base + EMAC_RXQ_CTRL_0);
+
+	val = readl(adpt->base + EMAC_RXQ_CTRL_1);
+	val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
+		 RFD_PREF_UP_THRESHOLD_BMSK);
+	val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
+		(RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
+		(RFD_PREF_UP_TH  << RFD_PREF_UP_THRESHOLD_SHFT);
+	writel(val, adpt->base + EMAC_RXQ_CTRL_1);
+
+	val = readl(adpt->base + EMAC_RXQ_CTRL_2);
+	val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
+	val |= (RXF_DOF_THRESHOLD  << RXF_DOF_THRESHOLD_SHFT) |
+		(RXF_UOF_THRESHOLD << RXF_UOF_THRESHOLD_SHFT);
+	writel(val, adpt->base + EMAC_RXQ_CTRL_2);
+
+	val = readl(adpt->base + EMAC_RXQ_CTRL_3);
+	val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
+	val |= RXD_TH << RXD_THRESHOLD_SHFT;
+	writel(val, adpt->base + EMAC_RXQ_CTRL_3);
+}
+
+/* Config dma */
+static void emac_mac_dma_config(struct emac_adapter *adpt)
+{
+	u32 dma_ctrl = DMAR_REQ_PRI;
+
+	switch (adpt->dma_order) {
+	case emac_dma_ord_in:
+		dma_ctrl |= IN_ORDER_MODE;
+		break;
+	case emac_dma_ord_enh:
+		dma_ctrl |= ENH_ORDER_MODE;
+		break;
+	case emac_dma_ord_out:
+		dma_ctrl |= OUT_ORDER_MODE;
+		break;
+	default:
+		break;
+	}
+
+	dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) &
+						REGRDBLEN_BMSK;
+	dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) &
+						REGWRBLEN_BMSK;
+	dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
+						DMAR_DLY_CNT_BMSK;
+	dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
+						DMAW_DLY_CNT_BMSK;
+
+	/* config DMA and ensure that configuration is flushed to HW */
+	writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL);
+}
+
+/* set MAC address */
+static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
+{
+	u32 sta;
+
+	/* For example, for address 00-A0-C6-11-22-33:
+	 * STA_ADDR0 <-- C6112233 (low word), STA_ADDR1 <-- 00A0 (high word).
+	 */
+
+	/* low 32-bit word */
+	sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
+	      (((u32)addr[4]) << 8)  | (((u32)addr[5]));
+	writel(sta, adpt->base + EMAC_MAC_STA_ADDR0);
+
+	/* high 32-bit word */
+	sta = (((u32)addr[0]) << 8) | (u32)addr[1];
+	writel(sta, adpt->base + EMAC_MAC_STA_ADDR1);
+}
+
+static void emac_mac_config(struct emac_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	unsigned int max_frame;
+	u32 val;
+
+	emac_set_mac_address(adpt, netdev->dev_addr);
+
+	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ?
+		ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE;
+
+	emac_mac_dma_rings_config(adpt);
+
+	writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
+	       adpt->base + EMAC_MAX_FRAM_LEN_CTRL);
+
+	emac_mac_tx_config(adpt);
+	emac_mac_rx_config(adpt);
+	emac_mac_dma_config(adpt);
+
+	val = readl(adpt->base + EMAC_AXI_MAST_CTRL);
+	val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
+	val |= MAX_BTYPE;
+	writel(val, adpt->base + EMAC_AXI_MAST_CTRL);
+	writel(0, adpt->base + EMAC_CLK_GATE_CTRL);
+	writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL);
+}
+
+void emac_mac_reset(struct emac_adapter *adpt)
+{
+	emac_mac_stop(adpt);
+
+	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
+	usleep_range(100, 150); /* reset may take up to 100usec */
+
+	/* interrupt clear-on-read */
+	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
+}
+
+static void emac_mac_start(struct emac_adapter *adpt)
+{
+	struct phy_device *phydev = adpt->phydev;
+	u32 mac, csr1;
+
+	/* enable tx queue */
+	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN);
+
+	/* enable rx queue */
+	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN);
+
+	/* enable mac control */
+	mac = readl(adpt->base + EMAC_MAC_CTRL);
+	csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
+
+	mac |= TXEN | RXEN;     /* enable RX/TX */
+
+	/* Configure MAC flow control. If set to automatic, then match
+	 * whatever the PHY does. Otherwise, enable or disable it, depending
+	 * on what the user configured via ethtool.
+	 */
+	mac &= ~(RXFC | TXFC);
+
+	if (adpt->automatic) {
+		/* If it's set to automatic, then update our local values */
+		adpt->rx_flow_control = phydev->pause;
+		adpt->tx_flow_control = phydev->pause != phydev->asym_pause;
+	}
+	mac |= adpt->rx_flow_control ? RXFC : 0;
+	mac |= adpt->tx_flow_control ? TXFC : 0;
+
+	/* setup link speed */
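+	/* SPEED(2) selects gigabit operation, SPEED(1) covers 10/100; the
+	 * wrapper's FREQ_MODE bit is toggled in tandem with gigabit mode.
+	 */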
+	mac &= ~SPEED_MASK;
+	if (phydev->speed == SPEED_1000) {
+		mac |= SPEED(2);
+		csr1 |= FREQ_MODE;
+	} else {
+		mac |= SPEED(1);
+		csr1 &= ~FREQ_MODE;
+	}
+
+	if (phydev->duplex == DUPLEX_FULL)
+		mac |= FULLD;
+	else
+		mac &= ~FULLD;
+
+	/* other parameters */
+	mac |= (CRCE | PCRCE);
+	mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK);
+	mac |= BROAD_EN;
+	mac |= FLCHK;
+	mac &= ~RX_CHKSUM_EN;
+	mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
+		 DEBUG_MODE | SINGLE_PAUSE_MODE);
+
+	/* Enable single-pause-frame mode if requested.
+	 *
+	 * If enabled, the EMAC will send a single pause frame when the RX
+	 * queue is full.  This normally leads to packet loss because
+	 * the pause frame disables the remote MAC only for 33ms (the quanta),
+	 * and then the remote MAC continues sending packets even though
+	 * the RX queue is still full.
+	 *
+	 * If disabled, the EMAC sends a pause frame every 31ms until the RX
+	 * queue is no longer full.  Normally, this is the preferred
+	 * method of operation.  However, when the system is hung (e.g.
+	 * cores are halted), the EMAC interrupt handler is never called
+	 * and so the RX queue fills up quickly and stays full.  The resulting
+	 * non-stop "flood" of pause frames sometimes has the effect of
+	 * disabling nearby switches.  In some cases, other nearby switches
+	 * are also affected, shutting down the entire network.
+	 *
+	 * The user can enable or disable single-pause-frame mode
+	 * via ethtool.
+	 */
+	mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
+
+	writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
+
+	writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
+
+	/* enable interrupt read clear, low power sleep mode and
+	 * the irq moderators
+	 */
+
+	writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT);
+	writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN |
+			IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL);
+
+	emac_mac_mode_config(adpt);
+
+	emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
+			  (HEADER_ENABLE | HEADER_CNT_EN), 0);
+}
+
+void emac_mac_stop(struct emac_adapter *adpt)
+{
+	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0);
+	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0);
+	emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0);
+	usleep_range(1000, 1050); /* stopping mac may take up to 1 msec */
+}
+
+/* Free all descriptors of given transmit queue */
+static void emac_tx_q_descs_free(struct emac_adapter *adpt)
+{
+	struct emac_tx_queue *tx_q = &adpt->tx_q;
+	unsigned int i;
+	size_t size;
+
+	/* ring already cleared, nothing to do */
+	if (!tx_q->tpd.tpbuff)
+		return;
+
+	for (i = 0; i < tx_q->tpd.count; i++) {
+		struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i);
+
+		if (tpbuf->dma_addr) {
+			dma_unmap_single(adpt->netdev->dev.parent,
+					 tpbuf->dma_addr, tpbuf->length,
+					 DMA_TO_DEVICE);
+			tpbuf->dma_addr = 0;
+		}
+		if (tpbuf->skb) {
+			dev_kfree_skb_any(tpbuf->skb);
+			tpbuf->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct emac_buffer) * tx_q->tpd.count;
+	memset(tx_q->tpd.tpbuff, 0, size);
+
+	/* clear the descriptor ring */
+	memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);
+
+	tx_q->tpd.consume_idx = 0;
+	tx_q->tpd.produce_idx = 0;
+}
+
+/* Free all descriptors of given receive queue */
+static void emac_rx_q_free_descs(struct emac_adapter *adpt)
+{
+	struct device *dev = adpt->netdev->dev.parent;
+	struct emac_rx_queue *rx_q = &adpt->rx_q;
+	unsigned int i;
+	size_t size;
+
+	/* ring already cleared, nothing to do */
+	if (!rx_q->rfd.rfbuff)
+		return;
+
+	for (i = 0; i < rx_q->rfd.count; i++) {
+		struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i);
+
+		if (rfbuf->dma_addr) {
+			dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length,
+					 DMA_FROM_DEVICE);
+			rfbuf->dma_addr = 0;
+		}
+		if (rfbuf->skb) {
+			dev_kfree_skb(rfbuf->skb);
+			rfbuf->skb = NULL;
+		}
+	}
+
+	size =  sizeof(struct emac_buffer) * rx_q->rfd.count;
+	memset(rx_q->rfd.rfbuff, 0, size);
+
+	/* clear the descriptor rings */
+	memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size);
+	rx_q->rrd.produce_idx = 0;
+	rx_q->rrd.consume_idx = 0;
+
+	memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
+	rx_q->rfd.produce_idx = 0;
+	rx_q->rfd.consume_idx = 0;
+}
+
+/* Free all buffers associated with given transmit queue */
+static void emac_tx_q_bufs_free(struct emac_adapter *adpt)
+{
+	struct emac_tx_queue *tx_q = &adpt->tx_q;
+
+	emac_tx_q_descs_free(adpt);
+
+	kfree(tx_q->tpd.tpbuff);
+	tx_q->tpd.tpbuff = NULL;
+	tx_q->tpd.v_addr = NULL;
+	tx_q->tpd.dma_addr = 0;
+	tx_q->tpd.size = 0;
+}
+
+/* Allocate TX descriptor ring for the given transmit queue */
+static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
+				struct emac_tx_queue *tx_q)
+{
+	struct emac_ring_header *ring_header = &adpt->ring_header;
+	int node = dev_to_node(adpt->netdev->dev.parent);
+	size_t size;
+
+	size = sizeof(struct emac_buffer) * tx_q->tpd.count;
+	tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
+	if (!tx_q->tpd.tpbuff)
+		return -ENOMEM;
+
+	tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
+	tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
+	tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
+	ring_header->used += ALIGN(tx_q->tpd.size, 8);
+	tx_q->tpd.produce_idx = 0;
+	tx_q->tpd.consume_idx = 0;
+
+	return 0;
+}
+
+/* Free all buffers associated with given receive queue */
+static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
+{
+	struct emac_rx_queue *rx_q = &adpt->rx_q;
+
+	emac_rx_q_free_descs(adpt);
+
+	kfree(rx_q->rfd.rfbuff);
+	rx_q->rfd.rfbuff   = NULL;
+
+	rx_q->rfd.v_addr   = NULL;
+	rx_q->rfd.dma_addr = 0;
+	rx_q->rfd.size     = 0;
+
+	rx_q->rrd.v_addr   = NULL;
+	rx_q->rrd.dma_addr = 0;
+	rx_q->rrd.size     = 0;
+}
+
+/* Allocate RX descriptor rings for the given receive queue */
+static int emac_rx_descs_alloc(struct emac_adapter *adpt)
+{
+	struct emac_ring_header *ring_header = &adpt->ring_header;
+	int node = dev_to_node(adpt->netdev->dev.parent);
+	struct emac_rx_queue *rx_q = &adpt->rx_q;
+	size_t size;
+
+	size = sizeof(struct emac_buffer) * rx_q->rfd.count;
+	rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
+	if (!rx_q->rfd.rfbuff)
+		return -ENOMEM;
+
+	rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
+	rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);
+
+	rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
+	rx_q->rrd.v_addr   = ring_header->v_addr + ring_header->used;
+	ring_header->used += ALIGN(rx_q->rrd.size, 8);
+
+	rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
+	rx_q->rfd.v_addr   = ring_header->v_addr + ring_header->used;
+	ring_header->used += ALIGN(rx_q->rfd.size, 8);
+
+	rx_q->rrd.produce_idx = 0;
+	rx_q->rrd.consume_idx = 0;
+
+	rx_q->rfd.produce_idx = 0;
+	rx_q->rfd.consume_idx = 0;
+
+	return 0;
+}
+
+/* Allocate all TX and RX descriptor rings */
+int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
+{
+	struct emac_ring_header *ring_header = &adpt->ring_header;
+	struct device *dev = adpt->netdev->dev.parent;
+	unsigned int num_tx_descs = adpt->tx_desc_cnt;
+	unsigned int num_rx_descs = adpt->rx_desc_cnt;
+	int ret;
+
+	adpt->tx_q.tpd.count = adpt->tx_desc_cnt;
+
+	adpt->rx_q.rrd.count = adpt->rx_desc_cnt;
+	adpt->rx_q.rfd.count = adpt->rx_desc_cnt;
+
+	/* Ring DMA buffer. Each ring may need up to 8 bytes for alignment,
+	 * hence the additional padding bytes are allocated.
+	 */
+	ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
+			    num_rx_descs * (adpt->rfd_size * 4) +
+			    num_rx_descs * (adpt->rrd_size * 4) +
+			    8 + 2 * 8; /* 8 bytes each for one Tx and two Rx rings */
+
+	ring_header->used = 0;
+	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
+						 &ring_header->dma_addr,
+						 GFP_KERNEL);
+	if (!ring_header->v_addr)
+		return -ENOMEM;
+
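+	/* Skip whatever bytes are needed to bring the first ring up to an
+	 * 8-byte-aligned DMA address.
+	 */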
+	ring_header->used = ALIGN(ring_header->dma_addr, 8) -
+							ring_header->dma_addr;
+
+	ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
+	if (ret) {
+		netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
+		goto err_alloc_tx;
+	}
+
+	ret = emac_rx_descs_alloc(adpt);
+	if (ret) {
+		netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
+		goto err_alloc_rx;
+	}
+
+	return 0;
+
+err_alloc_rx:
+	emac_tx_q_bufs_free(adpt);
+err_alloc_tx:
+	dma_free_coherent(dev, ring_header->size,
+			  ring_header->v_addr, ring_header->dma_addr);
+
+	ring_header->v_addr   = NULL;
+	ring_header->dma_addr = 0;
+	ring_header->size     = 0;
+	ring_header->used     = 0;
+
+	return ret;
+}
+
+/* Free all TX and RX descriptor rings */
+void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt)
+{
+	struct emac_ring_header *ring_header = &adpt->ring_header;
+	struct device *dev = adpt->netdev->dev.parent;
+
+	emac_tx_q_bufs_free(adpt);
+	emac_rx_q_bufs_free(adpt);
+
+	dma_free_coherent(dev, ring_header->size,
+			  ring_header->v_addr, ring_header->dma_addr);
+
+	ring_header->v_addr   = NULL;
+	ring_header->dma_addr = 0;
+	ring_header->size     = 0;
+	ring_header->used     = 0;
+}
+
+/* Initialize descriptor rings */
+static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt)
+{
+	unsigned int i;
+
+	adpt->tx_q.tpd.produce_idx = 0;
+	adpt->tx_q.tpd.consume_idx = 0;
+	for (i = 0; i < adpt->tx_q.tpd.count; i++)
+		adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;
+
+	adpt->rx_q.rrd.produce_idx = 0;
+	adpt->rx_q.rrd.consume_idx = 0;
+	adpt->rx_q.rfd.produce_idx = 0;
+	adpt->rx_q.rfd.consume_idx = 0;
+	for (i = 0; i < adpt->rx_q.rfd.count; i++)
+		adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
+}
+
+/* Produce new receive free descriptor */
+static void emac_mac_rx_rfd_create(struct emac_adapter *adpt,
+				   struct emac_rx_queue *rx_q,
+				   dma_addr_t addr)
+{
+	u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);
+
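+	/* An RFD is just the 64-bit DMA address of an empty receive buffer,
+	 * stored as two 32-bit words.
+	 */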
+	*(hw_rfd++) = lower_32_bits(addr);
+	*hw_rfd = upper_32_bits(addr);
+
+	if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
+		rx_q->rfd.produce_idx = 0;
+}
+
+/* Fill up receive queue's RFD with preallocated receive buffers */
+static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
+				    struct emac_rx_queue *rx_q)
+{
+	struct emac_buffer *curr_rxbuf;
+	struct emac_buffer *next_rxbuf;
+	unsigned int count = 0;
+	u32 next_produce_idx;
+
+	next_produce_idx = rx_q->rfd.produce_idx + 1;
+	if (next_produce_idx == rx_q->rfd.count)
+		next_produce_idx = 0;
+
+	curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
+	next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
+
+	/* The ring always keeps one blank rx_buffer: stop filling as soon as
+	 * the entry after the one being filled is already mapped.
+	 */
+	while (!next_rxbuf->dma_addr) {
+		struct sk_buff *skb;
+		int ret;
+
+		skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
+		if (!skb)
+			break;
+
+		curr_rxbuf->dma_addr =
+			dma_map_single(adpt->netdev->dev.parent, skb->data,
+				       adpt->rxbuf_size, DMA_FROM_DEVICE);
+
+		ret = dma_mapping_error(adpt->netdev->dev.parent,
+					curr_rxbuf->dma_addr);
+		if (ret) {
+			dev_kfree_skb(skb);
+			break;
+		}
+		curr_rxbuf->skb = skb;
+		curr_rxbuf->length = adpt->rxbuf_size;
+
+		emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr);
+		next_produce_idx = rx_q->rfd.produce_idx + 1;
+		if (next_produce_idx == rx_q->rfd.count)
+			next_produce_idx = 0;
+
+		curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
+		next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
+		count++;
+	}
+
+	if (count) {
+		u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
+				rx_q->produce_mask;
+		emac_reg_update32(adpt->base + rx_q->produce_reg,
+				  rx_q->produce_mask, prod_idx);
+	}
+}
+
+static void emac_adjust_link(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+	struct phy_device *phydev = netdev->phydev;
+
+	if (phydev->link) {
+		emac_mac_start(adpt);
+		emac_sgmii_link_change(adpt, true);
+	} else {
+		emac_sgmii_link_change(adpt, false);
+		emac_mac_stop(adpt);
+	}
+
+	phy_print_status(phydev);
+}
+
+/* Bringup the interface/HW */
+int emac_mac_up(struct emac_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	int ret;
+
+	emac_mac_rx_tx_ring_reset_all(adpt);
+	emac_mac_config(adpt);
+	emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
+
+	adpt->phydev->irq = PHY_POLL;
+	ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
+				 PHY_INTERFACE_MODE_SGMII);
+	if (ret) {
+		netdev_err(adpt->netdev, "could not connect phy\n");
+		return ret;
+	}
+
+	phy_attached_print(adpt->phydev, NULL);
+
+	/* enable mac irq */
+	writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
+	writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
+
+	phy_start(adpt->phydev);
+
+	napi_enable(&adpt->rx_q.napi);
+	netif_start_queue(netdev);
+
+	return 0;
+}
+
+/* Bring down the interface/HW */
+void emac_mac_down(struct emac_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+
+	netif_stop_queue(netdev);
+	napi_disable(&adpt->rx_q.napi);
+
+	phy_stop(adpt->phydev);
+
+	/* Interrupts must be disabled before the PHY is disconnected, to
+	 * avoid a race condition where adjust_link is null when we get
+	 * an interrupt.
+	 */
+	writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
+	writel(0, adpt->base + EMAC_INT_MASK);
+	synchronize_irq(adpt->irq.irq);
+
+	phy_disconnect(adpt->phydev);
+
+	emac_mac_reset(adpt);
+
+	emac_tx_q_descs_free(adpt);
+	netdev_reset_queue(adpt->netdev);
+	emac_rx_q_free_descs(adpt);
+}
+
+/* Consume next received packet descriptor */
+static bool emac_rx_process_rrd(struct emac_adapter *adpt,
+				struct emac_rx_queue *rx_q,
+				struct emac_rrd *rrd)
+{
+	u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx);
+
+	rrd->word[3] = *(hw_rrd + 3);
+
+	if (!RRD_UPDT(rrd))
+		return false;
+
+	rrd->word[4] = 0;
+	rrd->word[5] = 0;
+
+	rrd->word[0] = *(hw_rrd++);
+	rrd->word[1] = *(hw_rrd++);
+	rrd->word[2] = *(hw_rrd++);
+
+	if (unlikely(RRD_NOR(rrd) != 1)) {
+		netdev_err(adpt->netdev,
+			   "error: multi-RFD not support yet! nor:%lu\n",
+			   RRD_NOR(rrd));
+	}
+
+	/* mark rrd as processed */
+	RRD_UPDT_SET(rrd, 0);
+	*hw_rrd = rrd->word[3];
+
+	if (++rx_q->rrd.consume_idx == rx_q->rrd.count)
+		rx_q->rrd.consume_idx = 0;
+
+	return true;
+}
+
+/* Produce new transmit descriptor */
+static void emac_tx_tpd_create(struct emac_adapter *adpt,
+			       struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
+{
+	u32 *hw_tpd;
+
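+	/* Remember where this TPD lands so that emac_tx_tpd_mark_last() can
+	 * set the last-fragment bit on it later.
+	 */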
+	tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
+	hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);
+
+	if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
+		tx_q->tpd.produce_idx = 0;
+
+	*(hw_tpd++) = tpd->word[0];
+	*(hw_tpd++) = tpd->word[1];
+	*(hw_tpd++) = tpd->word[2];
+	*hw_tpd = tpd->word[3];
+}
+
+/* Mark the last transmit descriptor as such (for the transmit packet) */
+static void emac_tx_tpd_mark_last(struct emac_adapter *adpt,
+				  struct emac_tx_queue *tx_q)
+{
+	u32 *hw_tpd =
+		EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
+	u32 tmp_tpd;
+
+	tmp_tpd = *(hw_tpd + 1);
+	tmp_tpd |= EMAC_TPD_LAST_FRAGMENT;
+	*(hw_tpd + 1) = tmp_tpd;
+}
+
+static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd)
+{
+	struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
+	u32 consume_idx = RRD_SI(rrd);
+	unsigned int i;
+
+	for (i = 0; i < RRD_NOR(rrd); i++) {
+		rfbuf[consume_idx].skb = NULL;
+		if (++consume_idx == rx_q->rfd.count)
+			consume_idx = 0;
+	}
+
+	rx_q->rfd.consume_idx = consume_idx;
+	rx_q->rfd.process_idx = consume_idx;
+}
+
+/* Push the received skb to upper layers */
+static void emac_receive_skb(struct emac_rx_queue *rx_q,
+			     struct sk_buff *skb,
+			     u16 vlan_tag, bool vlan_flag)
+{
+	if (vlan_flag) {
+		u16 vlan;
+
+		EMAC_TAG_TO_VLAN(vlan_tag, vlan);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+	}
+
+	napi_gro_receive(&rx_q->napi, skb);
+}
+
+/* Process receive event */
+void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
+			 int *num_pkts, int max_pkts)
+{
+	u32 proc_idx, hw_consume_idx, num_consume_pkts;
+	struct net_device *netdev  = adpt->netdev;
+	struct emac_buffer *rfbuf;
+	unsigned int count = 0;
+	struct emac_rrd rrd;
+	struct sk_buff *skb;
+	u32 reg;
+
+	reg = readl_relaxed(adpt->base + rx_q->consume_reg);
+
+	hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift;
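+	/* Count how many descriptors the HW has returned since the last
+	 * pass, accounting for consume-index wraparound.
+	 */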
+	num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ?
+		(hw_consume_idx -  rx_q->rrd.consume_idx) :
+		(hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx);
+
+	do {
+		if (!num_consume_pkts)
+			break;
+
+		if (!emac_rx_process_rrd(adpt, rx_q, &rrd))
+			break;
+
+		if (likely(RRD_NOR(&rrd) == 1)) {
+			/* good receive */
+			rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd));
+			dma_unmap_single(adpt->netdev->dev.parent,
+					 rfbuf->dma_addr, rfbuf->length,
+					 DMA_FROM_DEVICE);
+			rfbuf->dma_addr = 0;
+			skb = rfbuf->skb;
+		} else {
+			netdev_err(adpt->netdev,
+				   "error: multi-RFD not support yet!\n");
+			break;
+		}
+		emac_rx_rfd_clean(rx_q, &rrd);
+		num_consume_pkts--;
+		count++;
+
+		/* Due to a HW issue in L4 checksum detection (UDP/TCP frags
+		 * with DF set are marked as error), drop packets based on the
+		 * error mask rather than the summary bit (ignoring L4F errors)
+		 */
+		if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) {
+			netif_dbg(adpt, rx_status, adpt->netdev,
+				  "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n",
+				  rrd.word[0], rrd.word[1],
+				  rrd.word[2], rrd.word[3]);
+
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
+		skb->dev = netdev;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		if (netdev->features & NETIF_F_RXCSUM)
+			skb->ip_summed = RRD_L4F(&rrd) ?
+					  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+		else
+			skb_checksum_none_assert(skb);
+
+		emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
+				 (bool)RRD_CVTAG(&rrd));
+
+		(*num_pkts)++;
+	} while (*num_pkts < max_pkts);
+
+	if (count) {
+		proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
+				rx_q->process_mask;
+		emac_reg_update32(adpt->base + rx_q->process_reg,
+				  rx_q->process_mask, proc_idx);
+		emac_mac_rx_descs_refill(adpt, rx_q);
+	}
+}
+
+/* get the number of free transmit descriptors */
+static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q)
+{
+	u32 produce_idx = tx_q->tpd.produce_idx;
+	u32 consume_idx = tx_q->tpd.consume_idx;
+
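+	/* One slot is always left unused so that a full ring can be told
+	 * apart from an empty one (produce == consume means empty).
+	 */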
+	return (consume_idx > produce_idx) ?
+		(consume_idx - produce_idx - 1) :
+		(tx_q->tpd.count + consume_idx - produce_idx - 1);
+}
+
+/* Process transmit event */
+void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
+{
+	u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg);
+	u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0;
+	struct emac_buffer *tpbuf;
+
+	hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift;
+
+	while (tx_q->tpd.consume_idx != hw_consume_idx) {
+		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
+		if (tpbuf->dma_addr) {
+			dma_unmap_page(adpt->netdev->dev.parent,
+				       tpbuf->dma_addr, tpbuf->length,
+				       DMA_TO_DEVICE);
+			tpbuf->dma_addr = 0;
+		}
+
+		if (tpbuf->skb) {
+			pkts_compl++;
+			bytes_compl += tpbuf->skb->len;
+			dev_consume_skb_irq(tpbuf->skb);
+			tpbuf->skb = NULL;
+		}
+
+		if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
+			tx_q->tpd.consume_idx = 0;
+	}
+
+	netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl);
+
+	if (netif_queue_stopped(adpt->netdev))
+		if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1))
+			netif_wake_queue(adpt->netdev);
+}
+
+/* Initialize all queue data structures */
+void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
+				  struct emac_adapter *adpt)
+{
+	adpt->rx_q.netdev = adpt->netdev;
+
+	adpt->rx_q.produce_reg  = EMAC_MAILBOX_0;
+	adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK;
+	adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT;
+
+	adpt->rx_q.process_reg  = EMAC_MAILBOX_0;
+	adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK;
+	adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT;
+
+	adpt->rx_q.consume_reg  = EMAC_MAILBOX_3;
+	adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK;
+	adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT;
+
+	adpt->rx_q.irq          = &adpt->irq;
+	adpt->rx_q.intr         = adpt->irq.mask & ISR_RX_PKT;
+
+	adpt->tx_q.produce_reg  = EMAC_MAILBOX_15;
+	adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK;
+	adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT;
+
+	adpt->tx_q.consume_reg  = EMAC_MAILBOX_2;
+	adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK;
+	adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT;
+}
+
+/* Fill up transmit descriptors with TSO and Checksum offload information */
+static int emac_tso_csum(struct emac_adapter *adpt,
+			 struct emac_tx_queue *tx_q,
+			 struct sk_buff *skb,
+			 struct emac_tpd *tpd)
+{
+	unsigned int hdr_len;
+	int ret;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (unlikely(ret))
+				return ret;
+		}
+
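+		/* Trim any padding beyond the length reported by the IP
+		 * header so it is not segmented as payload.
+		 */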
+		if (skb->protocol == htons(ETH_P_IP)) {
+			u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
+				       + ntohs(ip_hdr(skb)->tot_len);
+			if (skb->len > pkt_len)
+				pskb_trim(skb, pkt_len);
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (unlikely(skb->len == hdr_len)) {
+			/* we only need to do csum */
+			netif_warn(adpt, tx_err, adpt->netdev,
+				   "tso not needed for packet with 0 data\n");
+			goto do_csum;
+		}
+
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+			ip_hdr(skb)->check = 0;
+			tcp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   0, IPPROTO_TCP, 0);
+			TPD_IPV4_SET(tpd, 1);
+		}
+
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+			/* IPv6 TSO (LSOv2) needs an extra TPD that carries
+			 * the total packet length
+			 */
+			struct emac_tpd extra_tpd;
+
+			memset(tpd, 0, sizeof(*tpd));
+			memset(&extra_tpd, 0, sizeof(extra_tpd));
+
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+			TPD_PKT_LEN_SET(&extra_tpd, skb->len);
+			TPD_LSO_SET(&extra_tpd, 1);
+			TPD_LSOV_SET(&extra_tpd, 1);
+			emac_tx_tpd_create(adpt, tx_q, &extra_tpd);
+			TPD_LSOV_SET(tpd, 1);
+		}
+
+		TPD_LSO_SET(tpd, 1);
+		TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
+		TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
+		return 0;
+	}
+
+do_csum:
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		unsigned int css, cso;
+
+		cso = skb_transport_offset(skb);
+		if (unlikely(cso & 0x1)) {
+			netdev_err(adpt->netdev,
+				   "error: payload offset should be even\n");
+			return -EINVAL;
+		}
+		css = cso + skb->csum_offset;
+
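+		/* Both offsets are programmed in units of 16-bit words, hence
+		 * the even-offset requirement above and the shift by one.
+		 */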
+		TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
+		TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
+		TPD_CSX_SET(tpd, 1);
+	}
+
+	return 0;
+}
+
+/* Fill up transmit descriptors */
+static void emac_tx_fill_tpd(struct emac_adapter *adpt,
+			     struct emac_tx_queue *tx_q, struct sk_buff *skb,
+			     struct emac_tpd *tpd)
+{
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int first = tx_q->tpd.produce_idx;
+	unsigned int len = skb_headlen(skb);
+	struct emac_buffer *tpbuf = NULL;
+	unsigned int mapped_len = 0;
+	unsigned int i;
+	int count = 0;
+	int ret;
+
+	/* If Large Send Offload is enabled, map the protocol headers first */
+	if (TPD_LSO(tpd)) {
+		mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+		tpbuf->length = mapped_len;
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data),
+					       offset_in_page(skb->data),
+					       tpbuf->length,
+					       DMA_TO_DEVICE);
+		ret = dma_mapping_error(adpt->netdev->dev.parent,
+					tpbuf->dma_addr);
+		if (ret)
+			goto error;
+
+		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+		TPD_BUF_LEN_SET(tpd, tpbuf->length);
+		emac_tx_tpd_create(adpt, tx_q, tpd);
+		count++;
+	}
+
+	if (mapped_len < len) {
+		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+		tpbuf->length = len - mapped_len;
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data +
+							    mapped_len),
+					       offset_in_page(skb->data +
+							      mapped_len),
+					       tpbuf->length, DMA_TO_DEVICE);
+		ret = dma_mapping_error(adpt->netdev->dev.parent,
+					tpbuf->dma_addr);
+		if (ret)
+			goto error;
+
+		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+		TPD_BUF_LEN_SET(tpd, tpbuf->length);
+		emac_tx_tpd_create(adpt, tx_q, tpd);
+		count++;
+	}
+
+	for (i = 0; i < nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
+		tpbuf->length = skb_frag_size(frag);
+		tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
+						   frag, 0, tpbuf->length,
+						   DMA_TO_DEVICE);
+		ret = dma_mapping_error(adpt->netdev->dev.parent,
+					tpbuf->dma_addr);
+		if (ret)
+			goto error;
+
+		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
+		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
+		TPD_BUF_LEN_SET(tpd, tpbuf->length);
+		emac_tx_tpd_create(adpt, tx_q, tpd);
+		count++;
+	}
+
+	/* Ensure all descriptor writes are visible to the device before the
+	 * last-fragment bit is set on the final TPD.
+	 */
+	wmb();
+	emac_tx_tpd_mark_last(adpt, tx_q);
+
+	/* The last buffer info contains the skb address,
+	 * so the skb will be freed after the unmap
+	 */
+	tpbuf->skb = skb;
+
+	return;
+
+error:
+	/* One of the memory mappings failed, so undo everything */
+	tx_q->tpd.produce_idx = first;
+
+	while (count--) {
+		tpbuf = GET_TPD_BUFFER(tx_q, first);
+		dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr,
+			       tpbuf->length, DMA_TO_DEVICE);
+		tpbuf->dma_addr = 0;
+		tpbuf->length = 0;
+
+		if (++first == tx_q->tpd.count)
+			first = 0;
+	}
+
+	dev_kfree_skb(skb);
+}
+
+/* Transmit the packet using specified transmit queue */
+int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
+			 struct sk_buff *skb)
+{
+	struct emac_tpd tpd;
+	u32 prod_idx;
+	int len;
+
+	memset(&tpd, 0, sizeof(tpd));
+
+	if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		u16 tag;
+
+		EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
+		TPD_CVLAN_TAG_SET(&tpd, tag);
+		TPD_INSTC_SET(&tpd, 1);
+	}
+
+	if (skb_network_offset(skb) != ETH_HLEN)
+		TPD_TYP_SET(&tpd, 1);
+
+	len = skb->len;
+	emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
+
+	netdev_sent_queue(adpt->netdev, len);
+
+	/* Make sure there are enough free descriptors to hold one
+	 * maximum-sized SKB.  We need one desc for each fragment,
+	 * one for the checksum (emac_tso_csum), one for TSO,
+	 * and one for the SKB header.
+	 */
+	if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
+		netif_stop_queue(adpt->netdev);
+
+	/* update produce idx */
+	prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
+		    tx_q->produce_mask;
+	emac_reg_update32(adpt->base + tx_q->produce_reg,
+			  tx_q->produce_mask, prod_idx);
+
+	return NETDEV_TX_OK;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-mac.h
new file mode 100644
index 0000000..ae08bdd
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* EMAC DMA HW engine uses three rings:
+ * Tx:
+ *   TPD: Transmit Packet Descriptor ring.
+ * Rx:
+ *   RFD: Receive Free Descriptor ring.
+ *     Ring of descriptors with empty buffers to be filled by Rx HW.
+ *   RRD: Receive Return Descriptor ring.
+ *     Ring of descriptors with buffers filled with received data.
+ */
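+
+/* Illustrative lifecycle, inferred from the ring definitions in this file:
+ * the driver posts empty buffers by advancing the RFD ring's produce index,
+ * the hardware fills them and publishes completed packets on the RRD ring,
+ * and the driver drains the RRD ring at its consume index.  The TPD ring
+ * works the same way in the transmit direction.
+ */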
+
+#ifndef _EMAC_HW_H_
+#define _EMAC_HW_H_
+
+/* EMAC_CSR register offsets */
+#define EMAC_EMAC_WRAPPER_CSR1                                0x000000
+#define EMAC_EMAC_WRAPPER_CSR2                                0x000004
+#define EMAC_EMAC_WRAPPER_TX_TS_LO                            0x000104
+#define EMAC_EMAC_WRAPPER_TX_TS_HI                            0x000108
+#define EMAC_EMAC_WRAPPER_TX_TS_INX                           0x00010c
+
+/* DMA Order Settings */
+enum emac_dma_order {
+	emac_dma_ord_in = 1,
+	emac_dma_ord_enh = 2,
+	emac_dma_ord_out = 4
+};
+
+enum emac_dma_req_block {
+	emac_dma_req_128 = 0,
+	emac_dma_req_256 = 1,
+	emac_dma_req_512 = 2,
+	emac_dma_req_1024 = 3,
+	emac_dma_req_2048 = 4,
+	emac_dma_req_4096 = 5
+};
+
+/* Returns the value of bits lo...hi of a little-endian descriptor word */
+#define BITS_GET(val, lo, hi) ((le32_to_cpu(val) & GENMASK((hi), (lo))) >> (lo))
+#define BITS_SET(val, lo, hi, new_val) \
+	val = cpu_to_le32((le32_to_cpu(val) & (~GENMASK((hi), (lo)))) |	\
+		(((new_val) << (lo)) & GENMASK((hi), (lo))))
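+
+/* Example (illustrative only): BITS_GET(w, 16, 19) evaluates to
+ *
+ *	(le32_to_cpu(w) & GENMASK(19, 16)) >> 16
+ *
+ * i.e. it reads bits 19..16 of the little-endian descriptor word w, and
+ * BITS_SET() is the matching read-modify-write insert.
+ */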
+
+/* RRD (Receive Return Descriptor) */
+struct emac_rrd {
+	u32	word[6];
+
+/* number of RFDs */
+#define RRD_NOR(rrd)			BITS_GET((rrd)->word[0], 16, 19)
+/* start consumer index of rfd-ring */
+#define RRD_SI(rrd)			BITS_GET((rrd)->word[0], 20, 31)
+/* vlan-tag (CVID, CFI and PRI) */
+#define RRD_CVALN_TAG(rrd)		BITS_GET((rrd)->word[2], 0, 15)
+/* length of the packet */
+#define RRD_PKT_SIZE(rrd)		BITS_GET((rrd)->word[3], 0, 13)
+/* L4(TCP/UDP) checksum failed */
+#define RRD_L4F(rrd)			BITS_GET((rrd)->word[3], 14, 14)
+/* vlan tagged */
+#define RRD_CVTAG(rrd)			BITS_GET((rrd)->word[3], 16, 16)
+/* When set, indicates that the descriptor is updated by the IP core.
+ * When cleared, indicates that the descriptor is invalid.
+ */
+#define RRD_UPDT(rrd)			BITS_GET((rrd)->word[3], 31, 31)
+#define RRD_UPDT_SET(rrd, val)		BITS_SET((rrd)->word[3], 31, 31, val)
+/* timestamp low */
+#define RRD_TS_LOW(rrd)			BITS_GET((rrd)->word[4], 0, 29)
+/* timestamp high */
+#define RRD_TS_HI(rrd)			le32_to_cpu((rrd)->word[5])
+};
+
+/* TPD (Transmit Packet Descriptor) */
+struct emac_tpd {
+	u32				word[4];
+
+/* Number of bytes of the transmit packet (includes the 4-byte CRC) */
+#define TPD_BUF_LEN_SET(tpd, val)	BITS_SET((tpd)->word[0], 0, 15, val)
+/* Custom Checksum Offload: When set, ask IP core to offload custom checksum */
+#define TPD_CSX_SET(tpd, val)		BITS_SET((tpd)->word[1], 8, 8, val)
+/* TCP Large Send Offload: When set, ask IP core to offload TCP Large Send */
+#define TPD_LSO(tpd)			BITS_GET((tpd)->word[1], 12, 12)
+#define TPD_LSO_SET(tpd, val)		BITS_SET((tpd)->word[1], 12, 12, val)
+/* Large Send Offload Version: When set, indicates this is an LSOv2
+ * (for both IPv4 and IPv6). When cleared, indicates this is an LSOv1
+ * (only for IPv4).
+ */
+#define TPD_LSOV_SET(tpd, val)		BITS_SET((tpd)->word[1], 13, 13, val)
+/* IPv4 packet: When set, indicates this is an IPv4 packet; this bit is only
+ * valid in the LSOv2 format.
+ */
+#define TPD_IPV4_SET(tpd, val)		BITS_SET((tpd)->word[1], 16, 16, val)
+/* 0: Ethernet   frame (DA+SA+TYPE+DATA+CRC)
+ * 1: IEEE 802.3 frame (DA+SA+LEN+DSAP+SSAP+CTL+ORG+TYPE+DATA+CRC)
+ */
+#define TPD_TYP_SET(tpd, val)		BITS_SET((tpd)->word[1], 17, 17, val)
+/* Low-32bit Buffer Address */
+#define TPD_BUFFER_ADDR_L_SET(tpd, val)	((tpd)->word[2] = cpu_to_le32(val))
+/* CVLAN Tag to be inserted if INS_VLAN_TAG is set, CVLAN TPID based on global
+ * register configuration.
+ */
+#define TPD_CVLAN_TAG_SET(tpd, val)	BITS_SET((tpd)->word[3], 0, 15, val)
+/* Insert CVLAN Tag: When set, ask MAC to insert the CVLAN tag into the
+ * outgoing packet
+ */
+#define TPD_INSTC_SET(tpd, val)		BITS_SET((tpd)->word[3], 17, 17, val)
+/* High 14 bits of the buffer address; the full 64-bit address is
+ * {DESC_CTRL_11_TX_DATA_HIADDR[17:0], (register) BUFFER_ADDR_H, BUFFER_ADDR_L}.
+ * TPD_BUFFER_ADDR_H is extended to [31:18] because we never enable
+ * timestamping.
+ */
+#define TPD_BUFFER_ADDR_H_SET(tpd, val)	BITS_SET((tpd)->word[3], 18, 31, val)
+/* Format D. Word offset from the 1st byte of this packet at which to start
+ * calculating the custom checksum.
+ */
+#define TPD_PAYLOAD_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
+/* Format D. Word offset from the 1st byte of this packet at which to write
+ * the custom checksum.
+ */
+#define TPD_CXSUM_OFFSET_SET(tpd, val)	BITS_SET((tpd)->word[1], 18, 25, val)
+
+/* Format C. TCP header offset from the 1st byte of this packet (in bytes) */
+#define TPD_TCPHDR_OFFSET_SET(tpd, val)	BITS_SET((tpd)->word[1], 0, 7, val)
+/* Format C. MSS (Maximum Segment Size) obtained from the protocol layer
+ * (in bytes)
+ */
+#define TPD_MSS_SET(tpd, val)		BITS_SET((tpd)->word[1], 18, 30, val)
+/* packet length in ext tpd */
+#define TPD_PKT_LEN_SET(tpd, val)	((tpd)->word[2] = cpu_to_le32(val))
+};
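+
+/* Minimal single-buffer fill sketch (mirrors emac_tx_fill_tpd()); dma_addr
+ * and len are illustrative names for an already-mapped buffer:
+ *
+ *	TPD_BUFFER_ADDR_L_SET(&tpd, lower_32_bits(dma_addr));
+ *	TPD_BUFFER_ADDR_H_SET(&tpd, upper_32_bits(dma_addr));
+ *	TPD_BUF_LEN_SET(&tpd, len);
+ */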
+
+/* emac_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd)
+ */
+struct emac_ring_header {
+	void			*v_addr;	/* virtual address */
+	dma_addr_t		dma_addr;	/* dma address */
+	size_t			size;		/* length in bytes */
+	size_t			used;
+};
+
+/* emac_buffer is a wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct emac_buffer {
+	struct sk_buff		*skb;		/* socket buffer */
+	u16			length;		/* rx buffer length */
+	dma_addr_t		dma_addr;	/* dma address */
+};
+
+/* receive free descriptor (rfd) ring */
+struct emac_rfd_ring {
+	struct emac_buffer	*rfbuff;
+	u32			*v_addr;	/* virtual address */
+	dma_addr_t		dma_addr;	/* dma address */
+	size_t			size;		/* length in bytes */
+	unsigned int		count;		/* number of desc in the ring */
+	unsigned int		produce_idx;
+	unsigned int		process_idx;
+	unsigned int		consume_idx;	/* unused */
+};
+
+/* Receive Return Descriptor (RRD) ring */
+struct emac_rrd_ring {
+	u32			*v_addr;	/* virtual address */
+	dma_addr_t		dma_addr;	/* physical address */
+	size_t			size;		/* length in bytes */
+	unsigned int		count;		/* number of desc in the ring */
+	unsigned int		produce_idx;	/* unused */
+	unsigned int		consume_idx;
+};
+
+/* Rx queue */
+struct emac_rx_queue {
+	struct net_device	*netdev;	/* netdev ring belongs to */
+	struct emac_rrd_ring	rrd;
+	struct emac_rfd_ring	rfd;
+	struct napi_struct	napi;
+	struct emac_irq		*irq;
+
+	u32			intr;
+	u32			produce_mask;
+	u32			process_mask;
+	u32			consume_mask;
+
+	u16			produce_reg;
+	u16			process_reg;
+	u16			consume_reg;
+
+	u8			produce_shift;
+	u8			process_shft;
+	u8			consume_shift;
+};
+
+/* Transmit Packet Descriptor (TPD) ring */
+struct emac_tpd_ring {
+	struct emac_buffer	*tpbuff;
+	u32			*v_addr;	/* virtual address */
+	dma_addr_t		dma_addr;	/* dma address */
+
+	size_t			size;		/* length in bytes */
+	unsigned int		count;		/* number of desc in the ring */
+	unsigned int		produce_idx;
+	unsigned int		consume_idx;
+	unsigned int		last_produce_idx;
+};
+
+/* Tx queue */
+struct emac_tx_queue {
+	struct emac_tpd_ring	tpd;
+
+	u32			produce_mask;
+	u32			consume_mask;
+
+	u16			max_packets;	/* max packets per interrupt */
+	u16			produce_reg;
+	u16			consume_reg;
+
+	u8			produce_shift;
+	u8			consume_shift;
+};
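+
+/* The produce_reg/produce_shift/produce_mask triplet locates the producer
+ * index within its hardware register; emac_mac_tx_buf_send() publishes new
+ * TPDs by writing
+ *
+ *	(tpd.produce_idx << produce_shift) & produce_mask
+ *
+ * to produce_reg.
+ */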
+
+struct emac_adapter;
+
+int  emac_mac_up(struct emac_adapter *adpt);
+void emac_mac_down(struct emac_adapter *adpt);
+void emac_mac_reset(struct emac_adapter *adpt);
+void emac_mac_stop(struct emac_adapter *adpt);
+void emac_mac_mode_config(struct emac_adapter *adpt);
+void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
+			 int *num_pkts, int max_pkts);
+int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
+			 struct sk_buff *skb);
+void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
+void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
+				  struct emac_adapter *adpt);
+int  emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt);
+void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt);
+void emac_mac_multicast_addr_clear(struct emac_adapter *adpt);
+void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr);
+
+#endif /*_EMAC_HW_H_*/
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-phy.c
new file mode 100644
index 0000000..5c94af7
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
+ */
+
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+#include <linux/acpi.h>
+#include "emac.h"
+
+/* EMAC base register offsets */
+#define EMAC_MDIO_CTRL                                        0x001414
+#define EMAC_PHY_STS                                          0x001418
+#define EMAC_MDIO_EX_CTRL                                     0x001440
+
+/* EMAC_MDIO_CTRL */
+#define MDIO_MODE                                              BIT(30)
+#define MDIO_PR                                                BIT(29)
+#define MDIO_AP_EN                                             BIT(28)
+#define MDIO_BUSY                                              BIT(27)
+#define MDIO_CLK_SEL_BMSK                                    0x7000000
+#define MDIO_CLK_SEL_SHFT                                           24
+#define MDIO_START                                             BIT(23)
+#define SUP_PREAMBLE                                           BIT(22)
+#define MDIO_RD_NWR                                            BIT(21)
+#define MDIO_REG_ADDR_BMSK                                    0x1f0000
+#define MDIO_REG_ADDR_SHFT                                          16
+#define MDIO_DATA_BMSK                                          0xffff
+#define MDIO_DATA_SHFT                                               0
+
+/* EMAC_PHY_STS */
+#define PHY_ADDR_BMSK                                         0x1f0000
+#define PHY_ADDR_SHFT                                               16
+
+#define MDIO_CLK_25_4                                                0
+#define MDIO_CLK_25_28                                               7
+
+#define MDIO_WAIT_TIMES                                           1000
+#define MDIO_STATUS_DELAY_TIME                                       1
+
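+/* MDIO reads and writes share the same shape: select the PHY address in
+ * EMAC_PHY_STS, compose a command word (clock selection, register number,
+ * MDIO_START and, for reads, MDIO_RD_NWR) in EMAC_MDIO_CTRL, then poll
+ * until the MDIO_START/MDIO_BUSY bits clear.
+ */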
+static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+	struct emac_adapter *adpt = bus->priv;
+	u32 reg;
+
+	emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+			  (addr << PHY_ADDR_SHFT));
+
+	reg = SUP_PREAMBLE |
+	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
+	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
+	      MDIO_START | MDIO_RD_NWR;
+
+	writel(reg, adpt->base + EMAC_MDIO_CTRL);
+
+	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+			       !(reg & (MDIO_START | MDIO_BUSY)),
+			       MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
+		return -EIO;
+
+	return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+}
+
+static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+	struct emac_adapter *adpt = bus->priv;
+	u32 reg;
+
+	emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+			  (addr << PHY_ADDR_SHFT));
+
+	reg = SUP_PREAMBLE |
+		((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
+		((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
+		((val << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) |
+		MDIO_START;
+
+	writel(reg, adpt->base + EMAC_MDIO_CTRL);
+
+	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
+			       !(reg & (MDIO_START | MDIO_BUSY)),
+			       MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
+		return -EIO;
+
+	return 0;
+}
+
+/* Configure the MDIO bus and connect the external PHY */
+int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mii_bus *mii_bus;
+	int ret;
+
+	/* Create the mii_bus object for talking to the MDIO bus */
+	adpt->mii_bus = mii_bus = devm_mdiobus_alloc(&pdev->dev);
+	if (!mii_bus)
+		return -ENOMEM;
+
+	mii_bus->name = "emac-mdio";
+	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+	mii_bus->read = emac_mdio_read;
+	mii_bus->write = emac_mdio_write;
+	mii_bus->parent = &pdev->dev;
+	mii_bus->priv = adpt;
+
+	if (has_acpi_companion(&pdev->dev)) {
+		u32 phy_addr;
+
+		ret = mdiobus_register(mii_bus);
+		if (ret) {
+			dev_err(&pdev->dev, "could not register mdio bus\n");
+			return ret;
+		}
+		ret = device_property_read_u32(&pdev->dev, "phy-channel",
+					       &phy_addr);
+		if (ret)
+			/* If we can't read a valid phy address, then assume
+			 * that there is only one phy on this mdio bus.
+			 */
+			adpt->phydev = phy_find_first(mii_bus);
+		else
+			adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+
+		/* of_phy_find_device() claims a reference to the phydev,
+		 * so we do that here manually as well. When the driver
+		 * later unloads, it can unilaterally drop the reference
+		 * without worrying about ACPI vs DT.
+		 */
+		if (adpt->phydev)
+			get_device(&adpt->phydev->mdio.dev);
+	} else {
+		struct device_node *phy_np;
+
+		ret = of_mdiobus_register(mii_bus, np);
+		if (ret) {
+			dev_err(&pdev->dev, "could not register mdio bus\n");
+			return ret;
+		}
+
+		phy_np = of_parse_phandle(np, "phy-handle", 0);
+		adpt->phydev = of_phy_find_device(phy_np);
+		of_node_put(phy_np);
+	}
+
+	if (!adpt->phydev) {
+		dev_err(&pdev->dev, "could not find external phy\n");
+		mdiobus_unregister(mii_bus);
+		return -ENODEV;
+	}
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-phy.h
new file mode 100644
index 0000000..7d703e5
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-phy.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _EMAC_PHY_H_
+#define _EMAC_PHY_H_
+
+struct emac_adapter;
+
+int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt);
+
+#endif /* _EMAC_PHY_H_ */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
new file mode 100644
index 0000000..bd9ad32
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. FSM9900 EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include "emac.h"
+
+/* EMAC_QSERDES register offsets */
+#define EMAC_QSERDES_COM_SYS_CLK_CTRL		0x0000
+#define EMAC_QSERDES_COM_PLL_CNTRL		0x0014
+#define EMAC_QSERDES_COM_PLL_IP_SETI		0x0018
+#define EMAC_QSERDES_COM_PLL_CP_SETI		0x0024
+#define EMAC_QSERDES_COM_PLL_IP_SETP		0x0028
+#define EMAC_QSERDES_COM_PLL_CP_SETP		0x002c
+#define EMAC_QSERDES_COM_SYSCLK_EN_SEL		0x0038
+#define EMAC_QSERDES_COM_RESETSM_CNTRL		0x0040
+#define EMAC_QSERDES_COM_PLLLOCK_CMP1		0x0044
+#define EMAC_QSERDES_COM_PLLLOCK_CMP2		0x0048
+#define EMAC_QSERDES_COM_PLLLOCK_CMP3		0x004c
+#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN		0x0050
+#define EMAC_QSERDES_COM_DEC_START1		0x0064
+#define EMAC_QSERDES_COM_DIV_FRAC_START1	0x0098
+#define EMAC_QSERDES_COM_DIV_FRAC_START2	0x009c
+#define EMAC_QSERDES_COM_DIV_FRAC_START3	0x00a0
+#define EMAC_QSERDES_COM_DEC_START2		0x00a4
+#define EMAC_QSERDES_COM_PLL_CRCTRL		0x00ac
+#define EMAC_QSERDES_COM_RESET_SM		0x00bc
+#define EMAC_QSERDES_TX_BIST_MODE_LANENO	0x0100
+#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL	0x0108
+#define EMAC_QSERDES_TX_TX_DRV_LVL		0x010c
+#define EMAC_QSERDES_TX_LANE_MODE		0x0150
+#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN	0x0170
+#define EMAC_QSERDES_RX_CDR_CONTROL		0x0200
+#define EMAC_QSERDES_RX_CDR_CONTROL2		0x0210
+#define EMAC_QSERDES_RX_RX_EQ_GAIN12		0x0230
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_SERDES_START		0x0000
+#define EMAC_SGMII_PHY_CMN_PWR_CTRL		0x0004
+#define EMAC_SGMII_PHY_RX_PWR_CTRL		0x0008
+#define EMAC_SGMII_PHY_TX_PWR_CTRL		0x000C
+#define EMAC_SGMII_PHY_LANE_CTRL1		0x0018
+#define EMAC_SGMII_PHY_CDR_CTRL0		0x0058
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0		0x0080
+#define EMAC_SGMII_PHY_INTERRUPT_MASK		0x00b4
+
+#define PLL_IPSETI(x)				((x) & 0x3f)
+
+#define PLL_CPSETI(x)				((x) & 0xff)
+
+#define PLL_IPSETP(x)				((x) & 0x3f)
+
+#define PLL_CPSETP(x)				((x) & 0x1f)
+
+#define PLL_RCTRL(x)				(((x) & 0xf) << 4)
+#define PLL_CCTRL(x)				((x) & 0xf)
+
+#define LANE_MODE(x)				((x) & 0x1f)
+
+#define SYSCLK_CM				BIT(4)
+#define SYSCLK_AC_COUPLE			BIT(3)
+
+#define OCP_EN					BIT(5)
+#define PLL_DIV_FFEN				BIT(2)
+#define PLL_DIV_ORD				BIT(1)
+
+#define SYSCLK_SEL_CMOS				BIT(3)
+
+#define FRQ_TUNE_MODE				BIT(4)
+
+#define PLLLOCK_CMP_EN				BIT(0)
+
+#define DEC_START1_MUX				BIT(7)
+#define DEC_START1(x)				((x) & 0x7f)
+
+#define DIV_FRAC_START_MUX			BIT(7)
+#define DIV_FRAC_START(x)			((x) & 0x7f)
+
+#define DIV_FRAC_START3_MUX			BIT(4)
+#define DIV_FRAC_START3(x)			((x) & 0xf)
+
+#define DEC_START2_MUX				BIT(1)
+#define DEC_START2				BIT(0)
+
+#define READY					BIT(5)
+
+#define TX_EMP_POST1_LVL_MUX			BIT(5)
+#define TX_EMP_POST1_LVL(x)			((x) & 0x1f)
+
+#define TX_DRV_LVL_MUX				BIT(4)
+#define TX_DRV_LVL(x)				((x) & 0xf)
+
+#define EMP_EN_MUX				BIT(1)
+#define EMP_EN					BIT(0)
+
+#define SECONDORDERENABLE			BIT(6)
+#define FIRSTORDER_THRESH(x)			(((x) & 0x7) << 3)
+#define SECONDORDERGAIN(x)			((x) & 0x7)
+
+#define RX_EQ_GAIN2(x)				(((x) & 0xf) << 4)
+#define RX_EQ_GAIN1(x)				((x) & 0xf)
+
+#define SERDES_START				BIT(0)
+
+#define BIAS_EN					BIT(6)
+#define PLL_EN					BIT(5)
+#define SYSCLK_EN				BIT(4)
+#define CLKBUF_L_EN				BIT(3)
+#define PLL_TXCLK_EN				BIT(1)
+#define PLL_RXCLK_EN				BIT(0)
+
+#define L0_RX_SIGDET_EN				BIT(7)
+#define L0_RX_TERM_MODE(x)			(((x) & 3) << 4)
+#define L0_RX_I_EN				BIT(1)
+
+#define L0_TX_EN				BIT(5)
+#define L0_CLKBUF_EN				BIT(4)
+#define L0_TRAN_BIAS_EN				BIT(1)
+
+#define L0_RX_EQUALIZE_ENABLE			BIT(6)
+#define L0_RESET_TSYNC_EN			BIT(4)
+#define L0_DRV_LVL(x)				((x) & 0xf)
+
+#define PWRDN_B					BIT(0)
+#define CDR_MAX_CNT(x)				((x) & 0xff)
+
+#define PLLLOCK_CMP(x)				((x) & 0xff)
+
+#define SERDES_START_WAIT_TIMES			100
+
+struct emac_reg_write {
+	unsigned int offset;
+	u32 val;
+};
+
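+/* Write each {offset, val} pair in the table to base + offset, in order */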
+static void emac_reg_write_all(void __iomem *base,
+			       const struct emac_reg_write *itr, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; ++itr, ++i)
+		writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write physical_coding_sublayer_programming[] = {
+	{EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+	{EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+	{EMAC_SGMII_PHY_CMN_PWR_CTRL,
+		BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN},
+	{EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN},
+	{EMAC_SGMII_PHY_RX_PWR_CTRL,
+		L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
+	{EMAC_SGMII_PHY_CMN_PWR_CTRL,
+		BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN |
+		PLL_RXCLK_EN},
+	{EMAC_SGMII_PHY_LANE_CTRL1,
+		L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN | L0_DRV_LVL(15)},
+};
+
+static const struct emac_reg_write sysclk_refclk_setting[] = {
+	{EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS},
+	{EMAC_QSERDES_COM_SYS_CLK_CTRL,	SYSCLK_CM | SYSCLK_AC_COUPLE},
+};
+
+static const struct emac_reg_write pll_setting[] = {
+	{EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)},
+	{EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
+	{EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
+	{EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
+	{EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
+	{EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD},
+	{EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
+	{EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
+	{EMAC_QSERDES_COM_DIV_FRAC_START1,
+		DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
+	{EMAC_QSERDES_COM_DIV_FRAC_START2,
+		DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
+	{EMAC_QSERDES_COM_DIV_FRAC_START3,
+		DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
+	{EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
+	{EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
+	{EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
+	{EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
+	{EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
+};
+
+static const struct emac_reg_write cdr_setting[] = {
+	{EMAC_QSERDES_RX_CDR_CONTROL,
+		SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)},
+	{EMAC_QSERDES_RX_CDR_CONTROL2,
+		SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)},
+};
+
+static const struct emac_reg_write tx_rx_setting[] = {
+	{EMAC_QSERDES_TX_BIST_MODE_LANENO, 0},
+	{EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)},
+	{EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN},
+	{EMAC_QSERDES_TX_TX_EMP_POST1_LVL,
+		TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)},
+	{EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)},
+	{EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)},
+};
+
+int emac_sgmii_init_fsm9900(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *phy = &adpt->phy;
+	unsigned int i;
+
+	emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
+			   ARRAY_SIZE(physical_coding_sublayer_programming));
+	emac_reg_write_all(phy->base, sysclk_refclk_setting,
+			   ARRAY_SIZE(sysclk_refclk_setting));
+	emac_reg_write_all(phy->base, pll_setting, ARRAY_SIZE(pll_setting));
+	emac_reg_write_all(phy->base, cdr_setting, ARRAY_SIZE(cdr_setting));
+	emac_reg_write_all(phy->base, tx_rx_setting, ARRAY_SIZE(tx_rx_setting));
+
+	/* Power up the Ser/Des engine */
+	writel(SERDES_START, phy->base + EMAC_SGMII_PHY_SERDES_START);
+
+	for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+		if (readl(phy->base + EMAC_QSERDES_COM_RESET_SM) & READY)
+			break;
+		usleep_range(100, 200);
+	}
+
+	if (i == SERDES_START_WAIT_TIMES) {
+		netdev_err(adpt->netdev, "error: ser/des failed to start\n");
+		return -EIO;
+	}
+
+	/* Mask out all the SGMII interrupts */
+	writel(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
new file mode 100644
index 0000000..b29148c
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. QDF2400 EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include "emac.h"
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_TX_PWR_CTRL		0x000C
+#define EMAC_SGMII_PHY_LANE_CTRL1		0x0018
+#define EMAC_SGMII_PHY_CDR_CTRL0		0x0058
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0		0x0080
+#define EMAC_SGMII_PHY_RESET_CTRL		0x00a8
+#define EMAC_SGMII_PHY_INTERRUPT_MASK		0x00b4
+
+/* SGMII digital lane registers */
+#define EMAC_SGMII_LN_DRVR_CTRL0		0x000C
+#define EMAC_SGMII_LN_DRVR_CTRL1		0x0010
+#define EMAC_SGMII_LN_DRVR_TAP_EN		0x0018
+#define EMAC_SGMII_LN_TX_MARGINING		0x001C
+#define EMAC_SGMII_LN_TX_PRE			0x0020
+#define EMAC_SGMII_LN_TX_POST			0x0024
+#define EMAC_SGMII_LN_TX_BAND_MODE		0x0060
+#define EMAC_SGMII_LN_LANE_MODE			0x0064
+#define EMAC_SGMII_LN_PARALLEL_RATE		0x007C
+#define EMAC_SGMII_LN_CML_CTRL_MODE0		0x00C0
+#define EMAC_SGMII_LN_MIXER_CTRL_MODE0		0x00D8
+#define EMAC_SGMII_LN_VGA_INITVAL		0x013C
+#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0	0x0184
+#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0	0x0190
+#define EMAC_SGMII_LN_UCDR_SO_CONFIG		0x019C
+#define EMAC_SGMII_LN_RX_BAND			0x01A4
+#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0	0x01C0
+#define EMAC_SGMII_LN_RSM_CONFIG		0x01F8
+#define EMAC_SGMII_LN_SIGDET_ENABLES		0x0230
+#define EMAC_SGMII_LN_SIGDET_CNTRL		0x0234
+#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL	0x0238
+#define EMAC_SGMII_LN_RX_EN_SIGNAL		0x02AC
+#define EMAC_SGMII_LN_RX_MISC_CNTRL0		0x02B8
+#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV		0x02C8
+#define EMAC_SGMII_LN_RX_RESECODE_OFFSET	0x02CC
+
+/* SGMII digital lane register values */
+#define UCDR_STEP_BY_TWO_MODE0			BIT(7)
+#define UCDR_xO_GAIN_MODE(x)			((x) & 0x7f)
+#define UCDR_ENABLE				BIT(6)
+#define UCDR_SO_SATURATION(x)			((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS4			BIT(7)
+#define SIGDET_EN_PS0_TO_PS2			BIT(6)
+
+#define TXVAL_VALID_INIT			BIT(4)
+#define KR_PCIGEN3_MODE				BIT(0)
+
+#define MAIN_EN					BIT(0)
+
+#define TX_MARGINING_MUX			BIT(6)
+#define TX_MARGINING(x)				((x) & 0x3f)
+
+#define TX_PRE_MUX				BIT(6)
+
+#define TX_POST_MUX				BIT(6)
+
+#define CML_GEAR_MODE(x)			(((x) & 7) << 3)
+#define CML2CMOS_IBOOST_MODE(x)			((x) & 7)
+
+#define RESCODE_OFFSET(x)			((x) & 0x1f)
+
+#define MIXER_LOADB_MODE(x)			(((x) & 0xf) << 2)
+#define MIXER_DATARATE_MODE(x)			((x) & 3)
+
+#define VGA_THRESH_DFE(x)			((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS0_TO_PS2		BIT(5)
+#define SIGDET_FLT_BYP				BIT(0)
+
+#define SIGDET_LVL(x)				(((x) & 0xf) << 4)
+
+#define SIGDET_DEGLITCH_CTRL(x)			(((x) & 0xf) << 1)
+
+#define INVERT_PCS_RX_CLK			BIT(7)
+
+#define DRVR_LOGIC_CLK_EN			BIT(4)
+#define DRVR_LOGIC_CLK_DIV(x)			((x) & 0xf)
+
+#define PARALLEL_RATE_MODE0(x)			((x) & 0x3)
+
+#define BAND_MODE0(x)				((x) & 0x3)
+
+#define LANE_MODE(x)				((x) & 0x1f)
+
+#define CDR_PD_SEL_MODE0(x)			(((x) & 0x3) << 5)
+#define EN_DLL_MODE0				BIT(4)
+#define EN_IQ_DCC_MODE0				BIT(3)
+#define EN_IQCAL_MODE0				BIT(2)
+
+#define BYPASS_RSM_SAMP_CAL			BIT(1)
+#define BYPASS_RSM_DLL_CAL			BIT(0)
+
+#define L0_RX_EQUALIZE_ENABLE			BIT(6)
+
+#define PWRDN_B					BIT(0)
+
+#define CDR_MAX_CNT(x)				((x) & 0xff)
+
+#define SERDES_START_WAIT_TIMES			100
+
+struct emac_reg_write {
+	unsigned int offset;
+	u32 val;
+};
+
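+/* Write each {offset, val} pair in the table to base + offset, in order */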
+static void emac_reg_write_all(void __iomem *base,
+			       const struct emac_reg_write *itr, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; ++itr, ++i)
+		writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write sgmii_laned[] = {
+	/* CDR Settings */
+	{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
+		UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
+	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
+	{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
+
+	/* TX/RX Settings */
+	{EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
+
+	{EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
+	{EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
+	{EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
+	{EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
+	{EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
+
+	{EMAC_SGMII_LN_CML_CTRL_MODE0,
+		CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
+	{EMAC_SGMII_LN_MIXER_CTRL_MODE0,
+		MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
+	{EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
+	{EMAC_SGMII_LN_SIGDET_ENABLES,
+		SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
+	{EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
+
+	{EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
+	{EMAC_SGMII_LN_RX_MISC_CNTRL0, INVERT_PCS_RX_CLK},
+	{EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
+		DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
+
+	{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
+	{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(1)},
+	{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(2)},
+	{EMAC_SGMII_LN_DRVR_CTRL1, RESCODE_OFFSET(7)},
+	{EMAC_SGMII_LN_RX_RESECODE_OFFSET, RESCODE_OFFSET(9)},
+	{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
+	{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(2) |
+		EN_DLL_MODE0 | EN_IQ_DCC_MODE0 | EN_IQCAL_MODE0},
+	{EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
+};
+
+static const struct emac_reg_write physical_coding_sublayer_programming[] = {
+	{EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+	{EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+	{EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
+	{EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
+};
+
+int emac_sgmii_init_qdf2400(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *phy = &adpt->phy;
+	void __iomem *phy_regs = phy->base;
+	void __iomem *laned = phy->digital;
+	unsigned int i;
+	u32 lnstatus;
+
+	/* PCS lane-x init */
+	emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
+			   ARRAY_SIZE(physical_coding_sublayer_programming));
+
+	/* SGMII lane-x init */
+	emac_reg_write_all(phy->digital, sgmii_laned, ARRAY_SIZE(sgmii_laned));
+
+	/* Power up PCS and start reset lane state machine */
+	writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
+	writel(1, laned + SGMII_LN_RSM_START);
+
+	/* Wait for c_ready assertion */
+	for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+		lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
+		if (lnstatus & BIT(1))
+			break;
+		usleep_range(100, 200);
+	}
+
+	if (i == SERDES_START_WAIT_TIMES) {
+		netdev_err(adpt->netdev, "SGMII failed to start\n");
+		return -EIO;
+	}
+
+	/* Disable digital and SERDES loopback */
+	writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
+	writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
+	writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
+
+	/* Mask out all the SGMII interrupts */
+	writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
new file mode 100644
index 0000000..65519ee
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. QDF2432 EMAC SGMII Controller driver.
+ */
+
+#include <linux/iopoll.h>
+#include "emac.h"
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_TX_PWR_CTRL		0x000C
+#define EMAC_SGMII_PHY_LANE_CTRL1		0x0018
+#define EMAC_SGMII_PHY_CDR_CTRL0		0x0058
+#define EMAC_SGMII_PHY_POW_DWN_CTRL0		0x0080
+#define EMAC_SGMII_PHY_RESET_CTRL		0x00a8
+#define EMAC_SGMII_PHY_INTERRUPT_MASK		0x00b4
+
+/* SGMII digital lane registers */
+#define EMAC_SGMII_LN_DRVR_CTRL0		0x000C
+#define EMAC_SGMII_LN_DRVR_TAP_EN		0x0018
+#define EMAC_SGMII_LN_TX_MARGINING		0x001C
+#define EMAC_SGMII_LN_TX_PRE			0x0020
+#define EMAC_SGMII_LN_TX_POST			0x0024
+#define EMAC_SGMII_LN_TX_BAND_MODE		0x0060
+#define EMAC_SGMII_LN_LANE_MODE			0x0064
+#define EMAC_SGMII_LN_PARALLEL_RATE		0x0078
+#define EMAC_SGMII_LN_CML_CTRL_MODE0		0x00B8
+#define EMAC_SGMII_LN_MIXER_CTRL_MODE0		0x00D0
+#define EMAC_SGMII_LN_VGA_INITVAL		0x0134
+#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0	0x017C
+#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0	0x0188
+#define EMAC_SGMII_LN_UCDR_SO_CONFIG		0x0194
+#define EMAC_SGMII_LN_RX_BAND			0x019C
+#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0	0x01B8
+#define EMAC_SGMII_LN_RSM_CONFIG		0x01F0
+#define EMAC_SGMII_LN_SIGDET_ENABLES		0x0224
+#define EMAC_SGMII_LN_SIGDET_CNTRL		0x0228
+#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL	0x022C
+#define EMAC_SGMII_LN_RX_EN_SIGNAL		0x02A0
+#define EMAC_SGMII_LN_RX_MISC_CNTRL0		0x02AC
+#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV		0x02BC
+
+/* SGMII digital lane register values */
+#define UCDR_STEP_BY_TWO_MODE0			BIT(7)
+#define UCDR_xO_GAIN_MODE(x)			((x) & 0x7f)
+#define UCDR_ENABLE				BIT(6)
+#define UCDR_SO_SATURATION(x)			((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS4			BIT(7)
+#define SIGDET_EN_PS0_TO_PS2			BIT(6)
+
+#define TXVAL_VALID_INIT			BIT(4)
+#define KR_PCIGEN3_MODE				BIT(0)
+
+#define MAIN_EN					BIT(0)
+
+#define TX_MARGINING_MUX			BIT(6)
+#define TX_MARGINING(x)				((x) & 0x3f)
+
+#define TX_PRE_MUX				BIT(6)
+
+#define TX_POST_MUX				BIT(6)
+
+#define CML_GEAR_MODE(x)			(((x) & 7) << 3)
+#define CML2CMOS_IBOOST_MODE(x)			((x) & 7)
+
+#define MIXER_LOADB_MODE(x)			(((x) & 0xf) << 2)
+#define MIXER_DATARATE_MODE(x)			((x) & 3)
+
+#define VGA_THRESH_DFE(x)			((x) & 0x3f)
+
+#define SIGDET_LP_BYP_PS0_TO_PS2		BIT(5)
+#define SIGDET_FLT_BYP				BIT(0)
+
+#define SIGDET_LVL(x)				(((x) & 0xf) << 4)
+
+#define SIGDET_DEGLITCH_CTRL(x)			(((x) & 0xf) << 1)
+
+#define DRVR_LOGIC_CLK_EN			BIT(4)
+#define DRVR_LOGIC_CLK_DIV(x)			((x) & 0xf)
+
+#define PARALLEL_RATE_MODE0(x)			((x) & 0x3)
+
+#define BAND_MODE0(x)				((x) & 0x3)
+
+#define LANE_MODE(x)				((x) & 0x1f)
+
+#define CDR_PD_SEL_MODE0(x)			(((x) & 0x3) << 5)
+#define BYPASS_RSM_SAMP_CAL			BIT(1)
+#define BYPASS_RSM_DLL_CAL			BIT(0)
+
+#define L0_RX_EQUALIZE_ENABLE			BIT(6)
+
+#define PWRDN_B					BIT(0)
+
+#define CDR_MAX_CNT(x)				((x) & 0xff)
+
+#define SERDES_START_WAIT_TIMES			100
+
+struct emac_reg_write {
+	unsigned int offset;
+	u32 val;
+};
+
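+/* Write each {offset, val} pair in the table to base + offset, in order */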
+static void emac_reg_write_all(void __iomem *base,
+			       const struct emac_reg_write *itr, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; ++itr, ++i)
+		writel(itr->val, base + itr->offset);
+}
+
+static const struct emac_reg_write sgmii_laned[] = {
+	/* CDR Settings */
+	{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
+		UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
+	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
+	{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
+
+	/* TX/RX Settings */
+	{EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
+
+	{EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
+	{EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
+	{EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
+	{EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
+	{EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
+
+	{EMAC_SGMII_LN_CML_CTRL_MODE0,
+		CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
+	{EMAC_SGMII_LN_MIXER_CTRL_MODE0,
+		MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
+	{EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
+	{EMAC_SGMII_LN_SIGDET_ENABLES,
+		SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
+	{EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
+
+	{EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
+	{EMAC_SGMII_LN_RX_MISC_CNTRL0, 0},
+	{EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
+		DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
+
+	{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
+	{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)},
+	{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)},
+	{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
+	{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)},
+	{EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
+};
+
+static const struct emac_reg_write physical_coding_sublayer_programming[] = {
+	{EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
+	{EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
+	{EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
+	{EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
+};
+
+int emac_sgmii_init_qdf2432(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *phy = &adpt->phy;
+	void __iomem *phy_regs = phy->base;
+	void __iomem *laned = phy->digital;
+	unsigned int i;
+	u32 lnstatus;
+
+	/* PCS lane-x init */
+	emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
+			   ARRAY_SIZE(physical_coding_sublayer_programming));
+
+	/* SGMII lane-x init */
+	emac_reg_write_all(phy->digital, sgmii_laned, ARRAY_SIZE(sgmii_laned));
+
+	/* Power up PCS and start reset lane state machine */
+	writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
+	writel(1, laned + SGMII_LN_RSM_START);
+
+	/* Wait for c_ready assertion */
+	for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
+		lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS);
+		if (lnstatus & BIT(1))
+			break;
+		usleep_range(100, 200);
+	}
+
+	if (i == SERDES_START_WAIT_TIMES) {
+		netdev_err(adpt->netdev, "SGMII failed to start\n");
+		return -EIO;
+	}
+
+	/* Disable digital and SERDES loopback */
+	writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
+	writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
+	writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);
+
+	/* Mask out all the SGMII interrupts */
+	writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
new file mode 100644
index 0000000..802ef81
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/acpi.h>
+#include <linux/of_device.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-sgmii.h"
+
+/* EMAC_SGMII register offsets */
+#define EMAC_SGMII_PHY_AUTONEG_CFG2		0x0048
+#define EMAC_SGMII_PHY_SPEED_CFG1		0x0074
+#define EMAC_SGMII_PHY_IRQ_CMD			0x00ac
+#define EMAC_SGMII_PHY_INTERRUPT_CLEAR		0x00b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK		0x00b4
+#define EMAC_SGMII_PHY_INTERRUPT_STATUS		0x00b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS		0x00d4
+
+#define FORCE_AN_TX_CFG				BIT(5)
+#define FORCE_AN_RX_CFG				BIT(4)
+#define AN_ENABLE				BIT(0)
+
+#define DUPLEX_MODE				BIT(4)
+#define SPDMODE_1000				BIT(1)
+#define SPDMODE_100				BIT(0)
+#define SPDMODE_10				0
+
+#define CDR_ALIGN_DET				BIT(6)
+
+#define IRQ_GLOBAL_CLEAR			BIT(0)
+
+#define DECODE_CODE_ERR				BIT(7)
+#define DECODE_DISP_ERR				BIT(6)
+
+#define SGMII_PHY_IRQ_CLR_WAIT_TIME		10
+
+#define SGMII_PHY_INTERRUPT_ERR		(DECODE_CODE_ERR | DECODE_DISP_ERR)
+#define SGMII_ISR_MASK			(SGMII_PHY_INTERRUPT_ERR)
+
+#define SERDES_START_WAIT_TIMES			100
+
+int emac_sgmii_init(struct emac_adapter *adpt)
+{
+	if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->init))
+		return 0;
+
+	return adpt->phy.sgmii_ops->init(adpt);
+}
+
+int emac_sgmii_open(struct emac_adapter *adpt)
+{
+	if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->open))
+		return 0;
+
+	return adpt->phy.sgmii_ops->open(adpt);
+}
+
+void emac_sgmii_close(struct emac_adapter *adpt)
+{
+	if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->close))
+		return;
+
+	adpt->phy.sgmii_ops->close(adpt);
+}
+
+int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state)
+{
+	if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->link_change))
+		return 0;
+
+	return adpt->phy.sgmii_ops->link_change(adpt, link_state);
+}
+
+void emac_sgmii_reset(struct emac_adapter *adpt)
+{
+	if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->reset))
+		return;
+
+	adpt->phy.sgmii_ops->reset(adpt);
+}
+
+/* Initialize the SGMII link between the internal and external PHYs. */
+static void emac_sgmii_link_init(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *phy = &adpt->phy;
+	u32 val;
+
+	/* Always use autonegotiation. It works no matter how the external
+	 * PHY is configured.
+	 */
+	val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+	val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
+	val |= AN_ENABLE;
+	writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
+}
+
+static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u8 irq_bits)
+{
+	struct emac_sgmii *phy = &adpt->phy;
+	u8 status;
+
+	writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
+	writel_relaxed(IRQ_GLOBAL_CLEAR, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
+	/* Ensure interrupt clear command is written to HW */
+	wmb();
+
+	/* After setting the IRQ_GLOBAL_CLEAR bit, the status clear must be
+	 * confirmed before clearing the bits in other registers.
+	 * It takes a few cycles for the hardware to clear the interrupt
+	 * status.
+	 */
+	if (readl_poll_timeout_atomic(phy->base +
+				      EMAC_SGMII_PHY_INTERRUPT_STATUS,
+				      status, !(status & irq_bits), 1,
+				      SGMII_PHY_IRQ_CLR_WAIT_TIME)) {
+		net_err_ratelimited("%s: failed to clear SGMII irq: status:0x%x bits:0x%x\n",
+				    adpt->netdev->name, status, irq_bits);
+		return -EIO;
+	}
+
+	/* Finalize clearing procedure */
+	writel_relaxed(0, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
+	writel_relaxed(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
+
+	/* Ensure that clearing procedure finalization is written to HW */
+	wmb();
+
+	return 0;
+}
+
+/* The number of decode errors that triggers a reset */
+#define DECODE_ERROR_LIMIT	2
+
+static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
+{
+	struct emac_adapter *adpt = data;
+	struct emac_sgmii *phy = &adpt->phy;
+	u8 status;
+
+	status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS);
+	status &= SGMII_ISR_MASK;
+	if (!status)
+		return IRQ_HANDLED;
+
+	/* If we get a decoding error and CDR is not locked, then try
+	 * resetting the internal PHY.  The internal PHY uses an embedded
+	 * clock with Clock and Data Recovery (CDR) to recover the
+	 * clock and data.
+	 */
+	if (status & SGMII_PHY_INTERRUPT_ERR) {
+		int count;
+
+		/* The SGMII is capable of recovering from some decode
+		 * errors automatically.  However, if we get multiple
+		 * decode errors in a row, then assume that something
+		 * is wrong and reset the interface.
+		 */
+		count = atomic_inc_return(&phy->decode_error_count);
+		if (count == DECODE_ERROR_LIMIT) {
+			schedule_work(&adpt->work_thread);
+			atomic_set(&phy->decode_error_count, 0);
+		}
+	} else {
+		/* We only care about consecutive decode errors. */
+		atomic_set(&phy->decode_error_count, 0);
+	}
+
+	if (emac_sgmii_irq_clear(adpt, status))
+		schedule_work(&adpt->work_thread);
+
+	return IRQ_HANDLED;
+}
+
+static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *phy = &adpt->phy;
+	u32 val;
+
+	/* Reset PHY */
+	val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2);
+	writel(((val & ~PHY_RESET) | PHY_RESET), phy->base +
+	       EMAC_EMAC_WRAPPER_CSR2);
+	/* Ensure phy-reset command is written to HW before the release cmd */
+	msleep(50);
+	val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2);
+	writel((val & ~PHY_RESET), phy->base + EMAC_EMAC_WRAPPER_CSR2);
+	/* Ensure phy-reset release command is written to HW before initializing
+	 * SGMII
+	 */
+	msleep(50);
+}
+
+static void emac_sgmii_common_reset(struct emac_adapter *adpt)
+{
+	int ret;
+
+	emac_sgmii_reset_prepare(adpt);
+	emac_sgmii_link_init(adpt);
+
+	ret = emac_sgmii_init(adpt);
+	if (ret)
+		netdev_err(adpt->netdev,
+			   "could not reinitialize internal PHY (error=%i)\n",
+			   ret);
+}
+
+static int emac_sgmii_common_open(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+	int ret;
+
+	if (sgmii->irq) {
+		/* Make sure interrupts are cleared and disabled first */
+		ret = emac_sgmii_irq_clear(adpt, 0xff);
+		if (ret)
+			return ret;
+		writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+		ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0,
+				  "emac-sgmii", adpt);
+		if (ret) {
+			netdev_err(adpt->netdev,
+				   "could not register handler for internal PHY\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void emac_sgmii_common_close(struct emac_adapter *adpt)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+
+	/* Make sure interrupts are disabled */
+	writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+	free_irq(sgmii->irq, adpt);
+}
+
+/* The error interrupts are only valid after the link is up */
+static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup)
+{
+	struct emac_sgmii *sgmii = &adpt->phy;
+	int ret;
+
+	if (linkup) {
+		/* Clear and enable interrupts */
+		ret = emac_sgmii_irq_clear(adpt, 0xff);
+		if (ret)
+			return ret;
+
+		writel(SGMII_ISR_MASK,
+		       sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+	} else {
+		/* Disable interrupts */
+		writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+		synchronize_irq(sgmii->irq);
+	}
+
+	return 0;
+}
+
+static struct sgmii_ops fsm9900_ops = {
+	.init = emac_sgmii_init_fsm9900,
+	.open = emac_sgmii_common_open,
+	.close = emac_sgmii_common_close,
+	.link_change = emac_sgmii_common_link_change,
+	.reset = emac_sgmii_common_reset,
+};
+
+static struct sgmii_ops qdf2432_ops = {
+	.init = emac_sgmii_init_qdf2432,
+	.open = emac_sgmii_common_open,
+	.close = emac_sgmii_common_close,
+	.link_change = emac_sgmii_common_link_change,
+	.reset = emac_sgmii_common_reset,
+};
+
+#ifdef CONFIG_ACPI
+static struct sgmii_ops qdf2400_ops = {
+	.init = emac_sgmii_init_qdf2400,
+	.open = emac_sgmii_common_open,
+	.close = emac_sgmii_common_close,
+	.link_change = emac_sgmii_common_link_change,
+	.reset = emac_sgmii_common_reset,
+};
+#endif
+
+static int emac_sgmii_acpi_match(struct device *dev, void *data)
+{
+#ifdef CONFIG_ACPI
+	static const struct acpi_device_id match_table[] = {
+		{
+			.id = "QCOM8071",
+		},
+		{}
+	};
+	const struct acpi_device_id *id = acpi_match_device(match_table, dev);
+	struct sgmii_ops **ops = data;
+
+	if (id) {
+		acpi_handle handle = ACPI_HANDLE(dev);
+		unsigned long long hrv;
+		acpi_status status;
+
+		status = acpi_evaluate_integer(handle, "_HRV", NULL, &hrv);
+		if (status) {
+			if (status == AE_NOT_FOUND)
+				/* Older versions of the QDF2432 ACPI tables do
+				 * not have an _HRV property.
+				 */
+				hrv = 1;
+			else
+				/* Something is wrong with the tables */
+				return 0;
+		}
+
+		switch (hrv) {
+		case 1:
+			*ops = &qdf2432_ops;
+			return 1;
+		case 2:
+			*ops = &qdf2400_ops;
+			return 1;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static const struct of_device_id emac_sgmii_dt_match[] = {
+	{
+		.compatible = "qcom,fsm9900-emac-sgmii",
+		.data = &fsm9900_ops,
+	},
+	{
+		.compatible = "qcom,qdf2432-emac-sgmii",
+		.data = &qdf2432_ops,
+	},
+	{}
+};
+
+int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
+{
+	struct platform_device *sgmii_pdev = NULL;
+	struct emac_sgmii *phy = &adpt->phy;
+	struct resource *res;
+	int ret;
+
+	if (has_acpi_companion(&pdev->dev)) {
+		struct device *dev;
+
+		dev = device_find_child(&pdev->dev, &phy->sgmii_ops,
+					emac_sgmii_acpi_match);
+
+		if (!dev) {
+			dev_warn(&pdev->dev, "cannot find internal phy node\n");
+			return 0;
+		}
+
+		sgmii_pdev = to_platform_device(dev);
+	} else {
+		const struct of_device_id *match;
+		struct device_node *np;
+
+		np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
+		if (!np) {
+			dev_err(&pdev->dev, "missing internal-phy property\n");
+			return -ENODEV;
+		}
+
+		sgmii_pdev = of_find_device_by_node(np);
+		of_node_put(np);
+		if (!sgmii_pdev) {
+			dev_err(&pdev->dev, "invalid internal-phy property\n");
+			return -ENODEV;
+		}
+
+		match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev);
+		if (!match) {
+			dev_err(&pdev->dev, "unrecognized internal phy node\n");
+			ret = -ENODEV;
+			goto error_put_device;
+		}
+
+		phy->sgmii_ops = (struct sgmii_ops *)match->data;
+	}
+
+	/* Base address is the first address */
+	res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		goto error_put_device;
+	}
+
+	phy->base = ioremap(res->start, resource_size(res));
+	if (!phy->base) {
+		ret = -ENOMEM;
+		goto error_put_device;
+	}
+
+	/* v2 SGMII has a per-lane digital block, so map it if it exists */
+	res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		phy->digital = ioremap(res->start, resource_size(res));
+		if (!phy->digital) {
+			ret = -ENOMEM;
+			goto error_unmap_base;
+		}
+	}
+
+	ret = emac_sgmii_init(adpt);
+	if (ret)
+		goto error;
+
+	emac_sgmii_link_init(adpt);
+
+	ret = platform_get_irq(sgmii_pdev, 0);
+	if (ret > 0)
+		phy->irq = ret;
+
+	/* We've remapped the addresses, so we don't need the device any
+	 * more.  of_find_device_by_node() says we should release it.
+	 */
+	put_device(&sgmii_pdev->dev);
+
+	return 0;
+
+error:
+	if (phy->digital)
+		iounmap(phy->digital);
+error_unmap_base:
+	iounmap(phy->base);
+error_put_device:
+	put_device(&sgmii_pdev->dev);
+
+	return ret;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
new file mode 100644
index 0000000..6daedda
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _EMAC_SGMII_H_
+#define _EMAC_SGMII_H_
+
+struct emac_adapter;
+struct platform_device;
+
+/** sgmii_ops - internal emac phy ops
+ * @init: initialization function
+ * @open: called when the driver is opened
+ * @close: called when the driver is closed
+ * @link_change: called when the link state changes
+ * @reset: called to reset the internal phy
+ */
+struct sgmii_ops {
+	int (*init)(struct emac_adapter *adpt);
+	int (*open)(struct emac_adapter *adpt);
+	void (*close)(struct emac_adapter *adpt);
+	int (*link_change)(struct emac_adapter *adpt, bool link_state);
+	void (*reset)(struct emac_adapter *adpt);
+};
+
+/** emac_sgmii - internal emac phy
+ * @base: base address
+ * @digital: per-lane digital block
+ * @irq: the interrupt number
+ * @decode_error_count: count of consecutive decode errors
+ * @sgmii_ops: sgmii ops
+ */
+struct emac_sgmii {
+	void __iomem		*base;
+	void __iomem		*digital;
+	unsigned int		irq;
+	atomic_t		decode_error_count;
+	struct	sgmii_ops	*sgmii_ops;
+};
+
+int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
+
+int emac_sgmii_init_fsm9900(struct emac_adapter *adpt);
+int emac_sgmii_init_qdf2432(struct emac_adapter *adpt);
+int emac_sgmii_init_qdf2400(struct emac_adapter *adpt);
+
+int emac_sgmii_init(struct emac_adapter *adpt);
+int emac_sgmii_open(struct emac_adapter *adpt);
+void emac_sgmii_close(struct emac_adapter *adpt);
+int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state);
+void emac_sgmii_reset(struct emac_adapter *adpt);
+#endif
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac.c b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac.c
new file mode 100644
index 0000000..5fe28de
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ */
+
+/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include "emac.h"
+#include "emac-mac.h"
+#include "emac-phy.h"
+#include "emac-sgmii.h"
+
+#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |  \
+		NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define EMAC_RRD_SIZE					     4
+/* The RRD size if timestamping is enabled: */
+#define EMAC_TS_RRD_SIZE				     6
+#define EMAC_TPD_SIZE					     4
+#define EMAC_RFD_SIZE					     2
+
+#define REG_MAC_RX_STATUS_BIN		 EMAC_RXMAC_STATC_REG0
+#define REG_MAC_RX_STATUS_END		EMAC_RXMAC_STATC_REG22
+#define REG_MAC_TX_STATUS_BIN		 EMAC_TXMAC_STATC_REG0
+#define REG_MAC_TX_STATUS_END		EMAC_TXMAC_STATC_REG24
+
+#define RXQ0_NUM_RFD_PREF_DEF				     8
+#define TXQ0_NUM_TPD_PREF_DEF				     5
+
+#define EMAC_PREAMBLE_DEF				     7
+
+#define DMAR_DLY_CNT_DEF				    15
+#define DMAW_DLY_CNT_DEF				     4
+
+#define IMR_NORMAL_MASK		(ISR_ERROR | ISR_OVER | ISR_TX_PKT)
+
+#define ISR_TX_PKT      (\
+	TX_PKT_INT      |\
+	TX_PKT_INT1     |\
+	TX_PKT_INT2     |\
+	TX_PKT_INT3)
+
+#define ISR_OVER        (\
+	RFD0_UR_INT     |\
+	RFD1_UR_INT     |\
+	RFD2_UR_INT     |\
+	RFD3_UR_INT     |\
+	RFD4_UR_INT     |\
+	RXF_OF_INT      |\
+	TXF_UR_INT)
+
+#define ISR_ERROR       (\
+	DMAR_TO_INT     |\
+	DMAW_TO_INT     |\
+	TXQ_TO_INT)
+
+/* in sync with enum emac_clk_id */
+static const char * const emac_clk_name[] = {
+	"axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
+	"rx_clk", "sys_clk"
+};
+
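+/* Read-modify-write a 32-bit register: clear the bits in mask, then OR in
+ * val.  For example, emac_mdio_read() updates the PHY address field with
+ *
+ *	emac_reg_update32(base + EMAC_PHY_STS, PHY_ADDR_BMSK,
+ *			  addr << PHY_ADDR_SHFT);
+ */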
+void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
+{
+	u32 data = readl(addr);
+
+	writel(((data & ~mask) | val), addr);
+}
+
+/* Reinitialize the interface: bring the MAC down, reset the internal
+ * SGMII PHY, and bring the MAC back up
+ */
+int emac_reinit_locked(struct emac_adapter *adpt)
+{
+	int ret;
+
+	mutex_lock(&adpt->reset_lock);
+
+	emac_mac_down(adpt);
+	emac_sgmii_reset(adpt);
+	ret = emac_mac_up(adpt);
+
+	mutex_unlock(&adpt->reset_lock);
+
+	return ret;
+}
+
+/* NAPI receive poll function */
+static int emac_napi_rtx(struct napi_struct *napi, int budget)
+{
+	struct emac_rx_queue *rx_q =
+		container_of(napi, struct emac_rx_queue, napi);
+	struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
+	struct emac_irq *irq = rx_q->irq;
+	int work_done = 0;
+
+	emac_mac_rx_process(adpt, rx_q, &work_done, budget);
+
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+
+		irq->mask |= rx_q->intr;
+		writel(irq->mask, adpt->base + EMAC_INT_MASK);
+	}
+
+	return work_done;
+}
+
+/* Transmit the packet */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
+}
+
+static irqreturn_t emac_isr(int _irq, void *data)
+{
+	struct emac_irq *irq = data;
+	struct emac_adapter *adpt =
+		container_of(irq, struct emac_adapter, irq);
+	struct emac_rx_queue *rx_q = &adpt->rx_q;
+	u32 isr, status;
+
+	/* disable the interrupt */
+	writel(0, adpt->base + EMAC_INT_MASK);
+
+	isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);
+
+	status = isr & irq->mask;
+	if (status == 0)
+		goto exit;
+
+	if (status & ISR_ERROR) {
+		net_err_ratelimited("%s: error interrupt 0x%lx\n",
+				    adpt->netdev->name, status & ISR_ERROR);
+		/* reset MAC */
+		schedule_work(&adpt->work_thread);
+	}
+
+	/* Schedule NAPI for the receive queue whose interrupt status
+	 * bit is set
+	 */
+	if (status & rx_q->intr) {
+		if (napi_schedule_prep(&rx_q->napi)) {
+			irq->mask &= ~rx_q->intr;
+			__napi_schedule(&rx_q->napi);
+		}
+	}
+
+	if (status & TX_PKT_INT)
+		emac_mac_tx_process(adpt, &adpt->tx_q);
+
+	if (status & ISR_OVER)
+		net_warn_ratelimited("%s: TX/RX overflow interrupt\n",
+				     adpt->netdev->name);
+
+exit:
+	/* enable the interrupt */
+	writel(irq->mask, adpt->base + EMAC_INT_MASK);
+
+	return IRQ_HANDLED;
+}
+
+/* Configure VLAN tag strip/insert feature */
+static int emac_set_features(struct net_device *netdev,
+			     netdev_features_t features)
+{
+	netdev_features_t changed = features ^ netdev->features;
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	/* We only need to reprogram the hardware if the VLAN tag features
+	 * have changed, and if it's already running.
+	 */
+	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
+		return 0;
+
+	if (!netif_running(netdev))
+		return 0;
+
+	/* emac_mac_mode_config() uses netdev->features to configure the EMAC,
+	 * so make sure it's set first.
+	 */
+	netdev->features = features;
+
+	return emac_reinit_locked(adpt);
+}
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_rx_mode_set(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+	struct netdev_hw_addr *ha;
+
+	emac_mac_mode_config(adpt);
+
+	/* update multicast address filtering */
+	emac_mac_multicast_addr_clear(adpt);
+	netdev_for_each_mc_addr(ha, netdev)
+		emac_mac_multicast_addr_set(adpt, ha->addr);
+}
+
+/* Change the Maximum Transmission Unit (MTU) */
+static int emac_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	netif_info(adpt, hw, adpt->netdev,
+		   "changing MTU from %d to %d\n", netdev->mtu,
+		   new_mtu);
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev))
+		return emac_reinit_locked(adpt);
+
+	return 0;
+}
+
+/* Called when the network interface is made active */
+static int emac_open(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+	struct emac_irq	*irq = &adpt->irq;
+	int ret;
+
+	ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
+	if (ret) {
+		netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
+		return ret;
+	}
+
+	/* allocate rx/tx dma buffer & descriptors */
+	ret = emac_mac_rx_tx_rings_alloc_all(adpt);
+	if (ret) {
+		netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
+		free_irq(irq->irq, irq);
+		return ret;
+	}
+
+	ret = emac_sgmii_open(adpt);
+	if (ret) {
+		emac_mac_rx_tx_rings_free_all(adpt);
+		free_irq(irq->irq, irq);
+		return ret;
+	}
+
+	ret = emac_mac_up(adpt);
+	if (ret) {
+		emac_mac_rx_tx_rings_free_all(adpt);
+		free_irq(irq->irq, irq);
+		emac_sgmii_close(adpt);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Called when the network interface is disabled */
+static int emac_close(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	mutex_lock(&adpt->reset_lock);
+
+	emac_sgmii_close(adpt);
+	emac_mac_down(adpt);
+	emac_mac_rx_tx_rings_free_all(adpt);
+
+	free_irq(adpt->irq.irq, &adpt->irq);
+
+	mutex_unlock(&adpt->reset_lock);
+
+	return 0;
+}
+
+/* Respond to a TX hang */
+static void emac_tx_timeout(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	schedule_work(&adpt->work_thread);
+}
+
+/* IOCTL support for the interface */
+static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	if (!netdev->phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+/**
+ * emac_update_hw_stats - read the EMAC stat registers
+ * @adpt: the EMAC adapter
+ *
+ * Reads the stats registers and writes the values to adpt->stats.
+ *
+ * adpt->stats.lock must be held while calling this function,
+ * and while reading from adpt->stats.
+ */
+void emac_update_hw_stats(struct emac_adapter *adpt)
+{
+	struct emac_stats *stats = &adpt->stats;
+	u64 *stats_itr = &adpt->stats.rx_ok;
+	void __iomem *base = adpt->base;
+	unsigned int addr;
+
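+	/* The RX status registers are consecutive u32 counters, and the u64
+	 * fields of struct emac_stats are declared in matching order, so a
+	 * single pointer walk accumulates them all. The TX block below works
+	 * the same way.
+	 */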
+	addr = REG_MAC_RX_STATUS_BIN;
+	while (addr <= REG_MAC_RX_STATUS_END) {
+		*stats_itr += readl_relaxed(base + addr);
+		stats_itr++;
+		addr += sizeof(u32);
+	}
+
+	/* additional rx status */
+	stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
+	stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);
+
+	/* update tx status */
+	addr = REG_MAC_TX_STATUS_BIN;
+	stats_itr = &stats->tx_ok;
+
+	while (addr <= REG_MAC_TX_STATUS_END) {
+		*stats_itr += readl_relaxed(base + addr);
+		stats_itr++;
+		addr += sizeof(u32);
+	}
+
+	/* additional tx status */
+	stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
+}
+
+/* Provide network statistics info for the interface */
+static void emac_get_stats64(struct net_device *netdev,
+			     struct rtnl_link_stats64 *net_stats)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+	struct emac_stats *stats = &adpt->stats;
+
+	spin_lock(&stats->lock);
+
+	emac_update_hw_stats(adpt);
+
+	/* return parsed statistics */
+	net_stats->rx_packets = stats->rx_ok;
+	net_stats->tx_packets = stats->tx_ok;
+	net_stats->rx_bytes = stats->rx_byte_cnt;
+	net_stats->tx_bytes = stats->tx_byte_cnt;
+	net_stats->multicast = stats->rx_mcast;
+	net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
+				stats->tx_late_col + stats->tx_abort_col;
+
+	net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
+			       stats->rx_len_err + stats->rx_sz_ov +
+			       stats->rx_align_err;
+	net_stats->rx_fifo_errors = stats->rx_rxf_ov;
+	net_stats->rx_length_errors = stats->rx_len_err;
+	net_stats->rx_crc_errors = stats->rx_fcs_err;
+	net_stats->rx_frame_errors = stats->rx_align_err;
+	net_stats->rx_over_errors = stats->rx_rxf_ov;
+	net_stats->rx_missed_errors = stats->rx_rxf_ov;
+
+	net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
+			       stats->tx_underrun + stats->tx_trunc;
+	net_stats->tx_fifo_errors = stats->tx_underrun;
+	net_stats->tx_aborted_errors = stats->tx_abort_col;
+	net_stats->tx_window_errors = stats->tx_late_col;
+
+	spin_unlock(&stats->lock);
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+	.ndo_open		= emac_open,
+	.ndo_stop		= emac_close,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_start_xmit		= emac_start_xmit,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_change_mtu		= emac_change_mtu,
+	.ndo_do_ioctl		= emac_ioctl,
+	.ndo_tx_timeout		= emac_tx_timeout,
+	.ndo_get_stats64	= emac_get_stats64,
+	.ndo_set_features       = emac_set_features,
+	.ndo_set_rx_mode        = emac_rx_mode_set,
+};
+
+/* Watchdog task routine, called to reinitialize the EMAC */
+static void emac_work_thread(struct work_struct *work)
+{
+	struct emac_adapter *adpt =
+		container_of(work, struct emac_adapter, work_thread);
+
+	emac_reinit_locked(adpt);
+}
+
+/* Initialize various data structures */
+static void emac_init_adapter(struct emac_adapter *adpt)
+{
+	u32 reg;
+
+	adpt->rrd_size = EMAC_RRD_SIZE;
+	adpt->tpd_size = EMAC_TPD_SIZE;
+	adpt->rfd_size = EMAC_RFD_SIZE;
+
+	/* descriptors */
+	adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
+	adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;
+
+	/* dma */
+	adpt->dma_order = emac_dma_ord_out;
+	adpt->dmar_block = emac_dma_req_4096;
+	adpt->dmaw_block = emac_dma_req_128;
+	adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
+	adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
+	adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
+	adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;
+
+	/* irq moderator */
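+	/* The default moderation values are halved (>> 1) before being
+	 * packed into the register, which suggests the hardware timer counts
+	 * in 2 us units; that is an inference, not a documented fact.
+	 */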
+	reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
+	      ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
+	adpt->irq_mod = reg;
+
+	/* others */
+	adpt->preamble = EMAC_PREAMBLE_DEF;
+
+	/* default to automatic flow control */
+	adpt->automatic = true;
+
+	/* Disable single-pause-frame mode by default */
+	adpt->single_pause_mode = false;
+}
+
+/* Get the clock */
+static int emac_clks_get(struct platform_device *pdev,
+			 struct emac_adapter *adpt)
+{
+	unsigned int i;
+
+	for (i = 0; i < EMAC_CLK_CNT; i++) {
+		struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);
+
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev,
+				"could not claim clock %s (error=%li)\n",
+				emac_clk_name[i], PTR_ERR(clk));
+
+			return PTR_ERR(clk);
+		}
+
+		adpt->clk[i] = clk;
+	}
+
+	return 0;
+}
+
+/* Initialize clocks */
+static int emac_clks_phase1_init(struct platform_device *pdev,
+				 struct emac_adapter *adpt)
+{
+	int ret;
+
+	/* On ACPI platforms, clocks are controlled by firmware and/or
+	 * ACPI, not by drivers.
+	 */
+	if (has_acpi_companion(&pdev->dev))
+		return 0;
+
+	ret = emac_clks_get(pdev, adpt);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
+	if (ret)
+		goto disable_clk_axi;
+
+	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
+	if (ret)
+		goto disable_clk_cfg_ahb;
+
+	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+	if (ret)
+		goto disable_clk_cfg_ahb;
+
+	return 0;
+
+disable_clk_cfg_ahb:
+	clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
+disable_clk_axi:
+	clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
+
+	return ret;
+}
+
+/* Enable clocks; needs emac_clks_phase1_init to be called before */
+static int emac_clks_phase2_init(struct platform_device *pdev,
+				 struct emac_adapter *adpt)
+{
+	int ret;
+
+	if (has_acpi_companion(&pdev->dev))
+		return 0;
+
+	ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
+	if (ret)
+		return ret;
+
+	ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
+	if (ret)
+		return ret;
+
+	ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
+	if (ret)
+		return ret;
+
+	return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
+}
+
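+/* Safe on ACPI systems too: emac_clks_get() is never called there, so every
+ * clk[] entry is still NULL (netdev_priv() storage is zeroed at allocation)
+ * and clk_disable_unprepare(NULL) is a no-op.
+ */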
+static void emac_clks_teardown(struct emac_adapter *adpt)
+{
+	unsigned int i;
+
+	for (i = 0; i < EMAC_CLK_CNT; i++)
+		clk_disable_unprepare(adpt->clk[i]);
+}
+
+/* Get the resources */
+static int emac_probe_resources(struct platform_device *pdev,
+				struct emac_adapter *adpt)
+{
+	struct net_device *netdev = adpt->netdev;
+	char maddr[ETH_ALEN];
+	int ret = 0;
+
+	/* get mac address */
+	if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
+		ether_addr_copy(netdev->dev_addr, maddr);
+	else
+		eth_hw_addr_random(netdev);
+
+	/* Core 0 interrupt */
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		return ret;
+	adpt->irq.irq = ret;
+
+	/* base register address */
+	adpt->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(adpt->base))
+		return PTR_ERR(adpt->base);
+
+	/* CSR register address */
+	adpt->csr = devm_platform_ioremap_resource(pdev, 1);
+	if (IS_ERR(adpt->csr))
+		return PTR_ERR(adpt->csr);
+
+	netdev->base_addr = (unsigned long)adpt->base;
+
+	return 0;
+}
+
+static const struct of_device_id emac_dt_match[] = {
+	{
+		.compatible = "qcom,fsm9900-emac",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, emac_dt_match);
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id emac_acpi_match[] = {
+	{
+		.id = "QCOM8070",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
+#endif
+
+static int emac_probe(struct platform_device *pdev)
+{
+	struct net_device *netdev;
+	struct emac_adapter *adpt;
+	struct emac_sgmii *phy;
+	u16 devid, revid;
+	u32 reg;
+	int ret;
+
+	/* The TPD buffer address is limited to:
+	 * 1. PTP:	45 bits (the driver doesn't support PTP yet)
+	 * 2. non-PTP:	46 bits
+	 */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
+	if (ret) {
+		dev_err(&pdev->dev, "could not set DMA mask\n");
+		return ret;
+	}
+
+	netdev = alloc_etherdev(sizeof(struct emac_adapter));
+	if (!netdev)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	emac_set_ethtool_ops(netdev);
+
+	adpt = netdev_priv(netdev);
+	adpt->netdev = netdev;
+	adpt->msg_enable = EMAC_MSG_DEFAULT;
+
+	phy = &adpt->phy;
+	atomic_set(&phy->decode_error_count, 0);
+
+	mutex_init(&adpt->reset_lock);
+	spin_lock_init(&adpt->stats.lock);
+
+	adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;
+
+	ret = emac_probe_resources(pdev, adpt);
+	if (ret)
+		goto err_undo_netdev;
+
+	/* initialize clocks */
+	ret = emac_clks_phase1_init(pdev, adpt);
+	if (ret) {
+		dev_err(&pdev->dev, "could not initialize clocks\n");
+		goto err_undo_netdev;
+	}
+
+	netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
+	netdev->irq = adpt->irq.irq;
+
+	netdev->netdev_ops = &emac_netdev_ops;
+
+	emac_init_adapter(adpt);
+
+	/* init external phy */
+	ret = emac_phy_config(pdev, adpt);
+	if (ret)
+		goto err_undo_clocks;
+
+	/* init internal sgmii phy */
+	ret = emac_sgmii_config(pdev, adpt);
+	if (ret)
+		goto err_undo_mdiobus;
+
+	/* enable clocks */
+	ret = emac_clks_phase2_init(pdev, adpt);
+	if (ret) {
+		dev_err(&pdev->dev, "could not initialize clocks\n");
+		goto err_undo_mdiobus;
+	}
+
+	/* set hw features */
+	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
+			NETIF_F_HW_VLAN_CTAG_TX;
+	netdev->hw_features = netdev->features;
+
+	netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
+				 NETIF_F_TSO | NETIF_F_TSO6;
+
+	/* MTU range: 46 - 9194 */
+	netdev->min_mtu = EMAC_MIN_ETH_FRAME_SIZE -
+			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+	netdev->max_mtu = EMAC_MAX_ETH_FRAME_SIZE -
+			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
+	INIT_WORK(&adpt->work_thread, emac_work_thread);
+
+	/* Initialize queues */
+	emac_mac_rx_tx_ring_init_all(pdev, adpt);
+
+	netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
+		       NAPI_POLL_WEIGHT);
+
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(&pdev->dev, "could not register net device\n");
+		goto err_undo_napi;
+	}
+
+	reg =  readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
+	devid = (reg & DEV_ID_NUM_BMSK)  >> DEV_ID_NUM_SHFT;
+	revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
+	reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);
+
+	netif_info(adpt, probe, netdev,
+		   "hardware id %d.%d, hardware version %d.%d.%d\n",
+		   devid, revid,
+		   (reg & MAJOR_BMSK) >> MAJOR_SHFT,
+		   (reg & MINOR_BMSK) >> MINOR_SHFT,
+		   (reg & STEP_BMSK)  >> STEP_SHFT);
+
+	return 0;
+
+err_undo_napi:
+	netif_napi_del(&adpt->rx_q.napi);
+err_undo_mdiobus:
+	put_device(&adpt->phydev->mdio.dev);
+	mdiobus_unregister(adpt->mii_bus);
+err_undo_clocks:
+	emac_clks_teardown(adpt);
+err_undo_netdev:
+	free_netdev(netdev);
+
+	return ret;
+}
+
+static int emac_remove(struct platform_device *pdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
+	unregister_netdev(netdev);
+	netif_napi_del(&adpt->rx_q.napi);
+
+	free_irq(adpt->irq.irq, &adpt->irq);
+	cancel_work_sync(&adpt->work_thread);
+
+	emac_clks_teardown(adpt);
+
+	put_device(&adpt->phydev->mdio.dev);
+	mdiobus_unregister(adpt->mii_bus);
+
+	if (adpt->phy.digital)
+		iounmap(adpt->phy.digital);
+	iounmap(adpt->phy.base);
+
+	free_netdev(netdev);
+
+	return 0;
+}
+
+static void emac_shutdown(struct platform_device *pdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	if (netdev->flags & IFF_UP) {
+		/* Closing the SGMII turns off its interrupts */
+		emac_sgmii_close(adpt);
+
+		/* Resetting the MAC turns off all DMA and its interrupts */
+		emac_mac_reset(adpt);
+	}
+}
+
+static struct platform_driver emac_platform_driver = {
+	.probe	= emac_probe,
+	.remove	= emac_remove,
+	.driver = {
+		.name		= "qcom-emac",
+		.of_match_table = emac_dt_match,
+		.acpi_match_table = ACPI_PTR(emac_acpi_match),
+	},
+	.shutdown = emac_shutdown,
+};
+
+module_platform_driver(emac_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-emac");
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac.h b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac.h
new file mode 100644
index 0000000..7e9e151
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/irqreturn.h>
+#include <linux/netdevice.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include "emac-mac.h"
+#include "emac-phy.h"
+#include "emac-sgmii.h"
+
+/* EMAC base register offsets */
+#define EMAC_DMA_MAS_CTRL		0x1400
+#define EMAC_IRQ_MOD_TIM_INIT		0x1408
+#define EMAC_BLK_IDLE_STS		0x140c
+#define EMAC_PHY_LINK_DELAY		0x141c
+#define EMAC_SYS_ALIV_CTRL		0x1434
+#define EMAC_MAC_CTRL			0x1480
+#define EMAC_MAC_IPGIFG_CTRL		0x1484
+#define EMAC_MAC_STA_ADDR0		0x1488
+#define EMAC_MAC_STA_ADDR1		0x148c
+#define EMAC_HASH_TAB_REG0		0x1490
+#define EMAC_HASH_TAB_REG1		0x1494
+#define EMAC_MAC_HALF_DPLX_CTRL		0x1498
+#define EMAC_MAX_FRAM_LEN_CTRL		0x149c
+#define EMAC_WOL_CTRL0			0x14a0
+#define EMAC_RSS_KEY0			0x14b0
+#define EMAC_H1TPD_BASE_ADDR_LO		0x14e0
+#define EMAC_H2TPD_BASE_ADDR_LO		0x14e4
+#define EMAC_H3TPD_BASE_ADDR_LO		0x14e8
+#define EMAC_INTER_SRAM_PART9		0x1534
+#define EMAC_DESC_CTRL_0		0x1540
+#define EMAC_DESC_CTRL_1		0x1544
+#define EMAC_DESC_CTRL_2		0x1550
+#define EMAC_DESC_CTRL_10		0x1554
+#define EMAC_DESC_CTRL_12		0x1558
+#define EMAC_DESC_CTRL_13		0x155c
+#define EMAC_DESC_CTRL_3		0x1560
+#define EMAC_DESC_CTRL_4		0x1564
+#define EMAC_DESC_CTRL_5		0x1568
+#define EMAC_DESC_CTRL_14		0x156c
+#define EMAC_DESC_CTRL_15		0x1570
+#define EMAC_DESC_CTRL_16		0x1574
+#define EMAC_DESC_CTRL_6		0x1578
+#define EMAC_DESC_CTRL_8		0x1580
+#define EMAC_DESC_CTRL_9		0x1584
+#define EMAC_DESC_CTRL_11		0x1588
+#define EMAC_TXQ_CTRL_0			0x1590
+#define EMAC_TXQ_CTRL_1			0x1594
+#define EMAC_TXQ_CTRL_2			0x1598
+#define EMAC_RXQ_CTRL_0			0x15a0
+#define EMAC_RXQ_CTRL_1			0x15a4
+#define EMAC_RXQ_CTRL_2			0x15a8
+#define EMAC_RXQ_CTRL_3			0x15ac
+#define EMAC_BASE_CPU_NUMBER		0x15b8
+#define EMAC_DMA_CTRL			0x15c0
+#define EMAC_MAILBOX_0			0x15e0
+#define EMAC_MAILBOX_5			0x15e4
+#define EMAC_MAILBOX_6			0x15e8
+#define EMAC_MAILBOX_13			0x15ec
+#define EMAC_MAILBOX_2			0x15f4
+#define EMAC_MAILBOX_3			0x15f8
+#define EMAC_INT_STATUS			0x1600
+#define EMAC_INT_MASK			0x1604
+#define EMAC_MAILBOX_11			0x160c
+#define EMAC_AXI_MAST_CTRL		0x1610
+#define EMAC_MAILBOX_12			0x1614
+#define EMAC_MAILBOX_9			0x1618
+#define EMAC_MAILBOX_10			0x161c
+#define EMAC_ATHR_HEADER_CTRL		0x1620
+#define EMAC_RXMAC_STATC_REG0		0x1700
+#define EMAC_RXMAC_STATC_REG22		0x1758
+#define EMAC_TXMAC_STATC_REG0		0x1760
+#define EMAC_TXMAC_STATC_REG24		0x17c0
+#define EMAC_CLK_GATE_CTRL		0x1814
+#define EMAC_CORE_HW_VERSION		0x1974
+#define EMAC_MISC_CTRL			0x1990
+#define EMAC_MAILBOX_7			0x19e0
+#define EMAC_MAILBOX_8			0x19e4
+#define EMAC_IDT_TABLE0			0x1b00
+#define EMAC_RXMAC_STATC_REG23		0x1bc8
+#define EMAC_RXMAC_STATC_REG24		0x1bcc
+#define EMAC_TXMAC_STATC_REG25		0x1bd0
+#define EMAC_MAILBOX_15			0x1bd4
+#define EMAC_MAILBOX_16			0x1bd8
+#define EMAC_INT1_MASK			0x1bf0
+#define EMAC_INT1_STATUS		0x1bf4
+#define EMAC_INT2_MASK			0x1bf8
+#define EMAC_INT2_STATUS		0x1bfc
+#define EMAC_INT3_MASK			0x1c00
+#define EMAC_INT3_STATUS		0x1c04
+
+/* EMAC_DMA_MAS_CTRL */
+#define DEV_ID_NUM_BMSK                                     0x7f000000
+#define DEV_ID_NUM_SHFT                                             24
+#define DEV_REV_NUM_BMSK                                      0xff0000
+#define DEV_REV_NUM_SHFT                                            16
+#define INT_RD_CLR_EN                                           0x4000
+#define IRQ_MODERATOR2_EN                                        0x800
+#define IRQ_MODERATOR_EN                                         0x400
+#define LPW_CLK_SEL                                               0x80
+#define LPW_STATE                                                 0x20
+#define LPW_MODE                                                  0x10
+#define SOFT_RST                                                   0x1
+
+/* EMAC_IRQ_MOD_TIM_INIT */
+#define IRQ_MODERATOR2_INIT_BMSK                            0xffff0000
+#define IRQ_MODERATOR2_INIT_SHFT                                    16
+#define IRQ_MODERATOR_INIT_BMSK                                 0xffff
+#define IRQ_MODERATOR_INIT_SHFT                                      0
+
+/* EMAC_INT_STATUS */
+#define DIS_INT                                                BIT(31)
+#define PTP_INT                                                BIT(30)
+#define RFD4_UR_INT                                            BIT(29)
+#define TX_PKT_INT3                                            BIT(26)
+#define TX_PKT_INT2                                            BIT(25)
+#define TX_PKT_INT1                                            BIT(24)
+#define RX_PKT_INT3                                            BIT(19)
+#define RX_PKT_INT2                                            BIT(18)
+#define RX_PKT_INT1                                            BIT(17)
+#define RX_PKT_INT0                                            BIT(16)
+#define TX_PKT_INT                                             BIT(15)
+#define TXQ_TO_INT                                             BIT(14)
+#define GPHY_WAKEUP_INT                                        BIT(13)
+#define GPHY_LINK_DOWN_INT                                     BIT(12)
+#define GPHY_LINK_UP_INT                                       BIT(11)
+#define DMAW_TO_INT                                            BIT(10)
+#define DMAR_TO_INT                                             BIT(9)
+#define TXF_UR_INT                                              BIT(8)
+#define RFD3_UR_INT                                             BIT(7)
+#define RFD2_UR_INT                                             BIT(6)
+#define RFD1_UR_INT                                             BIT(5)
+#define RFD0_UR_INT                                             BIT(4)
+#define RXF_OF_INT                                              BIT(3)
+#define SW_MAN_INT                                              BIT(2)
+
+/* EMAC_MAILBOX_6 */
+#define RFD2_PROC_IDX_BMSK                                   0xfff0000
+#define RFD2_PROC_IDX_SHFT                                          16
+#define RFD2_PROD_IDX_BMSK                                       0xfff
+#define RFD2_PROD_IDX_SHFT                                           0
+
+/* EMAC_CORE_HW_VERSION */
+#define MAJOR_BMSK                                          0xf0000000
+#define MAJOR_SHFT                                                  28
+#define MINOR_BMSK                                           0xfff0000
+#define MINOR_SHFT                                                  16
+#define STEP_BMSK                                               0xffff
+#define STEP_SHFT                                                    0
+
+/* EMAC_EMAC_WRAPPER_CSR1 */
+#define TX_INDX_FIFO_SYNC_RST                                  BIT(23)
+#define TX_TS_FIFO_SYNC_RST                                    BIT(22)
+#define RX_TS_FIFO2_SYNC_RST                                   BIT(21)
+#define RX_TS_FIFO1_SYNC_RST                                   BIT(20)
+#define TX_TS_ENABLE                                           BIT(16)
+#define DIS_1588_CLKS                                          BIT(11)
+#define FREQ_MODE                                               BIT(9)
+#define ENABLE_RRD_TIMESTAMP                                    BIT(3)
+
+/* EMAC_EMAC_WRAPPER_CSR2 */
+#define HDRIVE_BMSK                                             0x3000
+#define HDRIVE_SHFT                                                 12
+#define SLB_EN                                                  BIT(9)
+#define PLB_EN                                                  BIT(8)
+#define WOL_EN                                                  BIT(3)
+#define PHY_RESET                                               BIT(0)
+
+#define EMAC_DEV_ID                                             0x0040
+
+/* SGMII v2 per lane registers */
+#define SGMII_LN_RSM_START             0x029C
+
+/* SGMII v2 PHY common registers */
+#define SGMII_PHY_CMN_CTRL            0x0408
+#define SGMII_PHY_CMN_RESET_CTRL      0x0410
+
+/* SGMII v2 PHY registers per lane */
+#define SGMII_PHY_LN_OFFSET          0x0400
+#define SGMII_PHY_LN_LANE_STATUS     0x00DC
+#define SGMII_PHY_LN_BIST_GEN0       0x008C
+#define SGMII_PHY_LN_BIST_GEN1       0x0090
+#define SGMII_PHY_LN_BIST_GEN2       0x0094
+#define SGMII_PHY_LN_BIST_GEN3       0x0098
+#define SGMII_PHY_LN_CDR_CTRL1       0x005C
+
+enum emac_clk_id {
+	EMAC_CLK_AXI,
+	EMAC_CLK_CFG_AHB,
+	EMAC_CLK_HIGH_SPEED,
+	EMAC_CLK_MDIO,
+	EMAC_CLK_TX,
+	EMAC_CLK_RX,
+	EMAC_CLK_SYS,
+	EMAC_CLK_CNT
+};
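+
+/* The order above must match the clock-name lookup table in emac.c
+ * (emac_clk_name[]), which is indexed by enum emac_clk_id.
+ */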
+
+#define EMAC_LINK_SPEED_UNKNOWN                                    0x0
+#define EMAC_LINK_SPEED_10_HALF                                 BIT(0)
+#define EMAC_LINK_SPEED_10_FULL                                 BIT(1)
+#define EMAC_LINK_SPEED_100_HALF                                BIT(2)
+#define EMAC_LINK_SPEED_100_FULL                                BIT(3)
+#define EMAC_LINK_SPEED_1GB_FULL                                BIT(5)
+
+#define EMAC_MAX_SETUP_LNK_CYCLE                                   100
+
+struct emac_stats {
+	/* rx */
+	u64 rx_ok;              /* good packets */
+	u64 rx_bcast;           /* good broadcast packets */
+	u64 rx_mcast;           /* good multicast packets */
+	u64 rx_pause;           /* pause packet */
+	u64 rx_ctrl;            /* control packets other than pause frame. */
+	u64 rx_fcs_err;         /* packets with bad FCS. */
+	u64 rx_len_err;         /* packets with length mismatch */
+	u64 rx_byte_cnt;        /* good bytes count (without FCS) */
+	u64 rx_runt;            /* runt packets */
+	u64 rx_frag;            /* fragment count */
+	u64 rx_sz_64;	        /* packets that are 64 bytes */
+	u64 rx_sz_65_127;       /* packets that are 65-127 bytes */
+	u64 rx_sz_128_255;      /* packets that are 128-255 bytes */
+	u64 rx_sz_256_511;      /* packets that are 256-511 bytes */
+	u64 rx_sz_512_1023;     /* packets that are 512-1023 bytes */
+	u64 rx_sz_1024_1518;    /* packets that are 1024-1518 bytes */
+	u64 rx_sz_1519_max;     /* packets that are 1519-MTU bytes*/
+	u64 rx_sz_ov;           /* packets that are >MTU bytes (truncated) */
+	u64 rx_rxf_ov;          /* packets dropped due to RX FIFO overflow */
+	u64 rx_align_err;       /* alignment errors */
+	u64 rx_bcast_byte_cnt;  /* broadcast packets byte count (without FCS) */
+	u64 rx_mcast_byte_cnt;  /* multicast packets byte count (without FCS) */
+	u64 rx_err_addr;        /* packets dropped due to address filtering */
+	u64 rx_crc_align;       /* CRC align errors */
+	u64 rx_jabbers;         /* jabbers */
+
+	/* tx */
+	u64 tx_ok;              /* good packets */
+	u64 tx_bcast;           /* good broadcast packets */
+	u64 tx_mcast;           /* good multicast packets */
+	u64 tx_pause;           /* pause packets */
+	u64 tx_exc_defer;       /* packets with excessive deferral */
+	u64 tx_ctrl;            /* control packets other than pause frame */
+	u64 tx_defer;           /* packets that are deferred. */
+	u64 tx_byte_cnt;        /* good bytes count (without FCS) */
+	u64 tx_sz_64;           /* packets that are 64 bytes */
+	u64 tx_sz_65_127;       /* packets that are 65-127 bytes */
+	u64 tx_sz_128_255;      /* packets that are 128-255 bytes */
+	u64 tx_sz_256_511;      /* packets that are 256-511 bytes */
+	u64 tx_sz_512_1023;     /* packets that are 512-1023 bytes */
+	u64 tx_sz_1024_1518;    /* packets that are 1024-1518 bytes */
+	u64 tx_sz_1519_max;     /* packets that are 1519-MTU bytes */
+	u64 tx_1_col;           /* packets single prior collision */
+	u64 tx_2_col;           /* packets with multiple prior collisions */
+	u64 tx_late_col;        /* packets with late collisions */
+	u64 tx_abort_col;       /* packets aborted due to excess collisions */
+	u64 tx_underrun;        /* packets aborted due to FIFO underrun */
+	u64 tx_rd_eop;          /* count of reads beyond EOP */
+	u64 tx_len_err;         /* packets with length mismatch */
+	u64 tx_trunc;           /* packets truncated due to size >MTU */
+	u64 tx_bcast_byte;      /* broadcast packets byte count (without FCS) */
+	u64 tx_mcast_byte;      /* multicast packets byte count (without FCS) */
+	u64 tx_col;             /* collisions */
+
+	spinlock_t lock;	/* prevent multiple simultaneous readers */
+};
+
+/* RSS hstype Definitions */
+#define EMAC_RSS_HSTYP_IPV4_EN				    0x00000001
+#define EMAC_RSS_HSTYP_TCP4_EN				    0x00000002
+#define EMAC_RSS_HSTYP_IPV6_EN				    0x00000004
+#define EMAC_RSS_HSTYP_TCP6_EN				    0x00000008
+#define EMAC_RSS_HSTYP_ALL_EN (\
+		EMAC_RSS_HSTYP_IPV4_EN   |\
+		EMAC_RSS_HSTYP_TCP4_EN   |\
+		EMAC_RSS_HSTYP_IPV6_EN   |\
+		EMAC_RSS_HSTYP_TCP6_EN)
+
+#define EMAC_VLAN_TO_TAG(_vlan, _tag) \
+		(_tag =  ((((_vlan) >> 8) & 0xFF) | (((_vlan) & 0xFF) << 8)))
+
+#define EMAC_TAG_TO_VLAN(_tag, _vlan) \
+		(_vlan = ((((_tag) >> 8) & 0xFF) | (((_tag) & 0xFF) << 8)))
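+
+/* Both macros perform the same 16-bit byte swap, converting the VLAN tag
+ * between CPU byte order and the byte order the hardware descriptors
+ * expect (an inference from the symmetric definitions).
+ */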
+
+#define EMAC_DEF_RX_BUF_SIZE					  1536
+#define EMAC_MAX_JUMBO_PKT_SIZE				    (9 * 1024)
+#define EMAC_MAX_TX_OFFLOAD_THRESH			    (9 * 1024)
+
+#define EMAC_MAX_ETH_FRAME_SIZE		       EMAC_MAX_JUMBO_PKT_SIZE
+#define EMAC_MIN_ETH_FRAME_SIZE					    68
+
+#define EMAC_DEF_TX_QUEUES					     1
+#define EMAC_DEF_RX_QUEUES					     1
+
+#define EMAC_MIN_TX_DESCS					   128
+#define EMAC_MIN_RX_DESCS					   128
+
+#define EMAC_MAX_TX_DESCS					 16383
+#define EMAC_MAX_RX_DESCS					  2047
+
+#define EMAC_DEF_TX_DESCS					   512
+#define EMAC_DEF_RX_DESCS					   256
+
+#define EMAC_DEF_RX_IRQ_MOD					   250
+#define EMAC_DEF_TX_IRQ_MOD					   250
+
+#define EMAC_WATCHDOG_TIME				      (5 * HZ)
+
+/* by default check link every 4 seconds */
+#define EMAC_TRY_LINK_TIMEOUT				      (4 * HZ)
+
+/* emac_irq - per-device (per-adapter) irq properties.
+ * @irq:	irq number.
+ * @mask:	mask to use over the status register.
+ */
+struct emac_irq {
+	unsigned int	irq;
+	u32		mask;
+};
+
+/* The device's main data structure */
+struct emac_adapter {
+	struct net_device		*netdev;
+	struct mii_bus			*mii_bus;
+	struct phy_device		*phydev;
+
+	void __iomem			*base;
+	void __iomem			*csr;
+
+	struct emac_sgmii		phy;
+	struct emac_stats		stats;
+
+	struct emac_irq			irq;
+	struct clk			*clk[EMAC_CLK_CNT];
+
+	/* All Descriptor memory */
+	struct emac_ring_header		ring_header;
+	struct emac_tx_queue		tx_q;
+	struct emac_rx_queue		rx_q;
+	unsigned int			tx_desc_cnt;
+	unsigned int			rx_desc_cnt;
+	unsigned int			rrd_size; /* in quad words */
+	unsigned int			rfd_size; /* in quad words */
+	unsigned int			tpd_size; /* in quad words */
+
+	unsigned int			rxbuf_size;
+
+	/* Flow control / pause frames support. If automatic=True, do whatever
+	 * the PHY does. Otherwise, use tx_flow_control and rx_flow_control.
+	 */
+	bool				automatic;
+	bool				tx_flow_control;
+	bool				rx_flow_control;
+
+	/* True == use single-pause-frame mode. */
+	bool				single_pause_mode;
+
+	/* Ring parameter */
+	u8				tpd_burst;
+	u8				rfd_burst;
+	unsigned int			dmaw_dly_cnt;
+	unsigned int			dmar_dly_cnt;
+	enum emac_dma_req_block		dmar_block;
+	enum emac_dma_req_block		dmaw_block;
+	enum emac_dma_order		dma_order;
+
+	u32				irq_mod;
+	u32				preamble;
+
+	struct work_struct		work_thread;
+
+	u16				msg_enable;
+
+	struct mutex			reset_lock;
+};
+
+int emac_reinit_locked(struct emac_adapter *adpt);
+void emac_reg_update32(void __iomem *addr, u32 mask, u32 val);
+
+void emac_set_ethtool_ops(struct net_device *netdev);
+void emac_update_hw_stats(struct emac_adapter *adpt);
+
+#endif /* _EMAC_H_ */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k.c b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k.c
new file mode 100644
index 0000000..4292c89
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -0,0 +1,149 @@
+/*
+ *
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*   This module implements the Qualcomm Atheros SPI protocol for a
+ *   kernel-based SPI device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spi/spi.h>
+
+#include "qca_7k.h"
+
+void
+qcaspi_spi_error(struct qcaspi *qca)
+{
+	if (qca->sync != QCASPI_SYNC_READY)
+		return;
+
+	netdev_err(qca->net_dev, "spi error\n");
+	qca->sync = QCASPI_SYNC_UNKNOWN;
+	qca->stats.spi_err++;
+}
+
+int
+qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
+{
+	__be16 rx_data;
+	__be16 tx_data;
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
+	int ret;
+
+	memset(transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+	*result = 0;
+
+	transfer[0].tx_buf = &tx_data;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].rx_buf = &rx_data;
+	transfer[1].len = QCASPI_CMD_LEN;
+
+	spi_message_add_tail(&transfer[0], &msg);
+
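+	/* In legacy mode the command word and the data phase are sent as two
+	 * separate SPI messages, so chip select is deasserted between them;
+	 * otherwise both travel in a single message.
+	 */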
+	if (qca->legacy_mode) {
+		spi_sync(qca->spi_dev, &msg);
+		spi_message_init(&msg);
+	}
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (!ret)
+		ret = msg.status;
+
+	if (ret)
+		qcaspi_spi_error(qca);
+	else
+		*result = be16_to_cpu(rx_data);
+
+	return ret;
+}
+
+static int
+__qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
+{
+	__be16 tx_data[2];
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
+	int ret;
+
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
+	tx_data[1] = cpu_to_be16(value);
+
+	transfer[0].tx_buf = &tx_data[0];
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].tx_buf = &tx_data[1];
+	transfer[1].len = QCASPI_CMD_LEN;
+
+	spi_message_add_tail(&transfer[0], &msg);
+	if (qca->legacy_mode) {
+		spi_sync(qca->spi_dev, &msg);
+		spi_message_init(&msg);
+	}
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (!ret)
+		ret = msg.status;
+
+	if (ret)
+		qcaspi_spi_error(qca);
+
+	return ret;
+}
+
+int
+qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry)
+{
+	int ret, i = 0;
+	u16 confirmed;
+
+	do {
+		ret = __qcaspi_write_register(qca, reg, value);
+		if (ret)
+			return ret;
+
+		if (!retry)
+			return 0;
+
+		ret = qcaspi_read_register(qca, reg, &confirmed);
+		if (ret)
+			return ret;
+
+		ret = confirmed != value;
+		if (!ret)
+			return 0;
+
+		i++;
+		qca->stats.write_verify_failed++;
+
+	} while (i <= retry);
+
+	return ret;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k.h b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k.h
new file mode 100644
index 0000000..356de8e
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -0,0 +1,71 @@
+/*
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*   Qualcomm Atheros SPI register definitions.
+ *
+ *   This header defines the QCA7000 SPI registers and the
+ *   associated bit masks.
+ */
+
+#ifndef _QCA_7K_H
+#define _QCA_7K_H
+
+#include <linux/types.h>
+
+#include "qca_spi.h"
+
+#define QCA7K_SPI_READ     (1 << 15)
+#define QCA7K_SPI_WRITE    (0 << 15)
+#define QCA7K_SPI_INTERNAL (1 << 14)
+#define QCA7K_SPI_EXTERNAL (0 << 14)
+
+#define QCASPI_CMD_LEN    2
+#define QCASPI_HW_PKT_LEN 4
+#define QCASPI_HW_BUF_LEN 0xC5B
+
+/*   SPI registers                                */
+#define SPI_REG_BFR_SIZE        0x0100
+#define SPI_REG_WRBUF_SPC_AVA   0x0200
+#define SPI_REG_RDBUF_BYTE_AVA  0x0300
+#define SPI_REG_SPI_CONFIG      0x0400
+#define SPI_REG_SPI_STATUS      0x0500
+#define SPI_REG_INTR_CAUSE      0x0C00
+#define SPI_REG_INTR_ENABLE     0x0D00
+#define SPI_REG_RDBUF_WATERMARK 0x1200
+#define SPI_REG_WRBUF_WATERMARK 0x1300
+#define SPI_REG_SIGNATURE       0x1A00
+#define SPI_REG_ACTION_CTRL     0x1B00
+
+/*   SPI_CONFIG register definition              */
+#define QCASPI_SLAVE_RESET_BIT  BIT(6)
+
+/*   INTR_CAUSE/ENABLE register definition.      */
+#define SPI_INT_WRBUF_BELOW_WM  BIT(10)
+#define SPI_INT_CPU_ON          BIT(6)
+#define SPI_INT_ADDR_ERR        BIT(3)
+#define SPI_INT_WRBUF_ERR       BIT(2)
+#define SPI_INT_RDBUF_ERR       BIT(1)
+#define SPI_INT_PKT_AVLBL       BIT(0)
+
+void qcaspi_spi_error(struct qcaspi *qca);
+int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result);
+int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry);
+
+#endif /* _QCA_7K_H */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k_common.c b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k_common.c
new file mode 100644
index 0000000..6b511f0
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -0,0 +1,166 @@
+/*
+ *   Copyright (c) 2011, 2012, Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   Atheros Ethernet framing. Every Ethernet frame is surrounded
+ *   by an Atheros frame while transmitted over a serial channel.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "qca_7k_common.h"
+
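+/* On-wire layout produced by the helpers below:
+ *
+ *   AA AA AA AA | len_lo len_hi | 00 00 | <Ethernet frame> | 55 55
+ *    4-byte SOF   little endian   rsvd                       2-byte EOF
+ *
+ * The length field counts only the Ethernet frame itself.
+ */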
+u16
+qcafrm_create_header(u8 *buf, u16 length)
+{
+	__le16 len;
+
+	if (!buf)
+		return 0;
+
+	len = cpu_to_le16(length);
+
+	buf[0] = 0xAA;
+	buf[1] = 0xAA;
+	buf[2] = 0xAA;
+	buf[3] = 0xAA;
+	buf[4] = len & 0xff;
+	buf[5] = (len >> 8) & 0xff;
+	buf[6] = 0;
+	buf[7] = 0;
+
+	return QCAFRM_HEADER_LEN;
+}
+EXPORT_SYMBOL_GPL(qcafrm_create_header);
+
+u16
+qcafrm_create_footer(u8 *buf)
+{
+	if (!buf)
+		return 0;
+
+	buf[0] = 0x55;
+	buf[1] = 0x55;
+	return QCAFRM_FOOTER_LEN;
+}
+EXPORT_SYMBOL_GPL(qcafrm_create_footer);
+
+/*   Gather received bytes and try to extract a full Ethernet frame by
+ *   following a simple state machine.
+ *
+ * Return:   QCAFRM_GATHER       No Ethernet frame fully received yet.
+ *           QCAFRM_NOHEAD       Header expected but not found.
+ *           QCAFRM_INVLEN       Atheros frame length is invalid.
+ *           QCAFRM_NOTAIL       Footer expected but not found.
+ *           > 0                 Number of bytes in the fully received
+ *                               Ethernet frame.
+ */
+
+s32
+qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte)
+{
+	s32 ret = QCAFRM_GATHER;
+	u16 len;
+
+	switch (handle->state) {
+	case QCAFRM_HW_LEN0:
+	case QCAFRM_HW_LEN1:
+		/* by default, just go to next state */
+		handle->state--;
+
+		if (recv_byte != 0x00) {
+			/* first two bytes of length must be 0 */
+			handle->state = handle->init;
+		}
+		break;
+	case QCAFRM_HW_LEN2:
+	case QCAFRM_HW_LEN3:
+		handle->state--;
+		break;
+	/* 4 bytes header pattern */
+	case QCAFRM_WAIT_AA1:
+	case QCAFRM_WAIT_AA2:
+	case QCAFRM_WAIT_AA3:
+	case QCAFRM_WAIT_AA4:
+		if (recv_byte != 0xAA) {
+			ret = QCAFRM_NOHEAD;
+			handle->state = handle->init;
+		} else {
+			handle->state--;
+		}
+		break;
+		/* 2 bytes length. */
+		/* Borrow offset field to hold length for now. */
+	case QCAFRM_WAIT_LEN_BYTE0:
+		handle->offset = recv_byte;
+		handle->state = QCAFRM_WAIT_LEN_BYTE1;
+		break;
+	case QCAFRM_WAIT_LEN_BYTE1:
+		handle->offset = handle->offset | (recv_byte << 8);
+		handle->state = QCAFRM_WAIT_RSVD_BYTE1;
+		break;
+	case QCAFRM_WAIT_RSVD_BYTE1:
+		handle->state = QCAFRM_WAIT_RSVD_BYTE2;
+		break;
+	case QCAFRM_WAIT_RSVD_BYTE2:
+		len = handle->offset;
+		if (len > buf_len || len < QCAFRM_MIN_LEN) {
+			ret = QCAFRM_INVLEN;
+			handle->state = handle->init;
+		} else {
+			handle->state = (enum qcafrm_state)(len + 1);
+			/* Remaining number of bytes. */
+			handle->offset = 0;
+		}
+		break;
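+	/* From here on, the state value itself is the number of payload
+	 * bytes still outstanding: it was seeded with len + 1 above and is
+	 * decremented once per byte until it reaches QCAFRM_WAIT_551 (== 1).
+	 */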
+	default:
+		/* Receiving Ethernet frame itself. */
+		buf[handle->offset] = recv_byte;
+		handle->offset++;
+		handle->state--;
+		break;
+	case QCAFRM_WAIT_551:
+		if (recv_byte != 0x55) {
+			ret = QCAFRM_NOTAIL;
+			handle->state = handle->init;
+		} else {
+			handle->state = QCAFRM_WAIT_552;
+		}
+		break;
+	case QCAFRM_WAIT_552:
+		if (recv_byte != 0x55) {
+			ret = QCAFRM_NOTAIL;
+			handle->state = handle->init;
+		} else {
+			ret = handle->offset;
+			/* Frame is fully received. */
+			handle->state = handle->init;
+		}
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k_common.h b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k_common.h
new file mode 100644
index 0000000..928554f
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -0,0 +1,144 @@
+/*
+ *   Copyright (c) 2011, 2012, Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   Atheros Ethernet framing. Every Ethernet frame is surrounded by an
+ *   Atheros frame while transmitted over a serial channel.
+ */
+
+#ifndef _QCA_FRAMING_H
+#define _QCA_FRAMING_H
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+
+/* Frame is currently being received */
+#define QCAFRM_GATHER 0
+
+/*  No header byte while expecting it */
+#define QCAFRM_NOHEAD (QCAFRM_ERR_BASE - 1)
+
+/* No trailer byte while expecting it */
+#define QCAFRM_NOTAIL (QCAFRM_ERR_BASE - 2)
+
+/* Frame length is invalid */
+#define QCAFRM_INVLEN (QCAFRM_ERR_BASE - 3)
+
+/* Frame itself is invalid */
+#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4)
+
+/* Min/Max Ethernet MTU: 46/1500 */
+#define QCAFRM_MIN_MTU (ETH_ZLEN - ETH_HLEN)
+#define QCAFRM_MAX_MTU ETH_DATA_LEN
+
+/* Min/Max frame lengths */
+#define QCAFRM_MIN_LEN (QCAFRM_MIN_MTU + ETH_HLEN)
+#define QCAFRM_MAX_LEN (QCAFRM_MAX_MTU + VLAN_ETH_HLEN)
+
+/* QCA7K header len */
+#define QCAFRM_HEADER_LEN 8
+
+/* QCA7K footer len */
+#define QCAFRM_FOOTER_LEN 2
+
+/* QCA7K Framing. */
+#define QCAFRM_ERR_BASE (-1000)
+
+enum qcafrm_state {
+	/* HW length is only available on SPI */
+	QCAFRM_HW_LEN0 = 0x8000,
+	QCAFRM_HW_LEN1 = QCAFRM_HW_LEN0 - 1,
+	QCAFRM_HW_LEN2 = QCAFRM_HW_LEN1 - 1,
+	QCAFRM_HW_LEN3 = QCAFRM_HW_LEN2 - 1,
+
+	/*  Waiting first 0xAA of header */
+	QCAFRM_WAIT_AA1 = QCAFRM_HW_LEN3 - 1,
+
+	/*  Waiting second 0xAA of header */
+	QCAFRM_WAIT_AA2 = QCAFRM_WAIT_AA1 - 1,
+
+	/*  Waiting third 0xAA of header */
+	QCAFRM_WAIT_AA3 = QCAFRM_WAIT_AA2 - 1,
+
+	/*  Waiting fourth 0xAA of header */
+	QCAFRM_WAIT_AA4 = QCAFRM_WAIT_AA3 - 1,
+
+	/*  Waiting Byte 0-1 of length (little endian) */
+	QCAFRM_WAIT_LEN_BYTE0 = QCAFRM_WAIT_AA4 - 1,
+	QCAFRM_WAIT_LEN_BYTE1 = QCAFRM_WAIT_AA4 - 2,
+
+	/* Reserved bytes */
+	QCAFRM_WAIT_RSVD_BYTE1 = QCAFRM_WAIT_AA4 - 3,
+	QCAFRM_WAIT_RSVD_BYTE2 = QCAFRM_WAIT_AA4 - 4,
+
+	/*  The frame length is used as the state until
+	 *  the end of the Ethernet frame
+	 *  Waiting for first 0x55 of footer
+	 */
+	QCAFRM_WAIT_551 = 1,
+
+	/*  Waiting for second 0x55 of footer */
+	QCAFRM_WAIT_552 = QCAFRM_WAIT_551 - 1
+};
+
+/*   Structure holding the frame decoding state during reception. */
+
+struct qcafrm_handle {
+	/*  Current decoding state */
+	enum qcafrm_state state;
+	/* Initial state depends on connection type */
+	enum qcafrm_state init;
+
+	/* Offset in buffer (borrowed for length too) */
+	u16 offset;
+
+	/* Frame length as kept by this module */
+	u16 len;
+};
+
+u16 qcafrm_create_header(u8 *buf, u16 len);
+
+u16 qcafrm_create_footer(u8 *buf);
+
+static inline void qcafrm_fsm_init_spi(struct qcafrm_handle *handle)
+{
+	handle->init = QCAFRM_HW_LEN0;
+	handle->state = handle->init;
+}
+
+static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
+{
+	handle->init = QCAFRM_WAIT_AA1;
+	handle->state = handle->init;
+}
+
+/*   Gather received bytes and try to extract a full Ethernet frame
+ *   by following a simple state machine.
+ *
+ * Return:   QCAFRM_GATHER       No Ethernet frame fully received yet.
+ *           QCAFRM_NOHEAD       Header expected but not found.
+ *           QCAFRM_INVLEN       QCA7K frame length is invalid.
+ *           QCAFRM_NOTAIL       Footer expected but not found.
+ *           > 0                 Number of bytes in the fully received
+ *                               Ethernet frame.
+ */
+
+s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte);
+
+#endif /* _QCA_FRAMING_H */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_debug.c b/marvell/linux/drivers/net/ethernet/qualcomm/qca_debug.c
new file mode 100644
index 0000000..66229b3
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -0,0 +1,298 @@
+/*
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   This file contains debugging routines for use in the QCA7K driver.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "qca_7k.h"
+#include "qca_debug.h"
+
+#define QCASPI_MAX_REGS 0x20
+
+#define QCASPI_RX_MAX_FRAMES 4
+
+static const u16 qcaspi_spi_regs[] = {
+	SPI_REG_BFR_SIZE,
+	SPI_REG_WRBUF_SPC_AVA,
+	SPI_REG_RDBUF_BYTE_AVA,
+	SPI_REG_SPI_CONFIG,
+	SPI_REG_SPI_STATUS,
+	SPI_REG_INTR_CAUSE,
+	SPI_REG_INTR_ENABLE,
+	SPI_REG_RDBUF_WATERMARK,
+	SPI_REG_WRBUF_WATERMARK,
+	SPI_REG_SIGNATURE,
+	SPI_REG_ACTION_CTRL
+};
+
+/* The order of these strings must match the order of the fields in
+ * struct qcaspi_stats
+ * See qca_spi.h
+ */
+static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"Triggered resets",
+	"Device resets",
+	"Reset timeouts",
+	"Read errors",
+	"Write errors",
+	"Read buffer errors",
+	"Write buffer errors",
+	"Out of memory",
+	"Write buffer misses",
+	"Transmit ring full",
+	"SPI errors",
+	"Write verify errors",
+	"Buffer available errors",
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static int
+qcaspi_info_show(struct seq_file *s, void *what)
+{
+	struct qcaspi *qca = s->private;
+
+	seq_printf(s, "RX buffer size   : %lu\n",
+		   (unsigned long)qca->buffer_size);
+
+	seq_puts(s, "TX ring state    : ");
+
+	if (qca->txr.skb[qca->txr.head] == NULL)
+		seq_puts(s, "empty");
+	else if (qca->txr.skb[qca->txr.tail])
+		seq_puts(s, "full");
+	else
+		seq_puts(s, "in use");
+
+	seq_puts(s, "\n");
+
+	seq_printf(s, "TX ring size     : %u\n",
+		   qca->txr.size);
+
+	seq_printf(s, "Sync state       : %u (",
+		   (unsigned int)qca->sync);
+	switch (qca->sync) {
+	case QCASPI_SYNC_UNKNOWN:
+		seq_puts(s, "QCASPI_SYNC_UNKNOWN");
+		break;
+	case QCASPI_SYNC_RESET:
+		seq_puts(s, "QCASPI_SYNC_RESET");
+		break;
+	case QCASPI_SYNC_READY:
+		seq_puts(s, "QCASPI_SYNC_READY");
+		break;
+	default:
+		seq_puts(s, "INVALID");
+		break;
+	}
+	seq_puts(s, ")\n");
+
+	seq_printf(s, "IRQ              : %d\n",
+		   qca->spi_dev->irq);
+	seq_printf(s, "INTR REQ         : %u\n",
+		   qca->intr_req);
+	seq_printf(s, "INTR SVC         : %u\n",
+		   qca->intr_svc);
+
+	seq_printf(s, "SPI max speed    : %lu\n",
+		   (unsigned long)qca->spi_dev->max_speed_hz);
+	seq_printf(s, "SPI mode         : %x\n",
+		   qca->spi_dev->mode);
+	seq_printf(s, "SPI chip select  : %u\n",
+		   (unsigned int)qca->spi_dev->chip_select);
+	seq_printf(s, "SPI legacy mode  : %u\n",
+		   (unsigned int)qca->legacy_mode);
+	seq_printf(s, "SPI burst length : %u\n",
+		   (unsigned int)qca->burst_len);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(qcaspi_info);
+
+void
+qcaspi_init_device_debugfs(struct qcaspi *qca)
+{
+	qca->device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev),
+					      NULL);
+
+	debugfs_create_file("info", S_IFREG | 0444, qca->device_root, qca,
+			    &qcaspi_info_fops);
+}
+
+void
+qcaspi_remove_device_debugfs(struct qcaspi *qca)
+{
+	debugfs_remove_recursive(qca->device_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+void
+qcaspi_init_device_debugfs(struct qcaspi *qca)
+{
+}
+
+void
+qcaspi_remove_device_debugfs(struct qcaspi *qca)
+{
+}
+
+#endif
+
+static void
+qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
+	strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
+	strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
+	strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev),
+		sizeof(p->bus_info));
+}
+
+static int
+qcaspi_get_link_ksettings(struct net_device *dev,
+			  struct ethtool_link_ksettings *cmd)
+{
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+
+	cmd->base.speed = SPEED_10;
+	cmd->base.duplex = DUPLEX_HALF;
+	cmd->base.port = PORT_OTHER;
+	cmd->base.autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static void
+qcaspi_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+	struct qcaspi_stats *st = &qca->stats;
+
+	memcpy(data, st, ARRAY_SIZE(qcaspi_gstrings_stats) * sizeof(u64));
+}
+
+static void
+qcaspi_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &qcaspi_gstrings_stats,
+		       sizeof(qcaspi_gstrings_stats));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static int
+qcaspi_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(qcaspi_gstrings_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+qcaspi_get_regs_len(struct net_device *dev)
+{
+	return sizeof(u32) * QCASPI_MAX_REGS;
+}
+
+static void
+qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+	u32 *regs_buff = p;
+	unsigned int i;
+
+	regs->version = 1;
+	memset(regs_buff, 0, sizeof(u32) * QCASPI_MAX_REGS);
+
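+	/* Register addresses step in units of 0x100, so addr >> 8 yields a
+	 * compact index into the dump buffer.
+	 */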
+	for (i = 0; i < ARRAY_SIZE(qcaspi_spi_regs); i++) {
+		u16 offset, value;
+
+		qcaspi_read_register(qca, qcaspi_spi_regs[i], &value);
+		offset = qcaspi_spi_regs[i] >> 8;
+		regs_buff[offset] = value;
+	}
+}
+
+static void
+qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
+	ring->tx_max_pending = TX_RING_MAX_LEN;
+	ring->rx_pending = QCASPI_RX_MAX_FRAMES;
+	ring->tx_pending = qca->txr.count;
+}
+
+static int
+qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	if (ring->rx_pending != QCASPI_RX_MAX_FRAMES ||
+	    (ring->rx_mini_pending) ||
+	    (ring->rx_jumbo_pending))
+		return -EINVAL;
+
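+	/* Park the SPI worker thread so the TX ring size cannot change
+	 * underneath an in-flight transfer.
+	 */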
+	if (qca->spi_thread)
+		kthread_park(qca->spi_thread);
+
+	qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
+	qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+
+	if (qca->spi_thread)
+		kthread_unpark(qca->spi_thread);
+
+	return 0;
+}
+
+static const struct ethtool_ops qcaspi_ethtool_ops = {
+	.get_drvinfo = qcaspi_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_ethtool_stats = qcaspi_get_ethtool_stats,
+	.get_strings = qcaspi_get_strings,
+	.get_sset_count = qcaspi_get_sset_count,
+	.get_regs_len = qcaspi_get_regs_len,
+	.get_regs = qcaspi_get_regs,
+	.get_ringparam = qcaspi_get_ringparam,
+	.set_ringparam = qcaspi_set_ringparam,
+	.get_link_ksettings = qcaspi_get_link_ksettings,
+};
+
+void qcaspi_set_ethtool_ops(struct net_device *dev)
+{
+	dev->ethtool_ops = &qcaspi_ethtool_ops;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_debug.h b/marvell/linux/drivers/net/ethernet/qualcomm/qca_debug.h
new file mode 100644
index 0000000..46a7858
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_debug.h
@@ -0,0 +1,34 @@
+/*
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   This file contains debugging routines for use in the QCA7K driver.
+ */
+
+#ifndef _QCA_DEBUG_H
+#define _QCA_DEBUG_H
+
+#include "qca_spi.h"
+
+void qcaspi_init_device_debugfs(struct qcaspi *qca);
+
+void qcaspi_remove_device_debugfs(struct qcaspi *qca);
+
+void qcaspi_set_ethtool_ops(struct net_device *dev);
+
+#endif /* _QCA_DEBUG_H */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_spi.c b/marvell/linux/drivers/net/ethernet/qualcomm/qca_spi.c
new file mode 100644
index 0000000..f051086
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -0,0 +1,1050 @@
+/*
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   This module implements the Qualcomm Atheros SPI protocol for a
+ *   kernel-based SPI device; it is essentially an Ethernet-to-SPI
+ *   serial converter.
+ */
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "qca_7k.h"
+#include "qca_7k_common.h"
+#include "qca_debug.h"
+#include "qca_spi.h"
+
+#define MAX_DMA_BURST_LEN 5000
+
+/*   Module parameters     */
+#define QCASPI_CLK_SPEED_MIN 1000000
+#define QCASPI_CLK_SPEED_MAX 16000000
+#define QCASPI_CLK_SPEED     8000000
+static int qcaspi_clkspeed;
+module_param(qcaspi_clkspeed, int, 0);
+MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000.");
+
+#define QCASPI_BURST_LEN_MIN 1
+#define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN
+static int qcaspi_burst_len = MAX_DMA_BURST_LEN;
+module_param(qcaspi_burst_len, int, 0);
+MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000.");
+
+#define QCASPI_PLUGGABLE_MIN 0
+#define QCASPI_PLUGGABLE_MAX 1
+static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
+module_param(qcaspi_pluggable, int, 0);
+MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
+
+#define QCASPI_WRITE_VERIFY_MIN 0
+#define QCASPI_WRITE_VERIFY_MAX 3
+static int wr_verify = QCASPI_WRITE_VERIFY_MIN;
+module_param(wr_verify, int, 0);
+MODULE_PARM_DESC(wr_verify, "SPI register write verify trials. Use 0-3.");
+
+#define QCASPI_TX_TIMEOUT (1 * HZ)
+#define QCASPI_QCA7K_REBOOT_TIME_MS 1000
+
+static void
+start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause)
+{
+	*intr_cause = 0;
+
+	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
+	qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
+	netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause);
+}
+
+static void
+end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause)
+{
+	u16 intr_enable = (SPI_INT_CPU_ON |
+			   SPI_INT_PKT_AVLBL |
+			   SPI_INT_RDBUF_ERR |
+			   SPI_INT_WRBUF_ERR);
+
+	qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0);
+	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify);
+	netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause);
+}
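+
+/* A sketch of the intended calling pattern (see qcaspi_spi_thread
+ * below): interrupts stay masked between the two calls, so no event
+ * can be raised and lost while the cause bits are being serviced:
+ *
+ *	u16 intr_cause;
+ *
+ *	start_spi_intr_handling(qca, &intr_cause);
+ *	... handle SPI_INT_CPU_ON / SPI_INT_PKT_AVLBL / buffer errors ...
+ *	end_spi_intr_handling(qca, intr_cause);
+ */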
+
+static u32
+qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
+{
+	__be16 cmd;
+	struct spi_message msg;
+	struct spi_transfer transfer[2];
+	int ret;
+
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
+	cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
+	transfer[0].tx_buf = &cmd;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].tx_buf = src;
+	transfer[1].len = len;
+
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
+		qcaspi_spi_error(qca);
+		return 0;
+	}
+
+	return len;
+}
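+
+/* In burst mode a transfer is one SPI message under a single chip
+ * select: a 16-bit big-endian command word followed by the payload,
+ *
+ *	| QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL (2 bytes) | data (len bytes) |
+ *
+ * whereas legacy mode (qcaspi_write_legacy below) expects the command
+ * word to have been sent as a separate message via qcaspi_tx_cmd().
+ */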
+
+static u32
+qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
+{
+	struct spi_message msg;
+	struct spi_transfer transfer;
+	int ret;
+
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
+	transfer.tx_buf = src;
+	transfer.len = len;
+
+	spi_message_add_tail(&transfer, &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (ret || (msg.actual_length != len)) {
+		qcaspi_spi_error(qca);
+		return 0;
+	}
+
+	return len;
+}
+
+static u32
+qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
+{
+	struct spi_message msg;
+	__be16 cmd;
+	struct spi_transfer transfer[2];
+	int ret;
+
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
+	cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
+	transfer[0].tx_buf = &cmd;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].rx_buf = dst;
+	transfer[1].len = len;
+
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
+		qcaspi_spi_error(qca);
+		return 0;
+	}
+
+	return len;
+}
+
+static u32
+qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
+{
+	struct spi_message msg;
+	struct spi_transfer transfer;
+	int ret;
+
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
+	transfer.rx_buf = dst;
+	transfer.len = len;
+
+	spi_message_add_tail(&transfer, &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (ret || (msg.actual_length != len)) {
+		qcaspi_spi_error(qca);
+		return 0;
+	}
+
+	return len;
+}
+
+static int
+qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
+{
+	__be16 tx_data;
+	struct spi_message msg;
+	struct spi_transfer transfer;
+	int ret;
+
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
+	tx_data = cpu_to_be16(cmd);
+	transfer.len = sizeof(cmd);
+	transfer.tx_buf = &tx_data;
+	spi_message_add_tail(&transfer, &msg);
+
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (!ret)
+		ret = msg.status;
+
+	if (ret)
+		qcaspi_spi_error(qca);
+
+	return ret;
+}
+
+static int
+qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
+{
+	u32 count;
+	u32 written;
+	u32 offset;
+	u32 len;
+
+	len = skb->len;
+
+	qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify);
+	if (qca->legacy_mode)
+		qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
+
+	offset = 0;
+	while (len) {
+		count = len;
+		if (count > qca->burst_len)
+			count = qca->burst_len;
+
+		if (qca->legacy_mode) {
+			written = qcaspi_write_legacy(qca,
+						      skb->data + offset,
+						      count);
+		} else {
+			written = qcaspi_write_burst(qca,
+						     skb->data + offset,
+						     count);
+		}
+
+		if (written != count)
+			return -1;
+
+		offset += count;
+		len -= count;
+	}
+
+	return 0;
+}
+
+static int
+qcaspi_transmit(struct qcaspi *qca)
+{
+	struct net_device_stats *n_stats = &qca->net_dev->stats;
+	u16 available = 0;
+	u32 pkt_len;
+	u16 new_head;
+	u16 packets = 0;
+
+	if (qca->txr.skb[qca->txr.head] == NULL)
+		return 0;
+
+	qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);
+
+	if (available > QCASPI_HW_BUF_LEN) {
+		/* This can only happen due to interference on the
+		 * SPI line, so retry later ...
+		 */
+		qca->stats.buf_avail_err++;
+		return -1;
+	}
+
+	while (qca->txr.skb[qca->txr.head]) {
+		pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
+
+		if (available < pkt_len) {
+			if (packets == 0)
+				qca->stats.write_buf_miss++;
+			break;
+		}
+
+		if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {
+			qca->stats.write_err++;
+			return -1;
+		}
+
+		packets++;
+		n_stats->tx_packets++;
+		n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;
+		available -= pkt_len;
+
+		/* remove the skb from the queue */
+		/* XXX After inconsistent lock states netif_tx_lock()
+		 * has been replaced by netif_tx_lock_bh() and so on.
+		 */
+		netif_tx_lock_bh(qca->net_dev);
+		dev_kfree_skb(qca->txr.skb[qca->txr.head]);
+		qca->txr.skb[qca->txr.head] = NULL;
+		qca->txr.size -= pkt_len;
+		new_head = qca->txr.head + 1;
+		if (new_head >= qca->txr.count)
+			new_head = 0;
+		qca->txr.head = new_head;
+		if (netif_queue_stopped(qca->net_dev))
+			netif_wake_queue(qca->net_dev);
+		netif_tx_unlock_bh(qca->net_dev);
+	}
+
+	return 0;
+}
+
+static int
+qcaspi_receive(struct qcaspi *qca)
+{
+	struct net_device *net_dev = qca->net_dev;
+	struct net_device_stats *n_stats = &net_dev->stats;
+	u16 available = 0;
+	u32 bytes_read;
+	u8 *cp;
+
+	/* Allocate rx SKB if we don't have one available. */
+	if (!qca->rx_skb) {
+		qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+							net_dev->mtu +
+							VLAN_ETH_HLEN);
+		if (!qca->rx_skb) {
+			netdev_dbg(net_dev, "out of RX resources\n");
+			qca->stats.out_of_mem++;
+			return -1;
+		}
+	}
+
+	/* Read the packet size. */
+	qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
+
+	netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
+		   available);
+
+	if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
+		/* This can only happen due to interference on the
+		 * SPI line, so retry later ...
+		 */
+		qca->stats.buf_avail_err++;
+		return -1;
+	} else if (available == 0) {
+		netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
+		return -1;
+	}
+
+	qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify);
+
+	if (qca->legacy_mode)
+		qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
+
+	while (available) {
+		u32 count = available;
+
+		if (count > qca->burst_len)
+			count = qca->burst_len;
+
+		if (qca->legacy_mode) {
+			bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer,
+							count);
+		} else {
+			bytes_read = qcaspi_read_burst(qca, qca->rx_buffer,
+						       count);
+		}
+
+		netdev_dbg(net_dev, "available: %d, bytes read: %d\n",
+			   available, bytes_read);
+
+		if (bytes_read) {
+			available -= bytes_read;
+		} else {
+			qca->stats.read_err++;
+			return -1;
+		}
+
+		cp = qca->rx_buffer;
+
+		while ((bytes_read--) && (qca->rx_skb)) {
+			s32 retcode;
+
+			retcode = qcafrm_fsm_decode(&qca->frm_handle,
+						    qca->rx_skb->data,
+						    skb_tailroom(qca->rx_skb),
+						    *cp);
+			cp++;
+			switch (retcode) {
+			case QCAFRM_GATHER:
+			case QCAFRM_NOHEAD:
+				break;
+			case QCAFRM_NOTAIL:
+				netdev_dbg(net_dev, "no RX tail\n");
+				n_stats->rx_errors++;
+				n_stats->rx_dropped++;
+				break;
+			case QCAFRM_INVLEN:
+				netdev_dbg(net_dev, "invalid RX length\n");
+				n_stats->rx_errors++;
+				n_stats->rx_dropped++;
+				break;
+			default:
+				qca->rx_skb->dev = qca->net_dev;
+				n_stats->rx_packets++;
+				n_stats->rx_bytes += retcode;
+				skb_put(qca->rx_skb, retcode);
+				qca->rx_skb->protocol = eth_type_trans(
+					qca->rx_skb, qca->rx_skb->dev);
+				skb_checksum_none_assert(qca->rx_skb);
+				netif_rx_ni(qca->rx_skb);
+				qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+					net_dev->mtu + VLAN_ETH_HLEN);
+				if (!qca->rx_skb) {
+					netdev_dbg(net_dev, "out of RX resources\n");
+					n_stats->rx_errors++;
+					qca->stats.out_of_mem++;
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*   Check that the tx ring stores no more bytes
+ *   than fit into the internal QCA buffer.
+ */
+
+static int
+qcaspi_tx_ring_has_space(struct tx_ring *txr)
+{
+	if (txr->skb[txr->tail])
+		return 0;
+
+	return (txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
+}
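+
+/* Worked example, assuming QCASPI_HW_BUF_LEN is the size of the
+ * QCA7000 write buffer: a free tail slot alone is not enough, the
+ * ring must also still have room for a worst-case frame, i.e.
+ *
+ *	txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN
+ *
+ * which keeps the queued bytes within what the device side can
+ * eventually absorb.
+ */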
+
+/*   Flush the tx ring. This function is only safe to
+ *   call from the qcaspi_spi_thread.
+ */
+
+static void
+qcaspi_flush_tx_ring(struct qcaspi *qca)
+{
+	int i;
+
+	/* XXX After inconsistent lock states netif_tx_lock()
+	 * has been replaced by netif_tx_lock_bh() and so on.
+	 */
+	netif_tx_lock_bh(qca->net_dev);
+	for (i = 0; i < TX_RING_MAX_LEN; i++) {
+		if (qca->txr.skb[i]) {
+			dev_kfree_skb(qca->txr.skb[i]);
+			qca->txr.skb[i] = NULL;
+			qca->net_dev->stats.tx_dropped++;
+		}
+	}
+	qca->txr.tail = 0;
+	qca->txr.head = 0;
+	qca->txr.size = 0;
+	netif_tx_unlock_bh(qca->net_dev);
+}
+
+static void
+qcaspi_qca7k_sync(struct qcaspi *qca, int event)
+{
+	u16 signature = 0;
+	u16 spi_config;
+	u16 wrbuf_space = 0;
+
+	if (event == QCASPI_EVENT_CPUON) {
+		/* Read signature twice, if not valid
+		 * go back to unknown state.
+		 */
+		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+		if (signature != QCASPI_GOOD_SIGNATURE) {
+			qca->sync = QCASPI_SYNC_UNKNOWN;
+			netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
+		} else {
+			/* ensure that the WRBUF is empty */
+			qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA,
+					     &wrbuf_space);
+			if (wrbuf_space != QCASPI_HW_BUF_LEN) {
+				netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n");
+				qca->sync = QCASPI_SYNC_UNKNOWN;
+			} else {
+				netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n");
+				qca->sync = QCASPI_SYNC_READY;
+				return;
+			}
+		}
+	}
+
+	switch (qca->sync) {
+	case QCASPI_SYNC_READY:
+		/* Read signature, if not valid go to unknown state. */
+		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+		if (signature != QCASPI_GOOD_SIGNATURE) {
+			qca->sync = QCASPI_SYNC_UNKNOWN;
+			netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
+			/* don't reset right away */
+			return;
+		}
+		break;
+	case QCASPI_SYNC_UNKNOWN:
+		/* Read signature, if not valid stay in unknown state */
+		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+		if (signature != QCASPI_GOOD_SIGNATURE) {
+			netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n");
+			return;
+		}
+
+		/* TODO: use GPIO to reset QCA7000 in legacy mode */
+		netdev_dbg(qca->net_dev, "sync: resetting device.\n");
+		qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config);
+		spi_config |= QCASPI_SLAVE_RESET_BIT;
+		qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0);
+
+		qca->sync = QCASPI_SYNC_RESET;
+		qca->stats.trig_reset++;
+		qca->reset_count = 0;
+		break;
+	case QCASPI_SYNC_RESET:
+		qca->reset_count++;
+		netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
+			   qca->reset_count);
+		if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
+			/* reset did not seem to take place, try again */
+			qca->sync = QCASPI_SYNC_UNKNOWN;
+			qca->stats.reset_timeout++;
+			netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n");
+		}
+		break;
+	}
+}
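+
+/* Driver-side sync state machine, as implemented above:
+ *
+ *	UNKNOWN -- good signature --> slave reset written --> RESET
+ *	RESET   -- CPU on, good signature, empty wrbuf --> READY
+ *	READY   -- bad signature or buffer error --> UNKNOWN
+ *
+ * QCASPI_EVENT_UPDATE drives the periodic checks from the SPI thread;
+ * QCASPI_EVENT_CPUON is passed in when SPI_INT_CPU_ON was raised.
+ */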
+
+static int
+qcaspi_spi_thread(void *data)
+{
+	struct qcaspi *qca = data;
+	u16 intr_cause = 0;
+
+	netdev_info(qca->net_dev, "SPI thread created\n");
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_park()) {
+			netif_tx_disable(qca->net_dev);
+			netif_carrier_off(qca->net_dev);
+			qcaspi_flush_tx_ring(qca);
+			kthread_parkme();
+			if (qca->sync == QCASPI_SYNC_READY) {
+				netif_carrier_on(qca->net_dev);
+				netif_wake_queue(qca->net_dev);
+			}
+			continue;
+		}
+
+		if ((qca->intr_req == qca->intr_svc) &&
+		    !qca->txr.skb[qca->txr.head])
+			schedule();
+
+		set_current_state(TASK_RUNNING);
+
+		netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
+			   qca->intr_req - qca->intr_svc,
+			   qca->txr.skb[qca->txr.head]);
+
+		qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
+
+		if (qca->sync != QCASPI_SYNC_READY) {
+			netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n",
+				   (unsigned int)qca->sync);
+			netif_stop_queue(qca->net_dev);
+			netif_carrier_off(qca->net_dev);
+			qcaspi_flush_tx_ring(qca);
+			msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
+		}
+
+		if (qca->intr_svc != qca->intr_req) {
+			qca->intr_svc = qca->intr_req;
+			start_spi_intr_handling(qca, &intr_cause);
+
+			if (intr_cause & SPI_INT_CPU_ON) {
+				qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);
+
+				/* Frame decoding in progress */
+				if (qca->frm_handle.state != qca->frm_handle.init)
+					qca->net_dev->stats.rx_dropped++;
+
+				qcafrm_fsm_init_spi(&qca->frm_handle);
+				qca->stats.device_reset++;
+
+				/* not synced. */
+				if (qca->sync != QCASPI_SYNC_READY)
+					continue;
+
+				netif_wake_queue(qca->net_dev);
+				netif_carrier_on(qca->net_dev);
+			}
+
+			if (intr_cause & SPI_INT_RDBUF_ERR) {
+				/* restart sync */
+				netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
+				qca->stats.read_buf_err++;
+				qca->sync = QCASPI_SYNC_UNKNOWN;
+				continue;
+			}
+
+			if (intr_cause & SPI_INT_WRBUF_ERR) {
+				/* restart sync */
+				netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
+				qca->stats.write_buf_err++;
+				qca->sync = QCASPI_SYNC_UNKNOWN;
+				continue;
+			}
+
+			/* can only handle other interrupts
+			 * if sync has occurred
+			 */
+			if (qca->sync == QCASPI_SYNC_READY) {
+				if (intr_cause & SPI_INT_PKT_AVLBL)
+					qcaspi_receive(qca);
+			}
+
+			end_spi_intr_handling(qca, intr_cause);
+		}
+
+		if (qca->sync == QCASPI_SYNC_READY)
+			qcaspi_transmit(qca);
+	}
+	set_current_state(TASK_RUNNING);
+	netdev_info(qca->net_dev, "SPI thread exit\n");
+
+	return 0;
+}
+
+static irqreturn_t
+qcaspi_intr_handler(int irq, void *data)
+{
+	struct qcaspi *qca = data;
+
+	qca->intr_req++;
+	if (qca->spi_thread &&
+	    qca->spi_thread->state != TASK_RUNNING)
+		wake_up_process(qca->spi_thread);
+
+	return IRQ_HANDLED;
+}
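+
+/* The hard IRQ handler and the SPI thread synchronize through the two
+ * free-running counters intr_req/intr_svc rather than a boolean flag:
+ * the handler only increments intr_req, the thread latches it into
+ * intr_svc before servicing, and it schedule()s only while both match
+ * and the tx ring is empty.  An interrupt arriving mid-service leaves
+ * the counters unequal and simply forces another pass.
+ */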
+
+static int
+qcaspi_netdev_open(struct net_device *dev)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+	int ret = 0;
+
+	if (!qca)
+		return -EINVAL;
+
+	qca->intr_req = 1;
+	qca->intr_svc = 0;
+	qca->sync = QCASPI_SYNC_UNKNOWN;
+	qcafrm_fsm_init_spi(&qca->frm_handle);
+
+	qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
+				      qca, "%s", dev->name);
+
+	if (IS_ERR(qca->spi_thread)) {
+		netdev_err(dev, "%s: unable to start kernel thread.\n",
+			   QCASPI_DRV_NAME);
+		return PTR_ERR(qca->spi_thread);
+	}
+
+	ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
+			  dev->name, qca);
+	if (ret) {
+		netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
+			   QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
+		kthread_stop(qca->spi_thread);
+		return ret;
+	}
+
+	/* SPI thread takes care of TX queue */
+
+	return 0;
+}
+
+static int
+qcaspi_netdev_close(struct net_device *dev)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
+	free_irq(qca->spi_dev->irq, qca);
+
+	kthread_stop(qca->spi_thread);
+	qca->spi_thread = NULL;
+	qcaspi_flush_tx_ring(qca);
+
+	return 0;
+}
+
+static netdev_tx_t
+qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u32 frame_len;
+	u8 *ptmp;
+	struct qcaspi *qca = netdev_priv(dev);
+	u16 new_tail;
+	struct sk_buff *tskb;
+	u8 pad_len = 0;
+
+	if (skb->len < QCAFRM_MIN_LEN)
+		pad_len = QCAFRM_MIN_LEN - skb->len;
+
+	if (qca->txr.skb[qca->txr.tail]) {
+		netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
+		netif_stop_queue(qca->net_dev);
+		qca->stats.ring_full++;
+		return NETDEV_TX_BUSY;
+	}
+
+	if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) ||
+	    (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) {
+		tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN,
+				       QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC);
+		if (!tskb) {
+			qca->stats.out_of_mem++;
+			return NETDEV_TX_BUSY;
+		}
+		dev_kfree_skb(skb);
+		skb = tskb;
+	}
+
+	frame_len = skb->len + pad_len;
+
+	ptmp = skb_push(skb, QCAFRM_HEADER_LEN);
+	qcafrm_create_header(ptmp, frame_len);
+
+	if (pad_len) {
+		ptmp = skb_put_zero(skb, pad_len);
+	}
+
+	ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
+	qcafrm_create_footer(ptmp);
+
+	netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n",
+		   skb->len);
+
+	qca->txr.size += skb->len + QCASPI_HW_PKT_LEN;
+
+	new_tail = qca->txr.tail + 1;
+	if (new_tail >= qca->txr.count)
+		new_tail = 0;
+
+	qca->txr.skb[qca->txr.tail] = skb;
+	qca->txr.tail = new_tail;
+
+	if (!qcaspi_tx_ring_has_space(&qca->txr)) {
+		netif_stop_queue(qca->net_dev);
+		qca->stats.ring_full++;
+	}
+
+	netif_trans_update(dev);
+
+	if (qca->spi_thread &&
+	    qca->spi_thread->state != TASK_RUNNING)
+		wake_up_process(qca->spi_thread);
+
+	return NETDEV_TX_OK;
+}
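+
+/* Frame layout produced above, using the QCAFRM_* framing helpers
+ * from qca_7k_common.h:
+ *
+ *	| header (QCAFRM_HEADER_LEN) | payload | zero pad | footer (QCAFRM_FOOTER_LEN) |
+ *
+ * where the zero pad only appears for payloads shorter than
+ * QCAFRM_MIN_LEN, i.e. below the Ethernet minimum frame size.
+ */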
+
+static void
+qcaspi_netdev_tx_timeout(struct net_device *dev)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
+		    jiffies, jiffies - dev_trans_start(dev));
+	qca->net_dev->stats.tx_errors++;
+	/* Trigger tx queue flush and QCA7000 reset */
+	qca->sync = QCASPI_SYNC_UNKNOWN;
+
+	if (qca->spi_thread)
+		wake_up_process(qca->spi_thread);
+}
+
+static int
+qcaspi_netdev_init(struct net_device *dev)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	dev->mtu = QCAFRM_MAX_MTU;
+	dev->type = ARPHRD_ETHER;
+	qca->burst_len = qcaspi_burst_len;
+	qca->spi_thread = NULL;
+	qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+		QCAFRM_FOOTER_LEN + 4) * 4;
+
+	memset(&qca->stats, 0, sizeof(struct qcaspi_stats));
+
+	qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL);
+	if (!qca->rx_buffer)
+		return -ENOBUFS;
+
+	qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
+						VLAN_ETH_HLEN);
+	if (!qca->rx_skb) {
+		kfree(qca->rx_buffer);
+		netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
+		return -ENOBUFS;
+	}
+
+	return 0;
+}
+
+static void
+qcaspi_netdev_uninit(struct net_device *dev)
+{
+	struct qcaspi *qca = netdev_priv(dev);
+
+	kfree(qca->rx_buffer);
+	qca->buffer_size = 0;
+	dev_kfree_skb(qca->rx_skb);
+}
+
+static const struct net_device_ops qcaspi_netdev_ops = {
+	.ndo_init = qcaspi_netdev_init,
+	.ndo_uninit = qcaspi_netdev_uninit,
+	.ndo_open = qcaspi_netdev_open,
+	.ndo_stop = qcaspi_netdev_close,
+	.ndo_start_xmit = qcaspi_netdev_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_tx_timeout = qcaspi_netdev_tx_timeout,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static void
+qcaspi_netdev_setup(struct net_device *dev)
+{
+	struct qcaspi *qca = NULL;
+
+	dev->netdev_ops = &qcaspi_netdev_ops;
+	qcaspi_set_ethtool_ops(dev);
+	dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->tx_queue_len = 100;
+
+	/* MTU range: 46 - 1500 */
+	dev->min_mtu = QCAFRM_MIN_MTU;
+	dev->max_mtu = QCAFRM_MAX_MTU;
+
+	qca = netdev_priv(dev);
+	memset(qca, 0, sizeof(struct qcaspi));
+
+	memset(&qca->txr, 0, sizeof(qca->txr));
+	qca->txr.count = TX_RING_MAX_LEN;
+}
+
+static const struct of_device_id qca_spi_of_match[] = {
+	{ .compatible = "qca,qca7000" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qca_spi_of_match);
+
+static int
+qca_spi_probe(struct spi_device *spi)
+{
+	struct qcaspi *qca = NULL;
+	struct net_device *qcaspi_devs = NULL;
+	u8 legacy_mode = 0;
+	u16 signature;
+	const char *mac;
+
+	if (!spi->dev.of_node) {
+		dev_err(&spi->dev, "Missing device tree\n");
+		return -EINVAL;
+	}
+
+	legacy_mode = of_property_read_bool(spi->dev.of_node,
+					    "qca,legacy-mode");
+
+	if (qcaspi_clkspeed)
+		spi->max_speed_hz = qcaspi_clkspeed;
+	else if (!spi->max_speed_hz)
+		spi->max_speed_hz = QCASPI_CLK_SPEED;
+
+	if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
+	    spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
+		dev_err(&spi->dev, "Invalid clkspeed: %u\n",
+			spi->max_speed_hz);
+		return -EINVAL;
+	}
+
+	if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
+	    (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
+		dev_err(&spi->dev, "Invalid burst len: %d\n",
+			qcaspi_burst_len);
+		return -EINVAL;
+	}
+
+	if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
+	    (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
+		dev_err(&spi->dev, "Invalid pluggable: %d\n",
+			qcaspi_pluggable);
+		return -EINVAL;
+	}
+
+	if (wr_verify < QCASPI_WRITE_VERIFY_MIN ||
+	    wr_verify > QCASPI_WRITE_VERIFY_MAX) {
+		dev_err(&spi->dev, "Invalid write verify: %d\n",
+			wr_verify);
+		return -EINVAL;
+	}
+
+	dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
+		 QCASPI_DRV_VERSION,
+		 spi->max_speed_hz,
+		 qcaspi_burst_len,
+		 qcaspi_pluggable);
+
+	spi->mode = SPI_MODE_3;
+	if (spi_setup(spi) < 0) {
+		dev_err(&spi->dev, "Unable to setup SPI device\n");
+		return -EFAULT;
+	}
+
+	qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi));
+	if (!qcaspi_devs)
+		return -ENOMEM;
+
+	qcaspi_netdev_setup(qcaspi_devs);
+	SET_NETDEV_DEV(qcaspi_devs, &spi->dev);
+
+	qca = netdev_priv(qcaspi_devs);
+	if (!qca) {
+		free_netdev(qcaspi_devs);
+		dev_err(&spi->dev, "Failed to retrieve private structure\n");
+		return -ENOMEM;
+	}
+	qca->net_dev = qcaspi_devs;
+	qca->spi_dev = spi;
+	qca->legacy_mode = legacy_mode;
+
+	spi_set_drvdata(spi, qcaspi_devs);
+
+	mac = of_get_mac_address(spi->dev.of_node);
+
+	if (!IS_ERR(mac))
+		ether_addr_copy(qca->net_dev->dev_addr, mac);
+
+	if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
+		eth_hw_addr_random(qca->net_dev);
+		dev_info(&spi->dev, "Using random MAC address: %pM\n",
+			 qca->net_dev->dev_addr);
+	}
+
+	netif_carrier_off(qca->net_dev);
+
+	if (!qcaspi_pluggable) {
+		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+		qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
+
+		if (signature != QCASPI_GOOD_SIGNATURE) {
+			dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
+				signature);
+			free_netdev(qcaspi_devs);
+			return -EFAULT;
+		}
+	}
+
+	if (register_netdev(qcaspi_devs)) {
+		dev_err(&spi->dev, "Unable to register net device %s\n",
+			qcaspi_devs->name);
+		free_netdev(qcaspi_devs);
+		return -EFAULT;
+	}
+
+	qcaspi_init_device_debugfs(qca);
+
+	return 0;
+}
+
+static int
+qca_spi_remove(struct spi_device *spi)
+{
+	struct net_device *qcaspi_devs = spi_get_drvdata(spi);
+	struct qcaspi *qca = netdev_priv(qcaspi_devs);
+
+	qcaspi_remove_device_debugfs(qca);
+
+	unregister_netdev(qcaspi_devs);
+	free_netdev(qcaspi_devs);
+
+	return 0;
+}
+
+static const struct spi_device_id qca_spi_id[] = {
+	{ "qca7000", 0 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, qca_spi_id);
+
+static struct spi_driver qca_spi_driver = {
+	.driver	= {
+		.name	= QCASPI_DRV_NAME,
+		.of_match_table = qca_spi_of_match,
+	},
+	.id_table = qca_spi_id,
+	.probe    = qca_spi_probe,
+	.remove   = qca_spi_remove,
+};
+module_spi_driver(qca_spi_driver);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QCASPI_DRV_VERSION);
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_spi.h b/marvell/linux/drivers/net/ethernet/qualcomm/qca_spi.h
new file mode 100644
index 0000000..203941d
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -0,0 +1,108 @@
+/*
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2014, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   Qualcomm Atheros SPI driver definitions.
+ *
+ *   This header defines the constants and data structures used by the
+ *   Qualcomm Atheros QCA7000 SPI driver.
+ */
+
+#ifndef _QCA_SPI_H
+#define _QCA_SPI_H
+
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "qca_7k_common.h"
+
+#define QCASPI_DRV_VERSION "0.2.7-i"
+#define QCASPI_DRV_NAME    "qcaspi"
+
+#define QCASPI_GOOD_SIGNATURE 0xAA55
+
+#define TX_RING_MAX_LEN 10
+#define TX_RING_MIN_LEN 2
+
+/* sync related constants */
+#define QCASPI_SYNC_UNKNOWN 0
+#define QCASPI_SYNC_RESET   1
+#define QCASPI_SYNC_READY   2
+
+#define QCASPI_RESET_TIMEOUT 10
+
+/* sync events */
+#define QCASPI_EVENT_UPDATE 0
+#define QCASPI_EVENT_CPUON  1
+
+struct tx_ring {
+	struct sk_buff *skb[TX_RING_MAX_LEN];
+	u16 head;
+	u16 tail;
+	u16 size;
+	u16 count;
+};
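+
+/* The ring is single producer/single consumer: ndo_start_xmit fills
+ * skb[tail] and advances tail, the SPI thread drains skb[head] and
+ * advances head, and a non-NULL skb[tail] means the ring is full.
+ * size counts the queued bytes including per-frame HW overhead.
+ */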
+
+struct qcaspi_stats {
+	u64 trig_reset;
+	u64 device_reset;
+	u64 reset_timeout;
+	u64 read_err;
+	u64 write_err;
+	u64 read_buf_err;
+	u64 write_buf_err;
+	u64 out_of_mem;
+	u64 write_buf_miss;
+	u64 ring_full;
+	u64 spi_err;
+	u64 write_verify_failed;
+	u64 buf_avail_err;
+};
+
+struct qcaspi {
+	struct net_device *net_dev;
+	struct spi_device *spi_dev;
+	struct task_struct *spi_thread;
+
+	struct tx_ring txr;
+	struct qcaspi_stats stats;
+
+	u8 *rx_buffer;
+	u32 buffer_size;
+	u8 sync;
+
+	struct qcafrm_handle frm_handle;
+	struct sk_buff *rx_skb;
+
+	unsigned int intr_req;
+	unsigned int intr_svc;
+	u16 reset_count;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *device_root;
+#endif
+
+	/* user configurable options */
+	u8 legacy_mode;
+	u16 burst_len;
+};
+
+#endif /* _QCA_SPI_H */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/qca_uart.c b/marvell/linux/drivers/net/ethernet/qualcomm/qca_uart.c
new file mode 100644
index 0000000..ade70f5
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -0,0 +1,422 @@
+/*
+ *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ *   Copyright (c) 2017, I2SE GmbH
+ *
+ *   Permission to use, copy, modify, and/or distribute this software
+ *   for any purpose with or without fee is hereby granted, provided
+ *   that the above copyright notice and this permission notice appear
+ *   in all copies.
+ *
+ *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*   This module implements the Qualcomm Atheros UART protocol for a
+ *   kernel-based UART device; it is essentially an Ethernet-to-UART
+ *   serial converter.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "qca_7k_common.h"
+
+#define QCAUART_DRV_VERSION "0.1.0"
+#define QCAUART_DRV_NAME "qcauart"
+#define QCAUART_TX_TIMEOUT (1 * HZ)
+
+struct qcauart {
+	struct net_device *net_dev;
+	spinlock_t lock;			/* transmit lock */
+	struct work_struct tx_work;		/* Flushes transmit buffer   */
+
+	struct serdev_device *serdev;
+	struct qcafrm_handle frm_handle;
+	struct sk_buff *rx_skb;
+
+	unsigned char *tx_head;			/* pointer to next XMIT byte */
+	int tx_left;				/* bytes left in XMIT queue  */
+	unsigned char *tx_buffer;
+};
+
+static int
+qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
+		size_t count)
+{
+	struct qcauart *qca = serdev_device_get_drvdata(serdev);
+	struct net_device *netdev = qca->net_dev;
+	struct net_device_stats *n_stats = &netdev->stats;
+	size_t i;
+
+	if (!qca->rx_skb) {
+		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
+							netdev->mtu +
+							VLAN_ETH_HLEN);
+		if (!qca->rx_skb) {
+			n_stats->rx_errors++;
+			n_stats->rx_dropped++;
+			return 0;
+		}
+	}
+
+	for (i = 0; i < count; i++) {
+		s32 retcode;
+
+		retcode = qcafrm_fsm_decode(&qca->frm_handle,
+					    qca->rx_skb->data,
+					    skb_tailroom(qca->rx_skb),
+					    data[i]);
+
+		switch (retcode) {
+		case QCAFRM_GATHER:
+		case QCAFRM_NOHEAD:
+			break;
+		case QCAFRM_NOTAIL:
+			netdev_dbg(netdev, "recv: no RX tail\n");
+			n_stats->rx_errors++;
+			n_stats->rx_dropped++;
+			break;
+		case QCAFRM_INVLEN:
+			netdev_dbg(netdev, "recv: invalid RX length\n");
+			n_stats->rx_errors++;
+			n_stats->rx_dropped++;
+			break;
+		default:
+			n_stats->rx_packets++;
+			n_stats->rx_bytes += retcode;
+			skb_put(qca->rx_skb, retcode);
+			qca->rx_skb->protocol = eth_type_trans(
+						qca->rx_skb, qca->rx_skb->dev);
+			skb_checksum_none_assert(qca->rx_skb);
+			netif_rx_ni(qca->rx_skb);
+			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
+								netdev->mtu +
+								VLAN_ETH_HLEN);
+			if (!qca->rx_skb) {
+				netdev_dbg(netdev, "recv: out of RX resources\n");
+				n_stats->rx_errors++;
+				return i;
+			}
+		}
+	}
+
+	return i;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void qcauart_transmit(struct work_struct *work)
+{
+	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
+	struct net_device_stats *n_stats = &qca->net_dev->stats;
+	int written;
+
+	spin_lock_bh(&qca->lock);
+
+	/* First make sure we're connected. */
+	if (!netif_running(qca->net_dev)) {
+		spin_unlock_bh(&qca->lock);
+		return;
+	}
+
+	if (qca->tx_left <= 0) {
+		/* The serial buffer is almost free, so we can start
+		 * transmission of another packet.
+		 */
+		n_stats->tx_packets++;
+		spin_unlock_bh(&qca->lock);
+		netif_wake_queue(qca->net_dev);
+		return;
+	}
+
+	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
+					  qca->tx_left);
+	if (written > 0) {
+		qca->tx_left -= written;
+		qca->tx_head += written;
+	}
+	spin_unlock_bh(&qca->lock);
+}
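+
+/* serdev_device_write_buf() may accept fewer bytes than requested;
+ * the unwritten remainder is tracked in tx_head/tx_left and pushed
+ * out from this work item each time the serdev core reports room via
+ * the write_wakeup callback (qca_tty_wakeup below).
+ */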
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void qca_tty_wakeup(struct serdev_device *serdev)
+{
+	struct qcauart *qca = serdev_device_get_drvdata(serdev);
+
+	schedule_work(&qca->tx_work);
+}
+
+static struct serdev_device_ops qca_serdev_ops = {
+	.receive_buf = qca_tty_receive,
+	.write_wakeup = qca_tty_wakeup,
+};
+
+static int qcauart_netdev_open(struct net_device *dev)
+{
+	struct qcauart *qca = netdev_priv(dev);
+
+	netif_start_queue(qca->net_dev);
+
+	return 0;
+}
+
+static int qcauart_netdev_close(struct net_device *dev)
+{
+	struct qcauart *qca = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	flush_work(&qca->tx_work);
+
+	spin_lock_bh(&qca->lock);
+	qca->tx_left = 0;
+	spin_unlock_bh(&qca->lock);
+
+	return 0;
+}
+
+static netdev_tx_t
+qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_device_stats *n_stats = &dev->stats;
+	struct qcauart *qca = netdev_priv(dev);
+	u8 pad_len = 0;
+	int written;
+	u8 *pos;
+
+	spin_lock(&qca->lock);
+
+	WARN_ON(qca->tx_left);
+
+	if (!netif_running(dev))  {
+		spin_unlock(&qca->lock);
+		netdev_warn(qca->net_dev, "xmit: iface is down\n");
+		goto out;
+	}
+
+	pos = qca->tx_buffer;
+
+	if (skb->len < QCAFRM_MIN_LEN)
+		pad_len = QCAFRM_MIN_LEN - skb->len;
+
+	pos += qcafrm_create_header(pos, skb->len + pad_len);
+
+	memcpy(pos, skb->data, skb->len);
+	pos += skb->len;
+
+	if (pad_len) {
+		memset(pos, 0, pad_len);
+		pos += pad_len;
+	}
+
+	pos += qcafrm_create_footer(pos);
+
+	netif_stop_queue(qca->net_dev);
+
+	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
+					  pos - qca->tx_buffer);
+	if (written > 0) {
+		qca->tx_left = (pos - qca->tx_buffer) - written;
+		qca->tx_head = qca->tx_buffer + written;
+		n_stats->tx_bytes += written;
+	}
+	spin_unlock(&qca->lock);
+
+	netif_trans_update(dev);
+out:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static void qcauart_netdev_tx_timeout(struct net_device *dev)
+{
+	struct qcauart *qca = netdev_priv(dev);
+
+	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
+		    jiffies, jiffies - dev_trans_start(dev));
+	dev->stats.tx_errors++;
+	dev->stats.tx_dropped++;
+}
+
+static int qcauart_netdev_init(struct net_device *dev)
+{
+	struct qcauart *qca = netdev_priv(dev);
+	size_t len;
+
+	/* Finish setting up the device info. */
+	dev->mtu = QCAFRM_MAX_MTU;
+	dev->type = ARPHRD_ETHER;
+
+	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
+	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
+	if (!qca->tx_buffer)
+		return -ENOMEM;
+
+	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
+						qca->net_dev->mtu +
+						VLAN_ETH_HLEN);
+	if (!qca->rx_skb)
+		return -ENOBUFS;
+
+	return 0;
+}
+
+static void qcauart_netdev_uninit(struct net_device *dev)
+{
+	struct qcauart *qca = netdev_priv(dev);
+
+	dev_kfree_skb(qca->rx_skb);
+}
+
+static const struct net_device_ops qcauart_netdev_ops = {
+	.ndo_init = qcauart_netdev_init,
+	.ndo_uninit = qcauart_netdev_uninit,
+	.ndo_open = qcauart_netdev_open,
+	.ndo_stop = qcauart_netdev_close,
+	.ndo_start_xmit = qcauart_netdev_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static void qcauart_netdev_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &qcauart_netdev_ops;
+	dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->tx_queue_len = 100;
+
+	/* MTU range: 46 - 1500 */
+	dev->min_mtu = QCAFRM_MIN_MTU;
+	dev->max_mtu = QCAFRM_MAX_MTU;
+}
+
+static const struct of_device_id qca_uart_of_match[] = {
+	{
+	 .compatible = "qca,qca7000",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qca_uart_of_match);
+
+static int qca_uart_probe(struct serdev_device *serdev)
+{
+	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
+	struct qcauart *qca;
+	const char *mac;
+	u32 speed = 115200;
+	int ret;
+
+	if (!qcauart_dev)
+		return -ENOMEM;
+
+	qcauart_netdev_setup(qcauart_dev);
+	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
+
+	qca = netdev_priv(qcauart_dev);
+	if (!qca) {
+		pr_err("qca_uart: Failed to retrieve private structure\n");
+		ret = -ENOMEM;
+		goto free;
+	}
+	qca->net_dev = qcauart_dev;
+	qca->serdev = serdev;
+	qcafrm_fsm_init_uart(&qca->frm_handle);
+
+	spin_lock_init(&qca->lock);
+	INIT_WORK(&qca->tx_work, qcauart_transmit);
+
+	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
+
+	mac = of_get_mac_address(serdev->dev.of_node);
+
+	if (!IS_ERR(mac))
+		ether_addr_copy(qca->net_dev->dev_addr, mac);
+
+	if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
+		eth_hw_addr_random(qca->net_dev);
+		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
+			 qca->net_dev->dev_addr);
+	}
+
+	netif_carrier_on(qca->net_dev);
+	serdev_device_set_drvdata(serdev, qca);
+	serdev_device_set_client_ops(serdev, &qca_serdev_ops);
+
+	ret = serdev_device_open(serdev);
+	if (ret) {
+		dev_err(&serdev->dev, "Unable to open device %s\n",
+			qcauart_dev->name);
+		goto free;
+	}
+
+	speed = serdev_device_set_baudrate(serdev, speed);
+	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
+
+	serdev_device_set_flow_control(serdev, false);
+
+	ret = register_netdev(qcauart_dev);
+	if (ret) {
+		dev_err(&serdev->dev, "Unable to register net device %s\n",
+			qcauart_dev->name);
+		serdev_device_close(serdev);
+		cancel_work_sync(&qca->tx_work);
+		goto free;
+	}
+
+	return 0;
+
+free:
+	free_netdev(qcauart_dev);
+	return ret;
+}
+
+static void qca_uart_remove(struct serdev_device *serdev)
+{
+	struct qcauart *qca = serdev_device_get_drvdata(serdev);
+
+	unregister_netdev(qca->net_dev);
+
+	/* Flush any pending characters in the driver. */
+	serdev_device_close(serdev);
+	cancel_work_sync(&qca->tx_work);
+
+	free_netdev(qca->net_dev);
+}
+
+static struct serdev_device_driver qca_uart_driver = {
+	.probe = qca_uart_probe,
+	.remove = qca_uart_remove,
+	.driver = {
+		.name = QCAUART_DRV_NAME,
+		.of_match_table = of_match_ptr(qca_uart_of_match),
+	},
+};
+
+module_serdev_device_driver(qca_uart_driver);
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
+MODULE_AUTHOR("Qualcomm Atheros Communications");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QCAUART_DRV_VERSION);
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/Kconfig
new file mode 100644
index 0000000..9f92795
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RMNET MAP driver
+#
+
+menuconfig RMNET
+	tristate "RmNet MAP driver"
+	default n
+	select GRO_CELLS
+	---help---
+	  If you select this, you will enable the RMNET module which is used
+	  for handling data in the multiplexing and aggregation protocol (MAP)
+	  format in the embedded data path. RMNET devices can be attached to
+	  any IP mode physical device.
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/Makefile b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/Makefile
new file mode 100644
index 0000000..8252e40
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RMNET module
+#
+
+rmnet-y		 := rmnet_config.o
+rmnet-y		 += rmnet_vnd.o
+rmnet-y		 += rmnet_handlers.o
+rmnet-y		 += rmnet_map_data.o
+rmnet-y		 += rmnet_map_command.o
+obj-$(CONFIG_RMNET) += rmnet.o
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
new file mode 100644
index 0000000..deb8d00
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * RMNET configuration engine
+ */
+
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_vnd.h"
+#include "rmnet_private.h"
+
+/* Local Definitions and Declarations */
+
+static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
+	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
+	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
+};
+
+static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
+{
+	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
+}
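+
+/* The rx_handler pointer doubles as the registration flag: a real
+ * device is considered attached to rmnet exactly when its rx_handler
+ * is rmnet_rx_handler, and rx_handler_data then carries the
+ * struct rmnet_port for that device.
+ */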
+
+/* Needs rtnl lock */
+static struct rmnet_port*
+rmnet_get_port_rtnl(const struct net_device *real_dev)
+{
+	return rtnl_dereference(real_dev->rx_handler_data);
+}
+
+static int rmnet_unregister_real_device(struct net_device *real_dev)
+{
+	struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
+
+	if (port->nr_rmnet_devs)
+		return -EINVAL;
+
+	netdev_rx_handler_unregister(real_dev);
+
+	kfree(port);
+
+	netdev_dbg(real_dev, "Removed from rmnet\n");
+	return 0;
+}
+
+static int rmnet_register_real_device(struct net_device *real_dev,
+				      struct netlink_ext_ack *extack)
+{
+	struct rmnet_port *port;
+	int rc, entry;
+
+	ASSERT_RTNL();
+
+	if (rmnet_is_real_dev_registered(real_dev)) {
+		port = rmnet_get_port_rtnl(real_dev);
+		if (port->rmnet_mode != RMNET_EPMODE_VND) {
+			NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	port = kzalloc(sizeof(*port), GFP_ATOMIC);
+	if (!port)
+		return -ENOMEM;
+
+	port->dev = real_dev;
+	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
+	if (rc) {
+		kfree(port);
+		return -EBUSY;
+	}
+
+	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
+		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+
+	netdev_dbg(real_dev, "registered with rmnet\n");
+	return 0;
+}
+
+static void rmnet_unregister_bridge(struct rmnet_port *port)
+{
+	struct net_device *bridge_dev, *real_dev, *rmnet_dev;
+	struct rmnet_port *real_port;
+
+	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
+		return;
+
+	rmnet_dev = port->rmnet_dev;
+	if (!port->nr_rmnet_devs) {
+		/* bridge device */
+		real_dev = port->bridge_ep;
+		bridge_dev = port->dev;
+
+		real_port = rmnet_get_port_rtnl(real_dev);
+		real_port->bridge_ep = NULL;
+		real_port->rmnet_mode = RMNET_EPMODE_VND;
+	} else {
+		/* real device */
+		bridge_dev = port->bridge_ep;
+
+		port->bridge_ep = NULL;
+		port->rmnet_mode = RMNET_EPMODE_VND;
+	}
+
+	netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
+	rmnet_unregister_real_device(bridge_dev);
+}
+
+static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+			 struct nlattr *tb[], struct nlattr *data[],
+			 struct netlink_ext_ack *extack)
+{
+	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+	struct net_device *real_dev;
+	int mode = RMNET_EPMODE_VND;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	int err = 0;
+	u16 mux_id;
+
+	if (!tb[IFLA_LINK]) {
+		NL_SET_ERR_MSG_MOD(extack, "link not specified");
+		return -EINVAL;
+	}
+
+	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+	if (!real_dev || !dev)
+		return -ENODEV;
+
+	if (!data[IFLA_RMNET_MUX_ID])
+		return -EINVAL;
+
+	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+	if (!ep)
+		return -ENOMEM;
+
+	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+
+	err = rmnet_register_real_device(real_dev, extack);
+	if (err)
+		goto err0;
+
+	port = rmnet_get_port_rtnl(real_dev);
+	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
+	if (err)
+		goto err1;
+
+	err = netdev_upper_dev_link(real_dev, dev, extack);
+	if (err < 0)
+		goto err2;
+
+	port->rmnet_mode = mode;
+	port->rmnet_dev = dev;
+
+	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+	if (data[IFLA_RMNET_FLAGS]) {
+		struct ifla_rmnet_flags *flags;
+
+		flags = nla_data(data[IFLA_RMNET_FLAGS]);
+		data_format = flags->flags & flags->mask;
+	}
+
+	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+	port->data_format = data_format;
+
+	return 0;
+
+err2:
+	unregister_netdevice(dev);
+	rmnet_vnd_dellink(mux_id, port, ep);
+err1:
+	rmnet_unregister_real_device(real_dev);
+err0:
+	kfree(ep);
+	return err;
+}
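+
+/* Matching iproute2 usage, assuming a real device named wwan0 and
+ * MUX ID 1 (the names are illustrative):
+ *
+ *	ip link add link wwan0 name rmnet0 type rmnet mux_id 1
+ *
+ * IFLA_LINK carries the real device, IFLA_RMNET_MUX_ID the endpoint,
+ * and optional data-format bits arrive via IFLA_RMNET_FLAGS.
+ */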
+
+static void rmnet_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev, *bridge_dev;
+	struct rmnet_port *real_port, *bridge_port;
+	struct rmnet_endpoint *ep;
+	u8 mux_id = priv->mux_id;
+
+	real_dev = priv->real_dev;
+
+	if (!rmnet_is_real_dev_registered(real_dev))
+		return;
+
+	real_port = rmnet_get_port_rtnl(real_dev);
+	bridge_dev = real_port->bridge_ep;
+	if (bridge_dev) {
+		bridge_port = rmnet_get_port_rtnl(bridge_dev);
+		rmnet_unregister_bridge(bridge_port);
+	}
+
+	ep = rmnet_get_endpoint(real_port, mux_id);
+	if (ep) {
+		hlist_del_init_rcu(&ep->hlnode);
+		rmnet_vnd_dellink(mux_id, real_port, ep);
+		kfree(ep);
+	}
+
+	netdev_upper_dev_unlink(real_dev, dev);
+	rmnet_unregister_real_device(real_dev);
+	unregister_netdevice_queue(dev, head);
+}
+
+static void rmnet_force_unassociate_device(struct net_device *real_dev)
+{
+	struct hlist_node *tmp_ep;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	unsigned long bkt_ep;
+	LIST_HEAD(list);
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	if (port->nr_rmnet_devs) {
+		/* real device */
+		rmnet_unregister_bridge(port);
+		hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+			unregister_netdevice_queue(ep->egress_dev, &list);
+			netdev_upper_dev_unlink(real_dev, ep->egress_dev);
+			rmnet_vnd_dellink(ep->mux_id, port, ep);
+			hlist_del_init_rcu(&ep->hlnode);
+			kfree(ep);
+		}
+		rmnet_unregister_real_device(real_dev);
+		unregister_netdevice_many(&list);
+	} else {
+		rmnet_unregister_bridge(port);
+	}
+}
+
+static int rmnet_config_notify_cb(struct notifier_block *nb,
+				  unsigned long event, void *data)
+{
+	struct net_device *real_dev = netdev_notifier_info_to_dev(data);
+
+	if (!rmnet_is_real_dev_registered(real_dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		netdev_dbg(real_dev, "Kernel unregister\n");
+		rmnet_force_unassociate_device(real_dev);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rmnet_dev_notifier __read_mostly = {
+	.notifier_call = rmnet_config_notify_cb,
+};
+
+static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+			       struct netlink_ext_ack *extack)
+{
+	u16 mux_id;
+
+	if (!data || !data[IFLA_RMNET_MUX_ID])
+		return -EINVAL;
+
+	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+		return -ERANGE;
+
+	return 0;
+}
+
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[],
+			    struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct rmnet_port *port;
+	u16 mux_id;
+
+	if (!dev)
+		return -ENODEV;
+
+	real_dev = priv->real_dev;
+	if (!rmnet_is_real_dev_registered(real_dev))
+		return -ENODEV;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	if (data[IFLA_RMNET_MUX_ID]) {
+		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+
+		if (mux_id != priv->mux_id) {
+			struct rmnet_endpoint *ep;
+
+			ep = rmnet_get_endpoint(port, priv->mux_id);
+			if (!ep)
+				return -ENODEV;
+
+			if (rmnet_get_endpoint(port, mux_id)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "MUX ID already exists");
+				return -EINVAL;
+			}
+
+			hlist_del_init_rcu(&ep->hlnode);
+			hlist_add_head_rcu(&ep->hlnode,
+					   &port->muxed_ep[mux_id]);
+
+			ep->mux_id = mux_id;
+			priv->mux_id = mux_id;
+		}
+	}
+
+	if (data[IFLA_RMNET_FLAGS]) {
+		struct ifla_rmnet_flags *flags;
+
+		flags = nla_data(data[IFLA_RMNET_FLAGS]);
+		port->data_format = flags->flags & flags->mask;
+	}
+
+	return 0;
+}
+
+static size_t rmnet_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_RMNET_MUX_ID */
+		nla_total_size(2) +
+		/* IFLA_RMNET_FLAGS */
+		nla_total_size(sizeof(struct ifla_rmnet_flags));
+}
+
+static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct ifla_rmnet_flags f;
+	struct rmnet_port *port;
+
+	real_dev = priv->real_dev;
+
+	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
+		goto nla_put_failure;
+
+	if (rmnet_is_real_dev_registered(real_dev)) {
+		port = rmnet_get_port_rtnl(real_dev);
+		f.flags = port->data_format;
+	} else {
+		f.flags = 0;
+	}
+
+	f.mask  = ~0;
+
+	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+	.kind		= "rmnet",
+	.maxtype	= IFLA_RMNET_MAX,
+	.priv_size	= sizeof(struct rmnet_priv),
+	.setup		= rmnet_vnd_setup,
+	.validate	= rmnet_rtnl_validate,
+	.newlink	= rmnet_newlink,
+	.dellink	= rmnet_dellink,
+	.get_size	= rmnet_get_size,
+	.changelink     = rmnet_changelink,
+	.policy		= rmnet_policy,
+	.fill_info	= rmnet_fill_info,
+};
+
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
+{
+	if (rmnet_is_real_dev_registered(real_dev))
+		return rcu_dereference_bh(real_dev->rx_handler_data);
+	else
+		return NULL;
+}
+
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
+{
+	struct rmnet_endpoint *ep;
+
+	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
+		if (ep->mux_id == mux_id)
+			return ep;
+	}
+
+	return NULL;
+}
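+
+/* muxed_ep[] is indexed directly by MUX ID, so each hlist bucket
+ * normally holds at most one endpoint; the mux_id comparison in the
+ * RCU walk simply guards against concurrent rehoming done by
+ * rmnet_changelink().
+ */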
+
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev,
+		     struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	struct net_device *real_dev = priv->real_dev;
+	struct rmnet_port *port, *slave_port;
+	int err;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	/* If there is more than one rmnet dev attached, it's probably being
+	 * used for muxing. Skip the bridging in that case.
+	 */
+	if (port->nr_rmnet_devs > 1)
+		return -EINVAL;
+
+	if (rmnet_is_real_dev_registered(slave_dev))
+		return -EBUSY;
+
+	err = rmnet_register_real_device(slave_dev, extack);
+	if (err)
+		return -EBUSY;
+
+	err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
+					   extack);
+	if (err) {
+		rmnet_unregister_real_device(slave_dev);
+		return err;
+	}
+
+	slave_port = rmnet_get_port_rtnl(slave_dev);
+	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+	slave_port->bridge_ep = real_dev;
+	slave_port->rmnet_dev = rmnet_dev;
+
+	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+	port->bridge_ep = slave_dev;
+
+	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
+	return 0;
+}
+
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev)
+{
+	struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
+
+	rmnet_unregister_bridge(port);
+
+	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
+	return 0;
+}
+
+/* Startup/Shutdown */
+
+static int __init rmnet_init(void)
+{
+	int rc;
+
+	rc = register_netdevice_notifier(&rmnet_dev_notifier);
+	if (rc != 0)
+		return rc;
+
+	rc = rtnl_link_register(&rmnet_link_ops);
+	if (rc != 0) {
+		unregister_netdevice_notifier(&rmnet_dev_notifier);
+		return rc;
+	}
+	return rc;
+}
+
+static void __exit rmnet_exit(void)
+{
+	rtnl_link_unregister(&rmnet_link_ops);
+	unregister_netdevice_notifier(&rmnet_dev_notifier);
+}
+
+module_init(rmnet_init)
+module_exit(rmnet_exit)
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
new file mode 100644
index 0000000..be51598
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data configuration engine
+ */
+
+#include <linux/skbuff.h>
+#include <net/gro_cells.h>
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+#define RMNET_MAX_LOGICAL_EP 255
+
+struct rmnet_endpoint {
+	u8 mux_id;
+	struct net_device *egress_dev;
+	struct hlist_node hlnode;
+};
+
+/* One instance of this structure is instantiated for each real_dev associated
+ * with rmnet.
+ */
+struct rmnet_port {
+	struct net_device *dev;
+	u32 data_format;
+	u8 nr_rmnet_devs;
+	u8 rmnet_mode;
+	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
+	struct net_device *bridge_ep;
+	struct net_device *rmnet_dev;
+};
+
+extern struct rtnl_link_ops rmnet_link_ops;
+
+struct rmnet_vnd_stats {
+	u64 rx_pkts;
+	u64 rx_bytes;
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+	struct rmnet_vnd_stats stats;
+	struct u64_stats_sync syncp;
+};
+
+struct rmnet_priv_stats {
+	u64 csum_ok;
+	u64 csum_valid_unset;
+	u64 csum_validation_failed;
+	u64 csum_err_bad_buffer;
+	u64 csum_err_invalid_ip_version;
+	u64 csum_err_invalid_transport;
+	u64 csum_fragmented_pkt;
+	u64 csum_skipped;
+	u64 csum_sw;
+};
+
+struct rmnet_priv {
+	u8 mux_id;
+	struct net_device *real_dev;
+	struct rmnet_pcpu_stats __percpu *pcpu_stats;
+	struct gro_cells gro_cells;
+	struct rmnet_priv_stats stats;
+};
+
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev,
+		     struct netlink_ext_ack *extack);
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev);
+#endif /* _RMNET_CONFIG_H_ */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
new file mode 100644
index 0000000..3d7d3ab
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data ingress/egress handler
+ */
+
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+#include "rmnet_private.h"
+#include "rmnet_config.h"
+#include "rmnet_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_handlers.h"
+
+#define RMNET_IP_VERSION_4 0x40
+#define RMNET_IP_VERSION_6 0x60
+
+/* Helper Functions */
+
+static void rmnet_set_skb_proto(struct sk_buff *skb)
+{
+	switch (skb->data[0] & 0xF0) {
+	case RMNET_IP_VERSION_4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case RMNET_IP_VERSION_6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		skb->protocol = htons(ETH_P_MAP);
+		break;
+	}
+}
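+
+/* Example: an IPv4 packet starts with 0x45 (version 4, IHL 5), which
+ * masked with 0xF0 gives RMNET_IP_VERSION_4; anything that is neither
+ * 0x4x nor 0x6x is left as raw MAP traffic (ETH_P_MAP).
+ */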
+
+/* Generic handler */
+
+static void
+rmnet_deliver_skb(struct sk_buff *skb)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	rmnet_vnd_rx_fixup(skb, skb->dev);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+	gro_cells_receive(&priv->gro_cells, skb);
+}
+
+/* MAP handler */
+
+static void
+__rmnet_map_ingress_handler(struct sk_buff *skb,
+			    struct rmnet_port *port)
+{
+	struct rmnet_endpoint *ep;
+	u16 len, pad;
+	u8 mux_id;
+
+	if (RMNET_MAP_GET_CD_BIT(skb)) {
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
+			return rmnet_map_command(skb, port);
+
+		goto free_skb;
+	}
+
+	mux_id = RMNET_MAP_GET_MUX_ID(skb);
+	pad = RMNET_MAP_GET_PAD(skb);
+	len = RMNET_MAP_GET_LENGTH(skb) - pad;
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		goto free_skb;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		goto free_skb;
+
+	skb->dev = ep->egress_dev;
+
+	/* Subtract MAP header */
+	skb_pull(skb, sizeof(struct rmnet_map_header));
+	rmnet_set_skb_proto(skb);
+
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	skb_trim(skb, len);
+	rmnet_deliver_skb(skb);
+	return;
+
+free_skb:
+	kfree_skb(skb);
+}
+
+static void
+rmnet_map_ingress_handler(struct sk_buff *skb,
+			  struct rmnet_port *port)
+{
+	struct sk_buff *skbn;
+
+	if (skb->dev->type == ARPHRD_ETHER) {
+		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, ETH_HLEN);
+	}
+
+	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
+		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
+			__rmnet_map_ingress_handler(skbn, port);
+
+		consume_skb(skb);
+	} else {
+		__rmnet_map_ingress_handler(skb, port);
+	}
+}
+
+static int rmnet_map_egress_handler(struct sk_buff *skb,
+				    struct rmnet_port *port, u8 mux_id,
+				    struct net_device *orig_dev)
+{
+	int required_headroom, additional_header_len;
+	struct rmnet_map_header *map_header;
+
+	additional_header_len = 0;
+	required_headroom = sizeof(struct rmnet_map_header);
+
+	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
+		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+		required_headroom += additional_header_len;
+	}
+
+	if (skb_headroom(skb) < required_headroom) {
+		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
+			return -ENOMEM;
+	}
+
+	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
+		rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
+	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
+	if (!map_header)
+		return -ENOMEM;
+
+	map_header->mux_id = mux_id;
+
+	skb->protocol = htons(ETH_P_MAP);
+
+	return 0;
+}
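+
+/* Uplink frame produced above, for reference:
+ *
+ *	[rmnet_map_header][ul csum header, if CKSUMV4][IP packet]
+ *
+ * Note that pkt_len covers the IP packet only: the checksum header length
+ * is passed to rmnet_map_add_map_header() as hdrlen and excluded from the
+ * MAP payload length.
+ */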
+
+static void
+rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
+{
+	if (skb_mac_header_was_set(skb))
+		skb_push(skb, skb->mac_len);
+
+	if (bridge_dev) {
+		skb->dev = bridge_dev;
+		dev_queue_xmit(skb);
+	}
+}
+
+/* Ingress / Egress Entry Points */
+
+/* Process the packet according to the ingress data format of the receiving
+ * device. The logical endpoint is determined by packet inspection, and the
+ * packet is then sent to the egress device listed in that endpoint's
+ * configuration.
+ */
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct rmnet_port *port;
+	struct net_device *dev;
+
+	if (!skb)
+		goto done;
+
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
+	dev = skb->dev;
+	port = rmnet_get_port_rcu(dev);
+	if (unlikely(!port)) {
+		atomic_long_inc(&skb->dev->rx_nohandler);
+		kfree_skb(skb);
+		goto done;
+	}
+
+	switch (port->rmnet_mode) {
+	case RMNET_EPMODE_VND:
+		rmnet_map_ingress_handler(skb, port);
+		break;
+	case RMNET_EPMODE_BRIDGE:
+		rmnet_bridge_handler(skb, port->bridge_ep);
+		break;
+	}
+
+done:
+	return RX_HANDLER_CONSUMED;
+}
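+
+/* Returning RX_HANDLER_CONSUMED on every path (including errors) means MAP
+ * traffic never travels up the real device's normal protocol stack; only
+ * loopback frames are handed back with RX_HANDLER_PASS.
+ */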
+
+/* Modifies the packet according to the logical endpoint configuration and
+ * the egress data format of the endpoint's egress device, then transmits
+ * the packet on that device.
+ */
+void rmnet_egress_handler(struct sk_buff *skb)
+{
+	struct net_device *orig_dev;
+	struct rmnet_port *port;
+	struct rmnet_priv *priv;
+	u8 mux_id;
+
+	sk_pacing_shift_update(skb->sk, 8);
+
+	orig_dev = skb->dev;
+	priv = netdev_priv(orig_dev);
+	skb->dev = priv->real_dev;
+	mux_id = priv->mux_id;
+
+	port = rmnet_get_port_rcu(skb->dev);
+	if (!port)
+		goto drop;
+
+	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+		goto drop;
+
+	rmnet_vnd_tx_fixup(skb, orig_dev);
+
+	dev_queue_xmit(skb);
+	return;
+
+drop:
+	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+	kfree_skb(skb);
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
new file mode 100644
index 0000000..c4571dc
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data ingress/egress handler
+ */
+
+#ifndef _RMNET_HANDLERS_H_
+#define _RMNET_HANDLERS_H_
+
+#include "rmnet_config.h"
+
+void rmnet_egress_handler(struct sk_buff *skb);
+
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
+
+#endif /* _RMNET_HANDLERS_H_ */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
new file mode 100644
index 0000000..576501d
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+#include <linux/if_rmnet.h>
+
+struct rmnet_map_control_command {
+	u8  command_name;
+	u8  cmd_type:2;
+	u8  reserved:6;
+	u16 reserved2;
+	u32 transaction_id;
+	union {
+		struct {
+			u16 ip_family:2;
+			u16 reserved:14;
+			__be16 flow_control_seq_num;
+			__be32 qos_id;
+		} flow_control;
+		u8 data[0];
+	};
+}  __aligned(1);
+
+enum rmnet_map_commands {
+	RMNET_MAP_COMMAND_NONE,
+	RMNET_MAP_COMMAND_FLOW_DISABLE,
+	RMNET_MAP_COMMAND_FLOW_ENABLE,
+	/* These should always be the last 2 elements */
+	RMNET_MAP_COMMAND_UNKNOWN,
+	RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
+				 (Y)->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
+				(Y)->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
+				(Y)->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
+				    ((Y)->data + \
+				      sizeof(struct rmnet_map_header)))
+#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
+					(Y)->data)->pkt_len))
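+
+/* These accessors assume the MAP header layout from <linux/if_rmnet.h>:
+ *
+ *	u8	pad_len:6, reserved_bit:1, cd_bit:1;
+ *	u8	mux_id;
+ *	__be16	pkt_len;	- length of payload + padding, network order
+ */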
+
+#define RMNET_MAP_COMMAND_REQUEST     0
+#define RMNET_MAP_COMMAND_ACK         1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID     3
+
+#define RMNET_MAP_NO_PAD_BYTES        0
+#define RMNET_MAP_ADD_PAD_BYTES       1
+
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port);
+struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
+						  int hdrlen, int pad);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev);
+
+#endif /* _RMNET_MAP_H_ */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
new file mode 100644
index 0000000..beaee49
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+
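+/* The early exits below return RX_HANDLER_CONSUMED (0) rather than a MAP
+ * command code. Since that value never equals RMNET_MAP_COMMAND_ACK, the
+ * caller simply skips the ack, which is the desired outcome once the skb
+ * has been freed.
+ */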
+static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
+				    struct rmnet_port *port,
+				    int enable)
+{
+	struct rmnet_endpoint *ep;
+	struct net_device *vnd;
+	u8 mux_id;
+	int r;
+
+	mux_id = RMNET_MAP_GET_MUX_ID(skb);
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep) {
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	vnd = ep->egress_dev;
+
+	/* Ignore the IP family and pass the sequence number for both v4 and
+	 * v6 flows. User space does not support creating dedicated flows for
+	 * the two protocols.
+	 */
+	r = rmnet_vnd_do_flow_control(vnd, enable);
+	if (r) {
+		kfree_skb(skb);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	} else {
+		return RMNET_MAP_COMMAND_ACK;
+	}
+}
+
+static void rmnet_map_send_ack(struct sk_buff *skb,
+			       unsigned char type,
+			       struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	struct net_device *dev = skb->dev;
+
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		skb_trim(skb,
+			 skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
+
+	skb->protocol = htons(ETH_P_MAP);
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	cmd->cmd_type = type & 0x03;
+
+	netif_tx_lock(dev);
+	dev->netdev_ops->ndo_start_xmit(skb, dev);
+	netif_tx_unlock(dev);
+}
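+
+/* Command frames keep the MAP header in place (there is no skb_pull() on
+ * this path), so RMNET_MAP_GET_CMD_START() finds the command payload
+ * directly behind it:
+ *
+ *	[rmnet_map_header, cd_bit = 1][rmnet_map_control_command]
+ *
+ * rmnet_map_command() below either frees the skb or reuses it to carry
+ * the ack.
+ */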
+
+/* Process a MAP command frame and send an ACK/NACK message as appropriate.
+ * The command name is decoded here and the matching handler is called.
+ */
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	command_name = cmd->command_name;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_map_do_flow_control(skb, port, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_map_do_flow_control(skb, port, 0);
+		break;
+
+	default:
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		kfree_skb(skb);
+		break;
+	}
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_map_send_ack(skb, rc, port);
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
new file mode 100644
index 0000000..21d3816
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data MAP protocol
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+
+#define RMNET_MAP_DEAGGR_SPACING  64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+					 const void *txporthdr)
+{
+	__sum16 *check = NULL;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = NULL;
+		break;
+	}
+
+	return check;
+}
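+
+/* For reference: this resolves to &tcphdr->check (offset 16 into the TCP
+ * header) or &udphdr->check (offset 6 into the UDP header); any other
+ * transport makes the callers bail out with csum_err_invalid_transport.
+ */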
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer,
+			       struct rmnet_priv *priv)
+{
+	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+	u16 csum_value, csum_value_final;
+	struct iphdr *ip4h;
+	void *txporthdr;
+	__be16 addend;
+
+	ip4h = (struct iphdr *)(skb->data);
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+		priv->stats.csum_fragmented_pkt++;
+		return -EOPNOTSUPP;
+	}
+
+	txporthdr = skb->data + ip4h->ihl * 4;
+
+	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+	if (!csum_field) {
+		priv->stats.csum_err_invalid_transport++;
+		return -EPROTONOSUPPORT;
+	}
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+		priv->stats.csum_skipped++;
+		return 0;
+	}
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+				     (__force __be16)hdr_csum);
+
+	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+					 ip4h->protocol, 0);
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL4 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+		priv->stats.csum_ok++;
+		return 0;
+	} else {
+		priv->stats.csum_validation_failed++;
+		return -EINVAL;
+	}
+}
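+
+/* Summary of the ones'-complement bookkeeping above: the trailer carries a
+ * checksum over the whole IPv4 packet; subtracting the IP header checksum
+ * isolates the payload contribution, adding the pseudo-header and backing
+ * out the received transport checksum reconstructs what the transport
+ * checksum field should hold, and a match means the payload is intact.
+ */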
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer,
+			       struct rmnet_priv *priv)
+{
+	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+	u16 csum_value, csum_value_final;
+	__be16 ip6_hdr_csum, addend;
+	struct ipv6hdr *ip6h;
+	void *txporthdr;
+	u32 length;
+
+	ip6h = (struct ipv6hdr *)(skb->data);
+
+	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+	if (!csum_field) {
+		priv->stats.csum_err_invalid_transport++;
+		return -EPROTONOSUPPORT;
+	}
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	ip6_hdr_csum = (__force __be16)
+			~ntohs((__force __be16)ip_compute_csum(ip6h,
+			       (int)(txporthdr - (void *)(skb->data))));
+	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+				      ip6_hdr_csum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		 ntohs(((struct udphdr *)txporthdr)->len) :
+		 ntohs(ip6h->payload_len);
+	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+			     length, ip6h->nexthdr, 0));
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1
+			 * DL6 One's complement rule for UDP checksum 0
+			 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL6 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+		priv->stats.csum_ok++;
+		return 0;
+	} else {
+		priv->stats.csum_validation_failed++;
+		return -EINVAL;
+	}
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = iphdr + ip4h->ihl * 4;
+
+	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)iphdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ind = 1;
+	else
+		ul_header->udp_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)ip6hdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+
+	if (ip6h->nexthdr == IPPROTO_UDP)
+		ul_header->udp_ind = 1;
+	else
+		ul_header->udp_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
+/* Adds a MAP header to the front of skb->data.
+ * Padding is calculated and recorded in the MAP header; the mux ID is
+ * initialized to 0.
+ */
+struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
+						  int hdrlen, int pad)
+{
+	struct rmnet_map_header *map_header;
+	u32 padding, map_datalen;
+	u8 *padbytes;
+
+	map_datalen = skb->len - hdrlen;
+	map_header = (struct rmnet_map_header *)
+			skb_push(skb, sizeof(struct rmnet_map_header));
+	memset(map_header, 0, sizeof(struct rmnet_map_header));
+
+	if (pad == RMNET_MAP_NO_PAD_BYTES) {
+		map_header->pkt_len = htons(map_datalen);
+		return map_header;
+	}
+
+	padding = ALIGN(map_datalen, 4) - map_datalen;
+
+	if (padding == 0)
+		goto done;
+
+	if (skb_tailroom(skb) < padding)
+		return NULL;
+
+	padbytes = (u8 *)skb_put(skb, padding);
+	memset(padbytes, 0, padding);
+
+done:
+	map_header->pkt_len = htons(map_datalen + padding);
+	map_header->pad_len = padding & 0x3F;
+
+	return map_header;
+}
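+
+/* Example: with RMNET_MAP_ADD_PAD_BYTES and map_datalen == 1501,
+ * ALIGN(1501, 4) == 1504, so three zero bytes are appended, pkt_len is
+ * set to htons(1504) and pad_len to 3.
+ */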
+
+/* Deaggregates a single packet.
+ * A whole new buffer is allocated for each portion of an aggregated frame.
+ * The caller should keep calling rmnet_map_deaggregate() on the source skb
+ * until NULL is returned, indicating that there are no more packets to
+ * deaggregate. The caller is responsible for freeing the original skb.
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port)
+{
+	struct rmnet_map_header *maph;
+	struct sk_buff *skbn;
+	u32 packet_len;
+
+	if (skb->len == 0)
+		return NULL;
+
+	maph = (struct rmnet_map_header *)skb->data;
+	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
+	if (((int)skb->len - (int)packet_len) < 0)
+		return NULL;
+
+	/* Some hardware can send us empty frames. Catch them */
+	if (ntohs(maph->pkt_len) == 0)
+		return NULL;
+
+	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+	if (!skbn)
+		return NULL;
+
+	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+	skb_put(skbn, packet_len);
+	memcpy(skbn->data, skb->data, packet_len);
+	skb_pull(skb, packet_len);
+
+	return skbn;
+}
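+
+/* Typical caller loop (see rmnet_map_ingress_handler()):
+ *
+ *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
+ *		__rmnet_map_ingress_handler(skbn, port);
+ *	consume_skb(skb);
+ */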
+
+/* Validates packet checksums. skb->data must point to the start of the IP
+ * packet, with the padding and the checksum trailer following at offset
+ * @len. Only IPv4 and IPv6 are supported, along with TCP and UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+		priv->stats.csum_sw++;
+		return -EOPNOTSUPP;
+	}
+
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+	if (!csum_trailer->valid) {
+		priv->stats.csum_valid_unset++;
+		return -EINVAL;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
+#else
+		priv->stats.csum_err_invalid_ip_version++;
+		return -EPROTONOSUPPORT;
+#endif
+	} else {
+		priv->stats.csum_err_invalid_ip_version++;
+		return -EPROTONOSUPPORT;
+	}
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+	struct rmnet_map_ul_csum_header *ul_header;
+	void *iphdr;
+
+	ul_header = (struct rmnet_map_ul_csum_header *)
+		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+	if (unlikely(!(orig_dev->features &
+		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+		goto sw_csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_csum_header);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+			return;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+			return;
+#else
+			priv->stats.csum_err_invalid_ip_version++;
+			goto sw_csum;
+#endif
+		} else {
+			priv->stats.csum_err_invalid_ip_version++;
+		}
+	}
+
+sw_csum:
+	ul_header->csum_start_offset = 0;
+	ul_header->csum_insert_offset = 0;
+	ul_header->csum_enabled = 0;
+	ul_header->udp_ind = 0;
+
+	priv->stats.csum_sw++;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
new file mode 100644
index 0000000..e1337f1
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _RMNET_PRIVATE_H_
+#define _RMNET_PRIVATE_H_
+
+#define RMNET_MAX_PACKET_SIZE      16384
+#define RMNET_DFLT_PACKET_SIZE     1500
+#define RMNET_NEEDED_HEADROOM      16
+#define RMNET_TX_QUEUE_LEN         1000
+
+/* Replace skb->dev to a virtual rmnet device and pass up the stack */
+#define RMNET_EPMODE_VND (1)
+/* Pass the frame directly to another device with dev_queue_xmit() */
+#define RMNET_EPMODE_BRIDGE (2)
+
+#endif /* _RMNET_PRIVATE_H_ */
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
new file mode 100644
index 0000000..26ad40f
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data virtual network driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <net/pkt_sched.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+#include "rmnet_vnd.h"
+
+/* RX/TX Fixup */
+
+void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+/* Network Device Operations */
+
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+					struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->real_dev) {
+		rmnet_egress_handler(skb);
+	} else {
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+		kfree_skb(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+static int rmnet_vnd_get_iflink(const struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	return priv->real_dev->ifindex;
+}
+
+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	err = gro_cells_init(&priv->gro_cells, dev);
+	if (err) {
+		free_percpu(priv->pcpu_stats);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	gro_cells_destroy(&priv->gro_cells);
+	free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+	for_each_possible_cpu(cpu) {
+		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+	}
+
+	s->rx_packets = total_stats.rx_pkts;
+	s->rx_bytes = total_stats.rx_bytes;
+	s->tx_packets = total_stats.tx_pkts;
+	s->tx_bytes = total_stats.tx_bytes;
+	s->tx_dropped = total_stats.tx_drops;
+}
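+
+/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair rereads
+ * a CPU's counters if a writer updated them mid-read, giving a consistent
+ * 64-bit snapshot on 32-bit machines; tx_drops is bumped with
+ * this_cpu_inc() outside the syncp and needs no such protection.
+ */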
+
+static const struct net_device_ops rmnet_vnd_ops = {
+	.ndo_start_xmit = rmnet_vnd_start_xmit,
+	.ndo_change_mtu = rmnet_vnd_change_mtu,
+	.ndo_get_iflink = rmnet_vnd_get_iflink,
+	.ndo_add_slave  = rmnet_add_bridge,
+	.ndo_del_slave  = rmnet_del_bridge,
+	.ndo_init       = rmnet_vnd_init,
+	.ndo_uninit     = rmnet_vnd_uninit,
+	.ndo_get_stats64 = rmnet_get_stats64,
+};
+
+static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"Checksum ok",
+	"Checksum valid bit not set",
+	"Checksum validation failed",
+	"Checksum error bad buffer",
+	"Checksum error bad ip version",
+	"Checksum error bad transport",
+	"Checksum skipped on ip fragment",
+	"Checksum skipped",
+	"Checksum computed in software",
+};
+
+static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &rmnet_gstrings_stats,
+		       sizeof(rmnet_gstrings_stats));
+		break;
+	}
+}
+
+static int rmnet_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(rmnet_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void rmnet_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_priv_stats *st = &priv->stats;
+
+	if (!data)
+		return;
+
+	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+}
+
+static const struct ethtool_ops rmnet_ethtool_ops = {
+	.get_ethtool_stats = rmnet_get_ethtool_stats,
+	.get_strings = rmnet_get_strings,
+	.get_sset_count = rmnet_get_sset_count,
+};
+
+/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
+ * flags, ARP type, needed headroom, etc...
+ */
+void rmnet_vnd_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
+	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+	eth_random_addr(rmnet_dev->dev_addr);
+	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL;  /* No header */
+	rmnet_dev->type = ARPHRD_RAWIP;
+	rmnet_dev->hard_header_len = 0;
+	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	rmnet_dev->needs_free_netdev = true;
+	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+
+	/* This perm addr will be used as interface identifier by IPv6 */
+	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
+	eth_random_addr(rmnet_dev->perm_addr);
+}
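+
+/* With ARPHRD_RAWIP there is no link-layer header, so hard_header_len is 0
+ * and the RMNET_NEEDED_HEADROOM bytes exist only so the MAP (and optional
+ * uplink checksum) headers can be pushed without reallocating the skb.
+ */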
+
+/* Exposed API */
+
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+		      struct rmnet_port *port,
+		      struct net_device *real_dev,
+		      struct rmnet_endpoint *ep)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	int rc;
+
+	if (ep->egress_dev)
+		return -EINVAL;
+
+	if (rmnet_get_endpoint(port, id))
+		return -EBUSY;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	rmnet_dev->hw_features |= NETIF_F_SG;
+
+	priv->real_dev = real_dev;
+
+	rc = register_netdevice(rmnet_dev);
+	if (!rc) {
+		ep->egress_dev = rmnet_dev;
+		ep->mux_id = id;
+		port->nr_rmnet_devs++;
+
+		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
+
+		priv->mux_id = id;
+
+		netdev_dbg(rmnet_dev, "rmnet dev created\n");
+	}
+
+	return rc;
+}
+
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+		      struct rmnet_endpoint *ep)
+{
+	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
+		return -EINVAL;
+
+	ep->egress_dev = NULL;
+	port->nr_rmnet_devs--;
+	return 0;
+}
+
+int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
+{
+	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
+	/* Although we expect a similar number of enable and disable
+	 * commands, optimize for the disable path: it is more
+	 * latency-sensitive than enable.
+	 */
+	if (unlikely(enable))
+		netif_wake_queue(rmnet_dev);
+	else
+		netif_stop_queue(rmnet_dev);
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
new file mode 100644
index 0000000..14d77c7
--- /dev/null
+++ b/marvell/linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data Virtual Network Device APIs
+ */
+
+#ifndef _RMNET_VND_H_
+#define _RMNET_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+		      struct rmnet_port *port,
+		      struct net_device *real_dev,
+		      struct rmnet_endpoint *ep);
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+		      struct rmnet_endpoint *ep);
+void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_setup(struct net_device *dev);
+#endif /* _RMNET_VND_H_ */