ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c
new file mode 100644
index 0000000..545e398
--- /dev/null
+++ b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c
@@ -0,0 +1,1892 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/switch.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/lockdep.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/mdio.h>
+#include <linux/gpio.h>
+
+#include "ar40xx.h"
+
+static struct ar40xx_priv *ar40xx_priv;
+
+#define MIB_DESC(_s, _o, _n)	\
+	{			\
+		.size = (_s),	\
+		.offset = (_o),	\
+		.name = (_n),	\
+	}
+
+static const struct ar40xx_mib_desc ar40xx_mibs[] = {
+	MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"),
+	MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"),
+	MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"),
+	MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"),
+	MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"),
+	MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"),
+	MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"),
+	MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"),
+	MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"),
+	MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"),
+	MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"),
+	MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"),
+	MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"),
+	MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"),
+	MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"),
+	MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"),
+	MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"),
+	MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"),
+	MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"),
+	MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"),
+	MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"),
+	MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"),
+	MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"),
+	MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"),
+	MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"),
+	MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"),
+	MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"),
+	MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"),
+	MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"),
+	MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"),
+	MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"),
+	MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"),
+	MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"),
+	MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"),
+	MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"),
+	MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"),
+	MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"),
+	MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"),
+	MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"),
+};
+
+static u32
+ar40xx_read(struct ar40xx_priv *priv, int reg)
+{
+	return readl(priv->hw_addr + reg);
+}
+
+static u32
+ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg)
+{
+	return readl(priv->psgmii_hw_addr + reg);
+}
+
+static void
+ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val)
+{
+	writel(val, priv->hw_addr + reg);
+}
+
+static u32
+ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
+{
+	u32 ret;
+
+	ret = ar40xx_read(priv, reg);
+	ret &= ~mask;
+	ret |= val;
+	ar40xx_write(priv, reg, ret);
+	return ret;
+}
+
+static void
+ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val)
+{
+	writel(val, priv->psgmii_hw_addr + reg);
+}
+
+static void
+ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr,
+		     u16 dbg_addr, u16 dbg_data)
+{
+	struct mii_bus *bus = priv->mii_bus;
+
+	mutex_lock(&bus->mdio_lock);
+	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
+	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data);
+	mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr,
+		    u16 dbg_addr, u16 *dbg_data)
+{
+	struct mii_bus *bus = priv->mii_bus;
+
+	mutex_lock(&bus->mdio_lock);
+	bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr);
+	*dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA);
+	mutex_unlock(&bus->mdio_lock);
+}
+
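+/* Indirect MMD register access through the Clause-22 MMD access
+ * registers (0x0d/0x0e): select the MMD and register address first,
+ * then switch to data mode (0x4000 | mmd) to read or write the
+ * register contents.
+ */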
+static void
+ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id,
+		     u16 mmd_num, u16 reg_id, u16 reg_val)
+{
+	struct mii_bus *bus = priv->mii_bus;
+
+	mutex_lock(&bus->mdio_lock);
+	bus->write(bus, phy_id,
+			AR40XX_MII_ATH_MMD_ADDR, mmd_num);
+	bus->write(bus, phy_id,
+			AR40XX_MII_ATH_MMD_DATA, reg_id);
+	bus->write(bus, phy_id,
+			AR40XX_MII_ATH_MMD_ADDR,
+			0x4000 | mmd_num);
+	bus->write(bus, phy_id,
+		AR40XX_MII_ATH_MMD_DATA, reg_val);
+	mutex_unlock(&bus->mdio_lock);
+}
+
+static u16
+ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id,
+		    u16 mmd_num, u16 reg_id)
+{
+	u16 value;
+	struct mii_bus *bus = priv->mii_bus;
+
+	mutex_lock(&bus->mdio_lock);
+	bus->write(bus, phy_id,
+			AR40XX_MII_ATH_MMD_ADDR, mmd_num);
+	bus->write(bus, phy_id,
+			AR40XX_MII_ATH_MMD_DATA, reg_id);
+	bus->write(bus, phy_id,
+			AR40XX_MII_ATH_MMD_ADDR,
+			0x4000 | mmd_num);
+	value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA);
+	mutex_unlock(&bus->mdio_lock);
+	return value;
+}
+
+/* Start of swconfig support */
+
+static void
+ar40xx_phy_poll_reset(struct ar40xx_priv *priv)
+{
+	u32 i, in_reset, retries = 500;
+	struct mii_bus *bus = priv->mii_bus;
+
+	/* Assume RESET was recently issued to some or all of the phys */
+	in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0);
+
+	while (retries--) {
+		/* 1ms should be plenty of time.
+		 * 802.3 spec allows for a max wait time of 500ms
+		 */
+		usleep_range(1000, 2000);
+
+		for (i = 0; i < AR40XX_NUM_PHYS; i++) {
+			int val;
+
+			/* skip devices which have completed reset */
+			if (!(in_reset & BIT(i)))
+				continue;
+
+			val = mdiobus_read(bus, i, MII_BMCR);
+			if (val < 0)
+				continue;
+
+			/* mark when phy is no longer in reset state */
+			if (!(val & BMCR_RESET))
+				in_reset &= ~BIT(i);
+		}
+
+		if (!in_reset)
+			return;
+	}
+
+	dev_warn(&bus->dev, "Failed to reset all phys! (in_reset: 0x%x)\n",
+		 in_reset);
+}
+
+static void
+ar40xx_phy_init(struct ar40xx_priv *priv)
+{
+	int i;
+	struct mii_bus *bus;
+	u16 val;
+
+	bus = priv->mii_bus;
+	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
+		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
+		val &= ~AR40XX_PHY_MANU_CTRL_EN;
+		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
+		mdiobus_write(bus, i,
+			      MII_ADVERTISE, ADVERTISE_ALL |
+			      ADVERTISE_PAUSE_CAP |
+			      ADVERTISE_PAUSE_ASYM);
+		mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
+		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+	}
+
+	ar40xx_phy_poll_reset(priv);
+}
+
+static void
+ar40xx_port_phy_linkdown(struct ar40xx_priv *priv)
+{
+	struct mii_bus *bus;
+	int i;
+	u16 val;
+
+	bus = priv->mii_bus;
+	for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) {
+		mdiobus_write(bus, i, MII_CTRL1000, 0);
+		mdiobus_write(bus, i, MII_ADVERTISE, 0);
+		mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val);
+		val |= AR40XX_PHY_MANU_CTRL_EN;
+		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val);
+		/* disable transmit */
+		ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val);
+		val &= 0xf00f;
+		ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val);
+	}
+}
+
+static void
+ar40xx_set_mirror_regs(struct ar40xx_priv *priv)
+{
+	int port;
+
+	/* reset all mirror registers */
+	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
+		   AR40XX_FWD_CTRL0_MIRROR_PORT,
+		   (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
+	for (port = 0; port < AR40XX_NUM_PORTS; port++) {
+		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port),
+			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0);
+
+		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port),
+			   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0);
+	}
+
+	/* now enable mirroring if necessary */
+	if (priv->source_port >= AR40XX_NUM_PORTS ||
+	    priv->monitor_port >= AR40XX_NUM_PORTS ||
+	    priv->source_port == priv->monitor_port) {
+		return;
+	}
+
+	ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0,
+		   AR40XX_FWD_CTRL0_MIRROR_PORT,
+		   (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S));
+
+	if (priv->mirror_rx)
+		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0,
+			   AR40XX_PORT_LOOKUP_ING_MIRROR_EN);
+
+	if (priv->mirror_tx)
+		ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port),
+			   0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN);
+}
+
+static int
+ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	u8 ports = priv->vlan_table[val->port_vlan];
+	int i;
+
+	val->len = 0;
+	for (i = 0; i < dev->ports; i++) {
+		struct switch_port *p;
+
+		if (!(ports & BIT(i)))
+			continue;
+
+		p = &val->value.ports[val->len++];
+		p->id = i;
+		if ((priv->vlan_tagged & BIT(i)) ||
+		    (priv->pvid[i] != val->port_vlan))
+			p->flags = BIT(SWITCH_PORT_FLAG_TAGGED);
+		else
+			p->flags = 0;
+	}
+	return 0;
+}
+
+static int
+ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	u8 *vt = &priv->vlan_table[val->port_vlan];
+	int i;
+
+	*vt = 0;
+	for (i = 0; i < val->len; i++) {
+		struct switch_port *p = &val->value.ports[i];
+
+		if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) {
+			if (val->port_vlan == priv->pvid[p->id])
+				priv->vlan_tagged |= BIT(p->id);
+		} else {
+			priv->vlan_tagged &= ~BIT(p->id);
+			priv->pvid[p->id] = val->port_vlan;
+		}
+
+		*vt |= BIT(p->id);
+	}
+	return 0;
+}
+
+static int
+ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val,
+		unsigned timeout)
+{
+	int i;
+
+	for (i = 0; i < timeout; i++) {
+		u32 t;
+
+		t = ar40xx_read(priv, reg);
+		if ((t & mask) == val)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	return -ETIMEDOUT;
+}
+
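+/* Trigger a MIB operation (capture or flush) and wait for the
+ * hardware to clear the busy flag.
+ */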
+static int
+ar40xx_mib_op(struct ar40xx_priv *priv, u32 op)
+{
+	int ret;
+
+	lockdep_assert_held(&priv->mib_lock);
+
+	/* Capture the hardware statistics for all ports */
+	ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC,
+		   AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S));
+
+	/* Wait for the capturing to complete. */
+	ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC,
+			      AR40XX_MIB_BUSY, 0, 10);
+
+	return ret;
+}
+
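+/* Accumulate the hardware MIB counters of one port into the software
+ * statistics; 64-bit counters are read as two consecutive 32-bit
+ * words. With flush set, the software statistics are cleared instead.
+ */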
+static void
+ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush)
+{
+	unsigned int base;
+	u64 *mib_stats;
+	int i;
+	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
+
+	WARN_ON(port >= priv->dev.ports);
+
+	lockdep_assert_held(&priv->mib_lock);
+
+	base = AR40XX_REG_PORT_STATS_START +
+	       AR40XX_REG_PORT_STATS_LEN * port;
+
+	mib_stats = &priv->mib_stats[port * num_mibs];
+	if (flush) {
+		u32 len;
+
+		len = num_mibs * sizeof(*mib_stats);
+		memset(mib_stats, 0, len);
+		return;
+	}
+	for (i = 0; i < num_mibs; i++) {
+		const struct ar40xx_mib_desc *mib;
+		u64 t;
+
+		mib = &ar40xx_mibs[i];
+		t = ar40xx_read(priv, base + mib->offset);
+		if (mib->size == 2) {
+			u64 hi;
+
+			hi = ar40xx_read(priv, base + mib->offset + 4);
+			t |= hi << 32;
+		}
+
+		mib_stats[i] += t;
+	}
+}
+
+static int
+ar40xx_mib_capture(struct ar40xx_priv *priv)
+{
+	return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE);
+}
+
+static int
+ar40xx_mib_flush(struct ar40xx_priv *priv)
+{
+	return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH);
+}
+
+static int
+ar40xx_sw_set_reset_mibs(struct switch_dev *dev,
+			 const struct switch_attr *attr,
+			 struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	unsigned int len;
+	int ret;
+	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
+
+	mutex_lock(&priv->mib_lock);
+
+	len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats);
+	memset(priv->mib_stats, 0, len);
+	ret = ar40xx_mib_flush(priv);
+
+	mutex_unlock(&priv->mib_lock);
+	return ret;
+}
+
+static int
+ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+		   struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	priv->vlan = !!val->value.i;
+	return 0;
+}
+
+static int
+ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
+		   struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	val->value.i = priv->vlan;
+	return 0;
+}
+
+static int
+ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev,
+			       const struct switch_attr *attr,
+			       struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	priv->mirror_rx = !!val->value.i;
+	ar40xx_set_mirror_regs(priv);
+	mutex_unlock(&priv->reg_mutex);
+
+	return 0;
+}
+
+static int
+ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev,
+			       const struct switch_attr *attr,
+			       struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	val->value.i = priv->mirror_rx;
+	mutex_unlock(&priv->reg_mutex);
+	return 0;
+}
+
+static int
+ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev,
+			       const struct switch_attr *attr,
+			       struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	priv->mirror_tx = !!val->value.i;
+	ar40xx_set_mirror_regs(priv);
+	mutex_unlock(&priv->reg_mutex);
+
+	return 0;
+}
+
+static int
+ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev,
+			       const struct switch_attr *attr,
+			       struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	val->value.i = priv->mirror_tx;
+	mutex_unlock(&priv->reg_mutex);
+	return 0;
+}
+
+static int
+ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev,
+				  const struct switch_attr *attr,
+				  struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	priv->monitor_port = val->value.i;
+	ar40xx_set_mirror_regs(priv);
+	mutex_unlock(&priv->reg_mutex);
+
+	return 0;
+}
+
+static int
+ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev,
+				  const struct switch_attr *attr,
+				  struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	val->value.i = priv->monitor_port;
+	mutex_unlock(&priv->reg_mutex);
+	return 0;
+}
+
+static int
+ar40xx_sw_set_mirror_source_port(struct switch_dev *dev,
+				 const struct switch_attr *attr,
+				 struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	priv->source_port = val->value.i;
+	ar40xx_set_mirror_regs(priv);
+	mutex_unlock(&priv->reg_mutex);
+
+	return 0;
+}
+
+static int
+ar40xx_sw_get_mirror_source_port(struct switch_dev *dev,
+				 const struct switch_attr *attr,
+				 struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	mutex_lock(&priv->reg_mutex);
+	val->value.i = priv->source_port;
+	mutex_unlock(&priv->reg_mutex);
+	return 0;
+}
+
+static int
+ar40xx_sw_set_linkdown(struct switch_dev *dev,
+		       const struct switch_attr *attr,
+		       struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	if (val->value.i == 1)
+		ar40xx_port_phy_linkdown(priv);
+	else
+		ar40xx_phy_init(priv);
+
+	return 0;
+}
+
+static int
+ar40xx_sw_set_port_reset_mib(struct switch_dev *dev,
+			     const struct switch_attr *attr,
+			     struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	int port;
+	int ret;
+
+	port = val->port_vlan;
+	if (port >= dev->ports)
+		return -EINVAL;
+
+	mutex_lock(&priv->mib_lock);
+	ret = ar40xx_mib_capture(priv);
+	if (ret)
+		goto unlock;
+
+	ar40xx_mib_fetch_port_stat(priv, port, true);
+
+unlock:
+	mutex_unlock(&priv->mib_lock);
+	return ret;
+}
+
+static int
+ar40xx_sw_get_port_mib(struct switch_dev *dev,
+		       const struct switch_attr *attr,
+		       struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	u64 *mib_stats;
+	int port;
+	int ret;
+	char *buf = priv->buf;
+	int i, len = 0;
+	u32 num_mibs = ARRAY_SIZE(ar40xx_mibs);
+
+	port = val->port_vlan;
+	if (port >= dev->ports)
+		return -EINVAL;
+
+	mutex_lock(&priv->mib_lock);
+	ret = ar40xx_mib_capture(priv);
+	if (ret)
+		goto unlock;
+
+	ar40xx_mib_fetch_port_stat(priv, port, false);
+
+	len += snprintf(buf + len, sizeof(priv->buf) - len,
+			"Port %d MIB counters\n",
+			port);
+
+	mib_stats = &priv->mib_stats[port * num_mibs];
+	for (i = 0; i < num_mibs; i++)
+		len += snprintf(buf + len, sizeof(priv->buf) - len,
+				"%-12s: %llu\n",
+				ar40xx_mibs[i].name,
+				mib_stats[i]);
+
+	val->value.s = buf;
+	val->len = len;
+
+unlock:
+	mutex_unlock(&priv->mib_lock);
+	return ret;
+}
+
+static int
+ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
+		  struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	priv->vlan_id[val->port_vlan] = val->value.i;
+	return 0;
+}
+
+static int
+ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
+		  struct switch_val *val)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	val->value.i = priv->vlan_id[val->port_vlan];
+	return 0;
+}
+
+static int
+ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	*vlan = priv->pvid[port];
+	return 0;
+}
+
+static int
+ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	/* make sure no invalid PVIDs get set */
+	if (vlan >= dev->vlans)
+		return -EINVAL;
+
+	priv->pvid[port] = vlan;
+	return 0;
+}
+
+static void
+ar40xx_read_port_link(struct ar40xx_priv *priv, int port,
+		      struct switch_port_link *link)
+{
+	u32 status;
+	u32 speed;
+
+	memset(link, 0, sizeof(*link));
+
+	status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port));
+
+	link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN);
+	if (link->aneg || (port != AR40XX_PORT_CPU))
+		link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP);
+	else
+		link->link = true;
+
+	if (!link->link)
+		return;
+
+	link->duplex = !!(status & AR40XX_PORT_DUPLEX);
+	link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW);
+	link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW);
+
+	speed = (status & AR40XX_PORT_SPEED) >>
+		 AR40XX_PORT_STATUS_SPEED_S;
+
+	switch (speed) {
+	case AR40XX_PORT_SPEED_10M:
+		link->speed = SWITCH_PORT_SPEED_10;
+		break;
+	case AR40XX_PORT_SPEED_100M:
+		link->speed = SWITCH_PORT_SPEED_100;
+		break;
+	case AR40XX_PORT_SPEED_1000M:
+		link->speed = SWITCH_PORT_SPEED_1000;
+		break;
+	default:
+		link->speed = SWITCH_PORT_SPEED_UNKNOWN;
+		break;
+	}
+}
+
+static int
+ar40xx_sw_get_port_link(struct switch_dev *dev, int port,
+			struct switch_port_link *link)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+
+	ar40xx_read_port_link(priv, port, link);
+	return 0;
+}
+
+static const struct switch_attr ar40xx_sw_attr_globals[] = {
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "enable_vlan",
+		.description = "Enable VLAN mode",
+		.set = ar40xx_sw_set_vlan,
+		.get = ar40xx_sw_get_vlan,
+		.max = 1
+	},
+	{
+		.type = SWITCH_TYPE_NOVAL,
+		.name = "reset_mibs",
+		.description = "Reset all MIB counters",
+		.set = ar40xx_sw_set_reset_mibs,
+	},
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "enable_mirror_rx",
+		.description = "Enable mirroring of RX packets",
+		.set = ar40xx_sw_set_mirror_rx_enable,
+		.get = ar40xx_sw_get_mirror_rx_enable,
+		.max = 1
+	},
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "enable_mirror_tx",
+		.description = "Enable mirroring of TX packets",
+		.set = ar40xx_sw_set_mirror_tx_enable,
+		.get = ar40xx_sw_get_mirror_tx_enable,
+		.max = 1
+	},
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "mirror_monitor_port",
+		.description = "Mirror monitor port",
+		.set = ar40xx_sw_set_mirror_monitor_port,
+		.get = ar40xx_sw_get_mirror_monitor_port,
+		.max = AR40XX_NUM_PORTS - 1
+	},
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "mirror_source_port",
+		.description = "Mirror source port",
+		.set = ar40xx_sw_set_mirror_source_port,
+		.get = ar40xx_sw_get_mirror_source_port,
+		.max = AR40XX_NUM_PORTS - 1
+	},
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "linkdown",
+		.description = "Link down all the PHYs",
+		.set = ar40xx_sw_set_linkdown,
+		.max = 1
+	},
+};
+
+static const struct switch_attr ar40xx_sw_attr_port[] = {
+	{
+		.type = SWITCH_TYPE_NOVAL,
+		.name = "reset_mib",
+		.description = "Reset single port MIB counters",
+		.set = ar40xx_sw_set_port_reset_mib,
+	},
+	{
+		.type = SWITCH_TYPE_STRING,
+		.name = "mib",
+		.description = "Get port's MIB counters",
+		.set = NULL,
+		.get = ar40xx_sw_get_port_mib,
+	},
+};
+
+const struct switch_attr ar40xx_sw_attr_vlan[] = {
+	{
+		.type = SWITCH_TYPE_INT,
+		.name = "vid",
+		.description = "VLAN ID (0-4094)",
+		.set = ar40xx_sw_set_vid,
+		.get = ar40xx_sw_get_vid,
+		.max = 4094,
+	},
+};
+
+/* End of swconfig support */
+
+static int
+ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val)
+{
+	int timeout = 20;
+	u32 t;
+
+	while (1) {
+		t = ar40xx_read(priv, reg);
+		if ((t & mask) == val)
+			return 0;
+
+		if (timeout-- <= 0)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n",
+	       (unsigned int)reg, t, mask, val);
+	return -ETIMEDOUT;
+}
+
+static int
+ar40xx_atu_flush(struct ar40xx_priv *priv)
+{
+	int ret;
+
+	ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC,
+			      AR40XX_ATU_FUNC_BUSY, 0);
+	if (!ret)
+		ar40xx_write(priv, AR40XX_REG_ATU_FUNC,
+			     AR40XX_ATU_FUNC_OP_FLUSH |
+			     AR40XX_ATU_FUNC_BUSY);
+
+	return ret;
+}
+
+static void
+ar40xx_ess_reset(struct ar40xx_priv *priv)
+{
+	reset_control_assert(priv->ess_rst);
+	mdelay(10);
+	reset_control_deassert(priv->ess_rst);
+	/* Wait for all internal tables to finish initialization.
+	 * This takes 5~10 ms.
+	 */
+	mdelay(10);
+
+	pr_info("ESS reset ok!\n");
+}
+
+/* Start of psgmii self test */
+
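+/* Reset the Malibu PHY PSGMII block and the ESS, polling until both
+ * the PHY and the ESS report that PSGMII PLL calibration is done.
+ */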
+static void
+ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv)
+{
+	u32 n;
+	struct mii_bus *bus = priv->mii_bus;
+	/* reset phy psgmii */
+	/* fix phy psgmii RX 20bit */
+	mdiobus_write(bus, 5, 0x0, 0x005b);
+	/* reset phy psgmii */
+	mdiobus_write(bus, 5, 0x0, 0x001b);
+	/* release reset phy psgmii */
+	mdiobus_write(bus, 5, 0x0, 0x005b);
+
+	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
+		u16 status;
+
+		status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28);
+		if (status & BIT(0))
+			break;
+		/* Polling interval to check whether the PSGMII PLL in the
+		 * Malibu PHY is ready. The worst case is 8.67 ms for a
+		 * 25 MHz reference clock:
+		 * [512+(128+2048)*49]*80ns+100us
+		 */
+		mdelay(2);
+	}
+	mdelay(50);
+
+	/* check malibu psgmii calibration done end */
+
+	/* freeze phy psgmii RX CDR */
+	mdiobus_write(bus, 5, 0x1a, 0x2230);
+
+	ar40xx_ess_reset(priv);
+
+	/* check psgmii calibration done start */
+	for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) {
+		u32 status;
+
+		status = ar40xx_psgmii_read(priv, 0xa0);
+		if (status & BIT(0))
+			break;
+		/* Polling interval to check PSGMII PLL in ESS is ready */
+		mdelay(2);
+	}
+	mdelay(50);
+
+	/* check dakota psgmii calibration done end */
+
+	/* release phy psgmii RX CDR */
+	mdiobus_write(bus, 5, 0x1a, 0x3230);
+	/* release phy psgmii RX 20bit */
+	mdiobus_write(bus, 5, 0x0, 0x005f);
+	mdelay(200);
+}
+
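+/* Test one PHY over the PSGMII link: enable PHY loopback, start the
+ * built-in packet generator (4096 packets) and verify the TX/RX frame
+ * counters show no errors.
+ */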
+static void
+ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy)
+{
+	int j;
+	u32 tx_ok, tx_error;
+	u32 rx_ok, rx_error;
+	u32 tx_ok_high16;
+	u32 rx_ok_high16;
+	u32 tx_all_ok, rx_all_ok;
+	struct mii_bus *bus = priv->mii_bus;
+
+	mdiobus_write(bus, phy, 0x0, 0x9000);
+	mdiobus_write(bus, phy, 0x0, 0x4140);
+
+	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
+		u16 status;
+
+		status = mdiobus_read(bus, phy, 0x11);
+		if (status & AR40XX_PHY_SPEC_STATUS_LINK)
+			break;
+		/* Polling interval to check whether the PHY link is up.
+		 * maxwait_timer: 750 ms +/- 10 ms
+		 * minwait_timer: 1 us +/- 0.1 us
+		 * The wait time lies between minwait_timer and maxwait_timer,
+		 * see IEEE 802.3 section 40.4.5.2.
+		 */
+		mdelay(8);
+	}
+
+	/* enable check */
+	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000);
+	ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003);
+
+	/* start traffic */
+	ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000);
+	/* Wait for all traffic to end:
+	 * 4096 (pkt num) * 1524 (size) * 8 ns (125 MHz) = 49.9 ms
+	 */
+	mdelay(50);
+
+	/* check counter */
+	tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
+	tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
+	tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
+	rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
+	rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
+	rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
+	tx_all_ok = tx_ok + (tx_ok_high16 << 16);
+	rx_all_ok = rx_ok + (rx_ok_high16 << 16);
+	if (tx_all_ok == 0x1000 && tx_error == 0) {
+		/* success */
+		priv->phy_t_status &= (~BIT(phy));
+	} else {
+		pr_info("PHY %d single test PSGMII issue happen!\n", phy);
+		priv->phy_t_status |= BIT(phy);
+	}
+
+	mdiobus_write(bus, phy, 0x0, 0x1840);
+}
+
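+/* Run the generator/counter check on all PHYs at once using the
+ * broadcast PHY address (0x1f).
+ */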
+static void
+ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv)
+{
+	int phy, j;
+	struct mii_bus *bus = priv->mii_bus;
+
+	mdiobus_write(bus, 0x1f, 0x0, 0x9000);
+	mdiobus_write(bus, 0x1f, 0x0, 0x4140);
+
+	for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) {
+		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
+			u16 status;
+
+			status = mdiobus_read(bus, phy, 0x11);
+			if (!(status & BIT(10)))
+				break;
+		}
+
+		if (phy >= (AR40XX_NUM_PORTS - 1))
+			break;
+		/* Polling interval to check whether the PHY link is up */
+		mdelay(8);
+	}
+	/* enable check */
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000);
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003);
+
+	/* start traffic */
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000);
+	/* Wait for all traffic to end:
+	 * 4096 (pkt num) * 1524 (size) * 8 ns (125 MHz) = 49.9 ms
+	 */
+	mdelay(50);
+
+	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
+		u32 tx_ok, tx_error;
+		u32 rx_ok, rx_error;
+		u32 tx_ok_high16;
+		u32 rx_ok_high16;
+		u32 tx_all_ok, rx_all_ok;
+
+		/* check counter */
+		tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e);
+		tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d);
+		tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f);
+		rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b);
+		rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a);
+		rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c);
+		tx_all_ok = tx_ok + (tx_ok_high16<<16);
+		rx_all_ok = rx_ok + (rx_ok_high16<<16);
+		if (tx_all_ok == 0x1000 && tx_error == 0) {
+			/* success */
+			priv->phy_t_status &= ~BIT(phy + 8);
+		} else {
+			pr_info("PHY%d test see issue!\n", phy);
+			priv->phy_t_status |= BIT(phy + 8);
+		}
+	}
+
+	pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status);
+}
+
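+/* PSGMII self test: run the per-PHY and all-PHY loopback traffic
+ * tests, and repeat the Malibu/ESS PSGMII reset until the link passes
+ * or AR40XX_PSGMII_CALB_NUM attempts have been made.
+ */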
+void
+ar40xx_psgmii_self_test(struct ar40xx_priv *priv)
+{
+	u32 i, phy;
+	struct mii_bus *bus = priv->mii_bus;
+
+	ar40xx_malibu_psgmii_ess_reset(priv);
+
+	/* switch to access MII reg for copper */
+	mdiobus_write(bus, 4, 0x1f, 0x8500);
+	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
+		/* enable phy mdio broadcast write */
+		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f);
+	}
+	/* force no link by power down */
+	mdiobus_write(bus, 0x1f, 0x0, 0x1840);
+	/* packet number */
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000);
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0);
+
+	/* fix mdi status */
+	mdiobus_write(bus, 0x1f, 0x10, 0x6800);
+	for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) {
+		priv->phy_t_status = 0;
+
+		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
+			ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
+				AR40XX_PORT_LOOKUP_LOOPBACK,
+				AR40XX_PORT_LOOKUP_LOOPBACK);
+		}
+
+		for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++)
+			ar40xx_psgmii_single_phy_testing(priv, phy);
+
+		ar40xx_psgmii_all_phy_testing(priv);
+
+		if (priv->phy_t_status)
+			ar40xx_malibu_psgmii_ess_reset(priv);
+		else
+			break;
+	}
+
+	if (i >= AR40XX_PSGMII_CALB_NUM)
+		pr_info("PSGMII cannot recover\n");
+	else
+		pr_debug("PSGMII recovered after %d times reset\n", i);
+
+	/* configuration recover */
+	/* packet number */
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0);
+	/* disable check */
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0);
+	/* disable traffic */
+	ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0);
+}
+
+void
+ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv)
+{
+	int phy;
+	struct mii_bus *bus = priv->mii_bus;
+
+	/* disable phy internal loopback */
+	mdiobus_write(bus, 0x1f, 0x10, 0x6860);
+	mdiobus_write(bus, 0x1f, 0x0, 0x9040);
+
+	for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) {
+		/* disable mac loop back */
+		ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1),
+				AR40XX_PORT_LOOKUP_LOOPBACK, 0);
+		/* disable phy mdio broadcast write */
+		ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f);
+	}
+
+	/* clear fdb entry */
+	ar40xx_atu_flush(priv);
+}
+
+/* End of psgmii self test */
+
+static void
+ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode)
+{
+	if (mode == PORT_WRAPPER_PSGMII) {
+		ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200);
+		ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380);
+	}
+}
+
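+/* Force the CPU port (port 0) to 1000M full duplex with flow control
+ * enabled, then enable its MAC TX/RX.
+ */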
+static
+int ar40xx_cpuport_setup(struct ar40xx_priv *priv)
+{
+	u32 t;
+
+	t = AR40XX_PORT_STATUS_TXFLOW |
+	     AR40XX_PORT_STATUS_RXFLOW |
+	     AR40XX_PORT_TXHALF_FLOW |
+	     AR40XX_PORT_DUPLEX |
+	     AR40XX_PORT_SPEED_1000M;
+	ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
+	usleep_range(10, 20);
+
+	t |= AR40XX_PORT_TX_EN |
+	       AR40XX_PORT_RX_EN;
+	ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t);
+
+	return 0;
+}
+
+static void
+ar40xx_init_port(struct ar40xx_priv *priv, int port)
+{
+	u32 t;
+
+	ar40xx_write(priv, AR40XX_REG_PORT_STATUS(port), 0);
+
+	ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0);
+
+	ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0);
+
+	t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S;
+	ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
+
+	t = AR40XX_PORT_LOOKUP_LEARN;
+	t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
+	ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
+}
+
+void
+ar40xx_init_globals(struct ar40xx_priv *priv)
+{
+	u32 t;
+
+	/* enable CPU port and disable mirror port */
+	t = AR40XX_FWD_CTRL0_CPU_PORT_EN |
+	    AR40XX_FWD_CTRL0_MIRROR_PORT;
+	ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t);
+
+	/* forward multicast and broadcast frames to CPU */
+	t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) |
+	    (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) |
+	    (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S);
+	ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t);
+
+	/* enable jumbo frames */
+	ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE,
+		   AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2);
+
+	/* Enable MIB counters */
+	ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0,
+		   AR40XX_MODULE_EN_MIB);
+
+	/* Disable AZ */
+	ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0);
+
+	/* set flow control threshold for CPU port */
+	t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) |
+	      AR40XX_PORT0_FC_THRESH_OFF_DFLT;
+	ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t);
+}
+
+static int
+ar40xx_hw_init(struct ar40xx_priv *priv)
+{
+	u32 i;
+
+	ar40xx_ess_reset(priv);
+
+	if (!priv->mii_bus)
+		return -1;
+
+	ar40xx_psgmii_self_test(priv);
+	ar40xx_psgmii_self_test_clean(priv);
+
+	ar40xx_mac_mode_init(priv, priv->mac_mode);
+
+	for (i = 0; i < priv->dev.ports; i++)
+		ar40xx_init_port(priv, i);
+
+	ar40xx_init_globals(priv);
+
+	return 0;
+}
+
+/* Start of qm error WAR */
+
+static
+int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id)
+{
+	u32 reg;
+
+	if (port_id < 0 || port_id > 6)
+		return -1;
+
+	reg = AR40XX_REG_PORT_STATUS(port_id);
+	return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED,
+			(AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX));
+}
+
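+/* Read the queue manager (QM) buffer occupancy of one port; each port
+ * occupies 8 bits of the QM debug registers.
+ */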
+static
+int ar40xx_get_qm_status(struct ar40xx_priv *priv,
+			 u32 port_id, u32 *qm_buffer_err)
+{
+	u32 reg;
+	u32 qm_val;
+
+	if (port_id < 1 || port_id > 5) {
+		*qm_buffer_err = 0;
+		return -1;
+	}
+
+	if (port_id < 4) {
+		reg = AR40XX_REG_QM_PORT0_3_QNUM;
+		ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
+		qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
+		/* every 8 bits for each port */
+		*qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF;
+	} else {
+		reg = AR40XX_REG_QM_PORT4_6_QNUM;
+		ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg);
+		qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE);
+		/* every 8 bits for each port */
+		*qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF;
+	}
+
+	return 0;
+}
+
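+/* Poll the PHY link state of every port. On link down, disable MAC
+ * auto-link and, once the port's QM buffers have drained, force the
+ * MAC to 1000M full duplex; on link up, program the MAC speed/duplex
+ * to match the PHY before re-enabling auto-link.
+ */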
+static void
+ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv)
+{
+	static int task_count;
+	u32 i;
+	u32 reg, value;
+	u32 link, speed, duplex;
+	u32 qm_buffer_err;
+	u16 port_phy_status[AR40XX_NUM_PORTS];
+	static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
+	static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0};
+	struct mii_bus *bus = NULL;
+
+	if (!priv || !priv->mii_bus)
+		return;
+
+	bus = priv->mii_bus;
+
+	++task_count;
+
+	for (i = 1; i < AR40XX_NUM_PORTS; ++i) {
+		port_phy_status[i] =
+			mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS);
+		speed = link = duplex = port_phy_status[i];
+		speed &= AR40XX_PHY_SPEC_STATUS_SPEED;
+		speed >>= 14;
+		link &= AR40XX_PHY_SPEC_STATUS_LINK;
+		link >>= 10;
+		duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX;
+		duplex >>= 13;
+
+		if (link != priv->ar40xx_port_old_link[i]) {
+			++link_cnt[i];
+			/* Up --> Down */
+			if ((priv->ar40xx_port_old_link[i] ==
+					AR40XX_PORT_LINK_UP) &&
+			    (link == AR40XX_PORT_LINK_DOWN)) {
+				/* LINK_EN disable(MAC force mode)*/
+				reg = AR40XX_REG_PORT_STATUS(i);
+				ar40xx_rmw(priv, reg,
+						AR40XX_PORT_AUTO_LINK_EN, 0);
+
+				/* Check queue buffer */
+				qm_err_cnt[i] = 0;
+				ar40xx_get_qm_status(priv, i, &qm_buffer_err);
+				if (qm_buffer_err) {
+					priv->ar40xx_port_qm_buf[i] =
+						AR40XX_QM_NOT_EMPTY;
+				} else {
+					u16 phy_val = 0;
+
+					priv->ar40xx_port_qm_buf[i] =
+						AR40XX_QM_EMPTY;
+					ar40xx_force_1g_full(priv, i);
+					/* Ref: QCA8337 datasheet. Clearing
+					 * MANU_CTRL_EN prevents the PHY
+					 * from getting stuck in 100BT
+					 * mode when bringing up the link.
+					 */
+					ar40xx_phy_dbg_read(priv, i-1,
+							    AR40XX_PHY_DEBUG_0,
+							    &phy_val);
+					phy_val &= (~AR40XX_PHY_MANU_CTRL_EN);
+					ar40xx_phy_dbg_write(priv, i-1,
+							     AR40XX_PHY_DEBUG_0,
+							     phy_val);
+				}
+				priv->ar40xx_port_old_link[i] = link;
+			} else if ((priv->ar40xx_port_old_link[i] ==
+						AR40XX_PORT_LINK_DOWN) &&
+					(link == AR40XX_PORT_LINK_UP)) {
+				/* Down --> Up */
+				if (priv->port_link_up[i] < 1) {
+					++priv->port_link_up[i];
+				} else {
+					/* Change port status */
+					reg = AR40XX_REG_PORT_STATUS(i);
+					value = ar40xx_read(priv, reg);
+					priv->port_link_up[i] = 0;
+
+					value &= ~(AR40XX_PORT_DUPLEX |
+						   AR40XX_PORT_SPEED);
+					value |= speed | (duplex ? BIT(6) : 0);
+					ar40xx_write(priv, reg, value);
+					/* the clock switch needs this
+					 * delay to avoid glitches
+					 */
+					usleep_range(100, 200);
+
+					value |= AR40XX_PORT_AUTO_LINK_EN;
+					ar40xx_write(priv, reg, value);
+					/* HW needs this delay to make
+					 * sure the link is stable before
+					 * enabling the MAC
+					 */
+					usleep_range(100, 200);
+
+					if (speed == AR40XX_PORT_SPEED_100M) {
+						u16 phy_val = 0;
+						/* Enable at 100M; if the link
+						 * drops to 10M the clock will
+						 * change smoothly
+						 */
+						ar40xx_phy_dbg_read(priv, i-1,
+								    0,
+								    &phy_val);
+						phy_val |=
+							AR40XX_PHY_MANU_CTRL_EN;
+						ar40xx_phy_dbg_write(priv, i-1,
+								     0,
+								     phy_val);
+					}
+					priv->ar40xx_port_old_link[i] = link;
+				}
+			}
+		}
+
+		if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) {
+			/* Check QM */
+			ar40xx_get_qm_status(priv, i, &qm_buffer_err);
+			if (qm_buffer_err) {
+				++qm_err_cnt[i];
+			} else {
+				priv->ar40xx_port_qm_buf[i] =
+						AR40XX_QM_EMPTY;
+				qm_err_cnt[i] = 0;
+				ar40xx_force_1g_full(priv, i);
+			}
+		}
+	}
+}
+
+static void
+ar40xx_qm_err_check_work_task(struct work_struct *work)
+{
+	struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv,
+					qm_dwork.work);
+
+	mutex_lock(&priv->qm_lock);
+
+	ar40xx_sw_mac_polling_task(priv);
+
+	mutex_unlock(&priv->qm_lock);
+
+	schedule_delayed_work(&priv->qm_dwork,
+			      msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
+}
+
+static int
+ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv)
+{
+	mutex_init(&priv->qm_lock);
+
+	INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task);
+
+	schedule_delayed_work(&priv->qm_dwork,
+			      msecs_to_jiffies(AR40XX_QM_WORK_DELAY));
+
+	return 0;
+}
+
+/* End of qm error WAR */
+
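+/* Default VLAN setup: LAN ports untagged in VLAN 1, WAN ports
+ * untagged in VLAN 2, CPU port a tagged member of both.
+ */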
+static int
+ar40xx_vlan_init(struct ar40xx_priv *priv)
+{
+	int port;
+	unsigned long bmp;
+
+	/* By default Enable VLAN */
+	priv->vlan = 1;
+	priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp;
+	priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp;
+	priv->vlan_tagged = priv->cpu_bmp;
+	bmp = priv->lan_bmp;
+	for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
+			priv->pvid[port] = AR40XX_LAN_VLAN;
+
+	bmp = priv->wan_bmp;
+	for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS)
+			priv->pvid[port] = AR40XX_WAN_VLAN;
+
+	return 0;
+}
+
+static void
+ar40xx_mib_work_func(struct work_struct *work)
+{
+	struct ar40xx_priv *priv;
+	int err;
+
+	priv = container_of(work, struct ar40xx_priv, mib_work.work);
+
+	mutex_lock(&priv->mib_lock);
+
+	err = ar40xx_mib_capture(priv);
+	if (err)
+		goto next_port;
+
+	ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false);
+
+next_port:
+	priv->mib_next_port++;
+	if (priv->mib_next_port >= priv->dev.ports)
+		priv->mib_next_port = 0;
+
+	mutex_unlock(&priv->mib_lock);
+
+	schedule_delayed_work(&priv->mib_work,
+			      msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
+}
+
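+/* Program a port's default VLAN IDs, egress tagging mode, ingress
+ * 802.1q mode and port-based VLAN membership mask.
+ */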
+static void
+ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members)
+{
+	u32 t;
+	u32 egress, ingress;
+	u32 pvid = priv->vlan_id[priv->pvid[port]];
+
+	if (priv->vlan) {
+		egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD;
+
+		ingress = AR40XX_IN_SECURE;
+	} else {
+		egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH;
+		ingress = AR40XX_IN_PORT_ONLY;
+	}
+
+	t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S;
+	t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S;
+	ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t);
+
+	t = AR40XX_PORT_VLAN1_PORT_VLAN_PROP;
+	t |= egress << AR40XX_PORT_VLAN1_OUT_MODE_S;
+
+	ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t);
+
+	t = members;
+	t |= AR40XX_PORT_LOOKUP_LEARN;
+	t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S;
+	t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S;
+	ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t);
+}
+
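+/* Issue a VLAN table (VTU) operation; for LOAD operations the entry
+ * contents are written to VTU_FUNC0 first.
+ */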
+static void
+ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val)
+{
+	if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1,
+			    AR40XX_VTU_FUNC1_BUSY, 0))
+		return;
+
+	if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD)
+		ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val);
+
+	op |= AR40XX_VTU_FUNC1_BUSY;
+	ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op);
+}
+
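+/* Load one VLAN into the VTU, encoding the per-port egress mode
+ * (not a member, keep, tagged or untagged) from the software VLAN
+ * state.
+ */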
+static void
+ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask)
+{
+	u32 op;
+	u32 val;
+	int i;
+
+	op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S);
+	val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL;
+	for (i = 0; i < AR40XX_NUM_PORTS; i++) {
+		u32 mode;
+
+		if ((port_mask & BIT(i)) == 0)
+			mode = AR40XX_VTU_FUNC0_EG_MODE_NOT;
+		else if (priv->vlan == 0)
+			mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP;
+		else if ((priv->vlan_tagged & BIT(i)) ||
+			 (priv->vlan_id[priv->pvid[i]] != vid))
+			mode = AR40XX_VTU_FUNC0_EG_MODE_TAG;
+		else
+			mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG;
+
+		val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i);
+	}
+	ar40xx_vtu_op(priv, op, val);
+}
+
+static void
+ar40xx_vtu_flush(struct ar40xx_priv *priv)
+{
+	ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0);
+}
+
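+/* Apply the software VLAN configuration to the hardware: rebuild the
+ * VTU and the per-port membership masks, or fall back to a plain
+ * CPU-port/LAN-port split when 802.1q is disabled.
+ */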
+static int
+ar40xx_sw_hw_apply(struct switch_dev *dev)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	u8 portmask[AR40XX_NUM_PORTS];
+	int i, j;
+
+	mutex_lock(&priv->reg_mutex);
+	/* flush all vlan entries */
+	ar40xx_vtu_flush(priv);
+
+	memset(portmask, 0, sizeof(portmask));
+	if (priv->vlan) {
+		for (j = 0; j < AR40XX_MAX_VLANS; j++) {
+			u8 vp = priv->vlan_table[j];
+
+			if (!vp)
+				continue;
+
+			for (i = 0; i < dev->ports; i++) {
+				u8 mask = BIT(i);
+
+				if (vp & mask)
+					portmask[i] |= vp & ~mask;
+			}
+
+			ar40xx_vtu_load_vlan(priv, priv->vlan_id[j],
+					     priv->vlan_table[j]);
+		}
+	} else {
+		/* 8021q vlan disabled */
+		for (i = 0; i < dev->ports; i++) {
+			if (i == AR40XX_PORT_CPU)
+				continue;
+
+			portmask[i] = BIT(AR40XX_PORT_CPU);
+			portmask[AR40XX_PORT_CPU] |= BIT(i);
+		}
+	}
+
+	/* update the port destination mask registers and tag settings */
+	for (i = 0; i < dev->ports; i++)
+		ar40xx_setup_port(priv, i, portmask[i]);
+
+	ar40xx_set_mirror_regs(priv);
+
+	mutex_unlock(&priv->reg_mutex);
+	return 0;
+}
+
+static int
+ar40xx_sw_reset_switch(struct switch_dev *dev)
+{
+	struct ar40xx_priv *priv = swdev_to_ar40xx(dev);
+	int i, rv;
+
+	mutex_lock(&priv->reg_mutex);
+	memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) -
+		offsetof(struct ar40xx_priv, vlan));
+
+	for (i = 0; i < AR40XX_MAX_VLANS; i++)
+		priv->vlan_id[i] = i;
+
+	ar40xx_vlan_init(priv);
+
+	priv->mirror_rx = false;
+	priv->mirror_tx = false;
+	priv->source_port = 0;
+	priv->monitor_port = 0;
+
+	mutex_unlock(&priv->reg_mutex);
+
+	rv = ar40xx_sw_hw_apply(dev);
+	return rv;
+}
+
+static int
+ar40xx_start(struct ar40xx_priv *priv)
+{
+	int ret;
+
+	ret = ar40xx_hw_init(priv);
+	if (ret)
+		return ret;
+
+	ret = ar40xx_sw_reset_switch(&priv->dev);
+	if (ret)
+		return ret;
+
+	/* at last, setup cpu port */
+	ret = ar40xx_cpuport_setup(priv);
+	if (ret)
+		return ret;
+
+	schedule_delayed_work(&priv->mib_work,
+			      msecs_to_jiffies(AR40XX_MIB_WORK_DELAY));
+
+	ar40xx_qm_err_check_work_start(priv);
+
+	return 0;
+}
+
+static const struct switch_dev_ops ar40xx_sw_ops = {
+	.attr_global = {
+		.attr = ar40xx_sw_attr_globals,
+		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals),
+	},
+	.attr_port = {
+		.attr = ar40xx_sw_attr_port,
+		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_port),
+	},
+	.attr_vlan = {
+		.attr = ar40xx_sw_attr_vlan,
+		.n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan),
+	},
+	.get_port_pvid = ar40xx_sw_get_pvid,
+	.set_port_pvid = ar40xx_sw_set_pvid,
+	.get_vlan_ports = ar40xx_sw_get_ports,
+	.set_vlan_ports = ar40xx_sw_set_ports,
+	.apply_config = ar40xx_sw_hw_apply,
+	.reset_switch = ar40xx_sw_reset_switch,
+	.get_port_link = ar40xx_sw_get_port_link,
+};
+
+/* Platform driver probe function */
+
+static int ar40xx_probe(struct platform_device *pdev)
+{
+	struct device_node *switch_node;
+	struct device_node *psgmii_node;
+	struct device_node *mdio_node;
+	const __be32 *mac_mode;
+	struct clk *ess_clk;
+	struct switch_dev *swdev;
+	struct ar40xx_priv *priv;
+	u32 len;
+	u32 num_mibs;
+	struct resource psgmii_base = {0};
+	struct resource switch_base = {0};
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+	ar40xx_priv = priv;
+
+	switch_node = of_node_get(pdev->dev.of_node);
+	if (of_address_to_resource(switch_node, 0, &switch_base) != 0)
+		return -EIO;
+
+	priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base);
+	if (IS_ERR(priv->hw_addr)) {
+		dev_err(&pdev->dev, "Failed to ioremap switch_base!\n");
+		return PTR_ERR(priv->hw_addr);
+	}
+
+	/* get the psgmii node from the device tree */
+	psgmii_node = of_find_node_by_name(NULL, "ess-psgmii");
+	if (!psgmii_node) {
+		dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n");
+		return -EINVAL;
+	}
+
+	if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0)
+		return -EIO;
+
+	priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base);
+	if (IS_ERR(priv->psgmii_hw_addr)) {
+		dev_err(&pdev->dev, "psgmii ioremap fail!\n");
+		return PTR_ERR(priv->psgmii_hw_addr);
+	}
+
+	mac_mode = of_get_property(switch_node, "switch_mac_mode", &len);
+	if (!mac_mode) {
+		dev_err(&pdev->dev, "Failed to read switch_mac_mode\n");
+		return -EINVAL;
+	}
+	priv->mac_mode = be32_to_cpup(mac_mode);
+
+	ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
+	/* of_clk_get_by_name() returns an ERR_PTR on failure, never NULL */
+	if (!IS_ERR(ess_clk))
+		clk_prepare_enable(ess_clk);
+
+	priv->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
+	if (IS_ERR(priv->ess_rst)) {
+		dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
+		return PTR_ERR(priv->ess_rst);
+	}
+
+	if (of_property_read_u32(switch_node, "switch_cpu_bmp",
+				 &priv->cpu_bmp) ||
+	    of_property_read_u32(switch_node, "switch_lan_bmp",
+				 &priv->lan_bmp) ||
+	    of_property_read_u32(switch_node, "switch_wan_bmp",
+				 &priv->wan_bmp)) {
+		dev_err(&pdev->dev, "Failed to read port properties\n");
+		return -EIO;
+	}
+
+	mutex_init(&priv->reg_mutex);
+	mutex_init(&priv->mib_lock);
+	INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func);
+
+	/* register switch */
+	swdev = &priv->dev;
+
+	mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq4019-mdio");
+	if (!mdio_node) {
+		dev_err(&pdev->dev, "Probe failed - Cannot find mdio node by phandle!\n");
+		ret = -ENODEV;
+		goto err_missing_phy;
+	}
+
+	priv->mii_bus = of_mdio_find_bus(mdio_node);
+
+	if (priv->mii_bus == NULL) {
+		dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n");
+		ret = -ENODEV;
+		goto err_missing_phy;
+	}
+
+	swdev->alias = dev_name(&priv->mii_bus->dev);
+
+	swdev->cpu_port = AR40XX_PORT_CPU;
+	swdev->name = "QCA AR40xx";
+	swdev->vlans = AR40XX_MAX_VLANS;
+	swdev->ports = AR40XX_NUM_PORTS;
+	swdev->ops = &ar40xx_sw_ops;
+	ret = register_switch(swdev, NULL);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Switch registration failed!\n");
+		return ret;
+	}
+
+	num_mibs = ARRAY_SIZE(ar40xx_mibs);
+	len = priv->dev.ports * num_mibs *
+	      sizeof(*priv->mib_stats);
+	priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (!priv->mib_stats) {
+		ret = -ENOMEM;
+		goto err_unregister_switch;
+	}
+
+	ar40xx_start(priv);
+
+	return 0;
+
+err_unregister_switch:
+	unregister_switch(&priv->dev);
+err_missing_phy:
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int ar40xx_remove(struct platform_device *pdev)
+{
+	struct ar40xx_priv *priv = platform_get_drvdata(pdev);
+
+	cancel_delayed_work_sync(&priv->qm_dwork);
+	cancel_delayed_work_sync(&priv->mib_work);
+
+	unregister_switch(&priv->dev);
+
+	return 0;
+}
+
+static const struct of_device_id ar40xx_of_mtable[] = {
+	{.compatible = "qcom,ess-switch" },
+	{}
+};
+
+struct platform_driver ar40xx_drv = {
+	.probe = ar40xx_probe,
+	.remove = ar40xx_remove,
+	.driver = {
+		.name    = "ar40xx",
+		.of_match_table = ar40xx_of_mtable,
+	},
+};
+
+module_platform_driver(ar40xx_drv);
+
+MODULE_DESCRIPTION("IPQ40XX ESS driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h
new file mode 100644
index 0000000..7ba40cc
--- /dev/null
+++ b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __AR40XX_H
+#define __AR40XX_H
+
+#define AR40XX_MAX_VLANS	128
+#define AR40XX_NUM_PORTS	6
+#define AR40XX_NUM_PHYS	5
+
+#define BITS(_s, _n)	(((1UL << (_n)) - 1) << (_s))
+
+struct ar40xx_priv {
+	struct switch_dev dev;
+
+	u8  __iomem      *hw_addr;
+	u8  __iomem      *psgmii_hw_addr;
+	u32 mac_mode;
+	struct reset_control *ess_rst;
+	u32 cpu_bmp;
+	u32 lan_bmp;
+	u32 wan_bmp;
+
+	struct mii_bus *mii_bus;
+	struct phy_device *phy;
+
+	/* mutex for qm task */
+	struct mutex qm_lock;
+	struct delayed_work qm_dwork;
+	u32 port_link_up[AR40XX_NUM_PORTS];
+	u32 ar40xx_port_old_link[AR40XX_NUM_PORTS];
+	u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS];
+
+	u32 phy_t_status;
+
+	/* mutex for switch reg access */
+	struct mutex reg_mutex;
+
+	/* mutex for mib task */
+	struct mutex mib_lock;
+	struct delayed_work mib_work;
+	int mib_next_port;
+	u64 *mib_stats;
+
+	char buf[2048];
+
+	/* all fields below will be cleared on reset */
+	bool vlan;
+	u16 vlan_id[AR40XX_MAX_VLANS];
+	u8 vlan_table[AR40XX_MAX_VLANS];
+	u8 vlan_tagged;
+	u16 pvid[AR40XX_NUM_PORTS];
+
+	/* mirror */
+	bool mirror_rx;
+	bool mirror_tx;
+	int source_port;
+	int monitor_port;
+};
+
+#define AR40XX_PORT_LINK_UP 1
+#define AR40XX_PORT_LINK_DOWN 0
+#define AR40XX_QM_NOT_EMPTY  1
+#define AR40XX_QM_EMPTY  0
+
+#define AR40XX_LAN_VLAN	1
+#define AR40XX_WAN_VLAN	2
+
+enum ar40xx_port_wrapper_cfg {
+	PORT_WRAPPER_PSGMII = 0,
+};
+
+struct ar40xx_mib_desc {
+	u32 size;
+	u32 offset;
+	const char *name;
+};
+
+#define AR40XX_PORT_CPU	0
+
+#define AR40XX_PSGMII_MODE_CONTROL	0x1b4
+#define   AR40XX_PSGMII_ATHR_CSCO_MODE_25M	BIT(0)
+
+#define AR40XX_PSGMIIPHY_TX_CONTROL	 0x288
+
+#define AR40XX_MII_ATH_MMD_ADDR		0x0d
+#define AR40XX_MII_ATH_MMD_DATA		0x0e
+#define AR40XX_MII_ATH_DBG_ADDR		0x1d
+#define AR40XX_MII_ATH_DBG_DATA		0x1e
+
+#define AR40XX_STATS_RXBROAD		0x00
+#define AR40XX_STATS_RXPAUSE		0x04
+#define AR40XX_STATS_RXMULTI		0x08
+#define AR40XX_STATS_RXFCSERR		0x0c
+#define AR40XX_STATS_RXALIGNERR		0x10
+#define AR40XX_STATS_RXRUNT		0x14
+#define AR40XX_STATS_RXFRAGMENT		0x18
+#define AR40XX_STATS_RX64BYTE		0x1c
+#define AR40XX_STATS_RX128BYTE		0x20
+#define AR40XX_STATS_RX256BYTE		0x24
+#define AR40XX_STATS_RX512BYTE		0x28
+#define AR40XX_STATS_RX1024BYTE		0x2c
+#define AR40XX_STATS_RX1518BYTE		0x30
+#define AR40XX_STATS_RXMAXBYTE		0x34
+#define AR40XX_STATS_RXTOOLONG		0x38
+#define AR40XX_STATS_RXGOODBYTE		0x3c
+#define AR40XX_STATS_RXBADBYTE		0x44
+#define AR40XX_STATS_RXOVERFLOW		0x4c
+#define AR40XX_STATS_FILTERED		0x50
+#define AR40XX_STATS_TXBROAD		0x54
+#define AR40XX_STATS_TXPAUSE		0x58
+#define AR40XX_STATS_TXMULTI		0x5c
+#define AR40XX_STATS_TXUNDERRUN		0x60
+#define AR40XX_STATS_TX64BYTE		0x64
+#define AR40XX_STATS_TX128BYTE		0x68
+#define AR40XX_STATS_TX256BYTE		0x6c
+#define AR40XX_STATS_TX512BYTE		0x70
+#define AR40XX_STATS_TX1024BYTE		0x74
+#define AR40XX_STATS_TX1518BYTE		0x78
+#define AR40XX_STATS_TXMAXBYTE		0x7c
+#define AR40XX_STATS_TXOVERSIZE		0x80
+#define AR40XX_STATS_TXBYTE		0x84
+#define AR40XX_STATS_TXCOLLISION	0x8c
+#define AR40XX_STATS_TXABORTCOL		0x90
+#define AR40XX_STATS_TXMULTICOL		0x94
+#define AR40XX_STATS_TXSINGLECOL	0x98
+#define AR40XX_STATS_TXEXCDEFER		0x9c
+#define AR40XX_STATS_TXDEFER		0xa0
+#define AR40XX_STATS_TXLATECOL		0xa4
+
+#define AR40XX_REG_MODULE_EN			0x030
+#define   AR40XX_MODULE_EN_MIB			BIT(0)
+
+#define AR40XX_REG_MIB_FUNC			0x034
+#define   AR40XX_MIB_BUSY		BIT(17)
+#define   AR40XX_MIB_CPU_KEEP			BIT(20)
+#define   AR40XX_MIB_FUNC		BITS(24, 3)
+#define   AR40XX_MIB_FUNC_S		24
+#define   AR40XX_MIB_FUNC_NO_OP		0x0
+#define   AR40XX_MIB_FUNC_FLUSH		0x1
+
+#define AR40XX_ESS_SERVICE_TAG		0x48
+#define AR40XX_ESS_SERVICE_TAG_STAG	BIT(17)
+
+#define AR40XX_REG_PORT_STATUS(_i)		(0x07c + (_i) * 4)
+#define   AR40XX_PORT_SPEED			BITS(0, 2)
+#define   AR40XX_PORT_STATUS_SPEED_S	0
+#define   AR40XX_PORT_TX_EN			BIT(2)
+#define   AR40XX_PORT_RX_EN			BIT(3)
+#define   AR40XX_PORT_STATUS_TXFLOW	BIT(4)
+#define   AR40XX_PORT_STATUS_RXFLOW	BIT(5)
+#define   AR40XX_PORT_DUPLEX			BIT(6)
+#define   AR40XX_PORT_TXHALF_FLOW		BIT(7)
+#define   AR40XX_PORT_STATUS_LINK_UP	BIT(8)
+#define   AR40XX_PORT_AUTO_LINK_EN		BIT(9)
+#define   AR40XX_PORT_STATUS_FLOW_CONTROL  BIT(12)
+
+#define AR40XX_REG_MAX_FRAME_SIZE		0x078
+#define   AR40XX_MAX_FRAME_SIZE_MTU		BITS(0, 14)
+
+#define AR40XX_REG_PORT_HEADER(_i)		(0x09c + (_i) * 4)
+
+#define AR40XX_REG_EEE_CTRL			0x100
+#define   AR40XX_EEE_CTRL_DISABLE_PHY(_i)	BIT(4 + (_i) * 2)
+
+#define AR40XX_REG_PORT_VLAN0(_i)		(0x420 + (_i) * 0x8)
+#define   AR40XX_PORT_VLAN0_DEF_SVID		BITS(0, 12)
+#define   AR40XX_PORT_VLAN0_DEF_SVID_S		0
+#define   AR40XX_PORT_VLAN0_DEF_CVID		BITS(16, 12)
+#define   AR40XX_PORT_VLAN0_DEF_CVID_S		16
+
+#define AR40XX_REG_PORT_VLAN1(_i)		(0x424 + (_i) * 0x8)
+#define   AR40XX_PORT_VLAN1_CORE_PORT		BIT(9)
+#define   AR40XX_PORT_VLAN1_PORT_TLS_MODE	BIT(7)
+#define   AR40XX_PORT_VLAN1_PORT_VLAN_PROP	BIT(6)
+#define   AR40XX_PORT_VLAN1_OUT_MODE		BITS(12, 2)
+#define   AR40XX_PORT_VLAN1_OUT_MODE_S		12
+#define   AR40XX_PORT_VLAN1_OUT_MODE_UNMOD	0
+#define   AR40XX_PORT_VLAN1_OUT_MODE_UNTAG	1
+#define   AR40XX_PORT_VLAN1_OUT_MODE_TAG		2
+#define   AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH	3
+
+#define AR40XX_REG_VTU_FUNC0			0x0610
+#define   AR40XX_VTU_FUNC0_EG_MODE		BITS(4, 14)
+#define   AR40XX_VTU_FUNC0_EG_MODE_S(_i)	(4 + (_i) * 2)
+#define   AR40XX_VTU_FUNC0_EG_MODE_KEEP		0
+#define   AR40XX_VTU_FUNC0_EG_MODE_UNTAG	1
+#define   AR40XX_VTU_FUNC0_EG_MODE_TAG		2
+#define   AR40XX_VTU_FUNC0_EG_MODE_NOT		3
+#define   AR40XX_VTU_FUNC0_IVL			BIT(19)
+#define   AR40XX_VTU_FUNC0_VALID		BIT(20)
+
+#define AR40XX_REG_VTU_FUNC1			0x0614
+#define   AR40XX_VTU_FUNC1_OP			BITS(0, 3)
+#define   AR40XX_VTU_FUNC1_OP_NOOP		0
+#define   AR40XX_VTU_FUNC1_OP_FLUSH		1
+#define   AR40XX_VTU_FUNC1_OP_LOAD		2
+#define   AR40XX_VTU_FUNC1_OP_PURGE		3
+#define   AR40XX_VTU_FUNC1_OP_REMOVE_PORT	4
+#define   AR40XX_VTU_FUNC1_OP_GET_NEXT		5
+#define   AR40XX7_VTU_FUNC1_OP_GET_ONE		6
+#define   AR40XX_VTU_FUNC1_FULL			BIT(4)
+#define   AR40XX_VTU_FUNC1_PORT			BITS(8, 4)
+#define   AR40XX_VTU_FUNC1_PORT_S		8
+#define   AR40XX_VTU_FUNC1_VID			BITS(16, 12)
+#define   AR40XX_VTU_FUNC1_VID_S		16
+#define   AR40XX_VTU_FUNC1_BUSY			BIT(31)
+
+#define AR40XX_REG_FWD_CTRL0			0x620
+#define   AR40XX_FWD_CTRL0_CPU_PORT_EN		BIT(10)
+#define   AR40XX_FWD_CTRL0_MIRROR_PORT		BITS(4, 4)
+#define   AR40XX_FWD_CTRL0_MIRROR_PORT_S	4
+
+#define AR40XX_REG_FWD_CTRL1			0x624
+#define   AR40XX_FWD_CTRL1_UC_FLOOD		BITS(0, 7)
+#define   AR40XX_FWD_CTRL1_UC_FLOOD_S		0
+#define   AR40XX_FWD_CTRL1_MC_FLOOD		BITS(8, 7)
+#define   AR40XX_FWD_CTRL1_MC_FLOOD_S		8
+#define   AR40XX_FWD_CTRL1_BC_FLOOD		BITS(16, 7)
+#define   AR40XX_FWD_CTRL1_BC_FLOOD_S		16
+#define   AR40XX_FWD_CTRL1_IGMP			BITS(24, 7)
+#define   AR40XX_FWD_CTRL1_IGMP_S		24
+
+#define AR40XX_REG_PORT_LOOKUP(_i)		(0x660 + (_i) * 0xc)
+#define   AR40XX_PORT_LOOKUP_MEMBER		BITS(0, 7)
+#define   AR40XX_PORT_LOOKUP_IN_MODE		BITS(8, 2)
+#define   AR40XX_PORT_LOOKUP_IN_MODE_S		8
+#define   AR40XX_PORT_LOOKUP_STATE		BITS(16, 3)
+#define   AR40XX_PORT_LOOKUP_STATE_S		16
+#define   AR40XX_PORT_LOOKUP_LEARN		BIT(20)
+#define   AR40XX_PORT_LOOKUP_LOOPBACK		BIT(21)
+#define   AR40XX_PORT_LOOKUP_ING_MIRROR_EN	BIT(25)
+
+#define AR40XX_REG_ATU_FUNC			0x60c
+#define   AR40XX_ATU_FUNC_OP			BITS(0, 4)
+#define   AR40XX_ATU_FUNC_OP_NOOP		0x0
+#define   AR40XX_ATU_FUNC_OP_FLUSH		0x1
+#define   AR40XX_ATU_FUNC_OP_LOAD		0x2
+#define   AR40XX_ATU_FUNC_OP_PURGE		0x3
+#define   AR40XX_ATU_FUNC_OP_FLUSH_LOCKED	0x4
+#define   AR40XX_ATU_FUNC_OP_FLUSH_UNICAST	0x5
+#define   AR40XX_ATU_FUNC_OP_GET_NEXT		0x6
+#define   AR40XX_ATU_FUNC_OP_SEARCH_MAC		0x7
+#define   AR40XX_ATU_FUNC_OP_CHANGE_TRUNK	0x8
+#define   AR40XX_ATU_FUNC_BUSY			BIT(31)
+
+#define AR40XX_REG_QM_DEBUG_ADDR		0x820
+#define AR40XX_REG_QM_DEBUG_VALUE		0x824
+#define   AR40XX_REG_QM_PORT0_3_QNUM		0x1d
+#define   AR40XX_REG_QM_PORT4_6_QNUM		0x1e
+
+#define AR40XX_REG_PORT_HOL_CTRL1(_i)		(0x974 + (_i) * 0x8)
+#define   AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN	BIT(16)
+
+#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i)	(0x9b0 + (_i) * 0x4)
+#define   AR40XX_PORT0_FC_THRESH_ON_DFLT	0x60
+#define   AR40XX_PORT0_FC_THRESH_OFF_DFLT	0x90
+
+#define AR40XX_PHY_DEBUG_0   0
+#define AR40XX_PHY_MANU_CTRL_EN  BIT(12)
+
+#define AR40XX_PHY_DEBUG_2   2
+
+#define AR40XX_PHY_SPEC_STATUS 0x11
+#define   AR40XX_PHY_SPEC_STATUS_LINK		BIT(10)
+#define   AR40XX_PHY_SPEC_STATUS_DUPLEX		BIT(13)
+#define   AR40XX_PHY_SPEC_STATUS_SPEED		BITS(14, 2)
+
+/* port forwarding state */
+enum {
+	AR40XX_PORT_STATE_DISABLED = 0,
+	AR40XX_PORT_STATE_BLOCK = 1,
+	AR40XX_PORT_STATE_LISTEN = 2,
+	AR40XX_PORT_STATE_LEARN = 3,
+	AR40XX_PORT_STATE_FORWARD = 4
+};
+
+/* ingress 802.1q mode */
+enum {
+	AR40XX_IN_PORT_ONLY = 0,
+	AR40XX_IN_PORT_FALLBACK = 1,
+	AR40XX_IN_VLAN_ONLY = 2,
+	AR40XX_IN_SECURE = 3
+};
+
+/* egress 802.1q mode */
+enum {
+	AR40XX_OUT_KEEP = 0,
+	AR40XX_OUT_STRIP_VLAN = 1,
+	AR40XX_OUT_ADD_VLAN = 2
+};
+
+/* port speed */
+enum {
+	AR40XX_PORT_SPEED_10M = 0,
+	AR40XX_PORT_SPEED_100M = 1,
+	AR40XX_PORT_SPEED_1000M = 2,
+	AR40XX_PORT_SPEED_ERR = 3,
+};
+
+#define AR40XX_MIB_WORK_DELAY	2000 /* msecs */
+
+#define AR40XX_QM_WORK_DELAY    100
+
+#define   AR40XX_MIB_FUNC_CAPTURE	0x3
+
+#define AR40XX_REG_PORT_STATS_START	0x1000
+#define AR40XX_REG_PORT_STATS_LEN		0x100
+
+#define AR40XX_PORTS_ALL	0x3f
+
+#define AR40XX_PSGMII_ID	5
+#define AR40XX_PSGMII_CALB_NUM	100
+#define AR40XX_MALIBU_PSGMII_MODE_CTRL	0x6d
+#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL	0x220c
+#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL	0x801a
+#define AR40XX_MALIBU_DAC_CTRL_MASK	0x380
+#define AR40XX_MALIBU_DAC_CTRL_VALUE	0x280
+#define AR40XX_MALIBU_PHY_RLP_CTRL       0x805a
+#define AR40XX_PSGMII_TX_DRIVER_1_CTRL	0xb
+#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP	0x8a
+#define AR40XX_MALIBU_PHY_LAST_ADDR	4
+
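+/* Map a generic switch_dev back to the ar40xx private data it is embedded in */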
+static inline struct ar40xx_priv *
+swdev_to_ar40xx(struct switch_dev *swdev)
+{
+	return container_of(swdev, struct ar40xx_priv, dev);
+}
+
+#endif
diff --git a/target/linux/ipq40xx/files/drivers/net/phy/qca807x.c b/target/linux/ipq40xx/files/drivers/net/phy/qca807x.c
new file mode 100644
index 0000000..16d7a80
--- /dev/null
+++ b/target/linux/ipq40xx/files/drivers/net/phy/qca807x.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2020 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ *
+ * Qualcomm QCA8072 and QCA8075 PHY driver
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/bitfield.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+#include <linux/ethtool_netlink.h>
+#endif
+#include <linux/gpio.h>
+#include <linux/sfp.h>
+
+#include <dt-bindings/net/qcom-qca807x.h>
+
+#define PHY_ID_QCA8072		0x004dd0b2
+#define PHY_ID_QCA8075		0x004dd0b1
+#define PHY_ID_QCA807X_PSGMII	0x06820805
+
+/* Downshift */
+#define QCA807X_SMARTSPEED_EN			BIT(5)
+#define QCA807X_SMARTSPEED_RETRY_LIMIT_MASK	GENMASK(4, 2)
+#define QCA807X_SMARTSPEED_RETRY_LIMIT_DEFAULT	5
+#define QCA807X_SMARTSPEED_RETRY_LIMIT_MIN	2
+#define QCA807X_SMARTSPEED_RETRY_LIMIT_MAX	9
+
+/* Cable diagnostic test (CDT) */
+#define QCA807X_CDT						0x16
+#define QCA807X_CDT_ENABLE					BIT(15)
+#define QCA807X_CDT_ENABLE_INTER_PAIR_SHORT			BIT(13)
+#define QCA807X_CDT_STATUS					BIT(11)
+#define QCA807X_CDT_MMD3_STATUS					0x8064
+#define QCA807X_CDT_MDI0_STATUS_MASK				GENMASK(15, 12)
+#define QCA807X_CDT_MDI1_STATUS_MASK				GENMASK(11, 8)
+#define QCA807X_CDT_MDI2_STATUS_MASK				GENMASK(7, 4)
+#define QCA807X_CDT_MDI3_STATUS_MASK				GENMASK(3, 0)
+#define QCA807X_CDT_RESULTS_INVALID				0x0
+#define QCA807X_CDT_RESULTS_OK					0x1
+#define QCA807X_CDT_RESULTS_OPEN				0x2
+#define QCA807X_CDT_RESULTS_SAME_SHORT				0x3
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OK	0x4
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OK	0x8
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OK	0xc
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OPEN	0x6
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OPEN	0xa
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OPEN	0xe
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_SHORT	0x7
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_SHORT	0xb
+#define QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_SHORT	0xf
+#define QCA807X_CDT_RESULTS_BUSY				0x9
+#define QCA807X_CDT_MMD3_MDI0_LENGTH				0x8065
+#define QCA807X_CDT_MMD3_MDI1_LENGTH				0x8066
+#define QCA807X_CDT_MMD3_MDI2_LENGTH				0x8067
+#define QCA807X_CDT_MMD3_MDI3_LENGTH				0x8068
+#define QCA807X_CDT_SAME_SHORT_LENGTH_MASK			GENMASK(15, 8)
+#define QCA807X_CDT_CROSS_SHORT_LENGTH_MASK			GENMASK(7, 0)
+
+#define QCA807X_CHIP_CONFIGURATION				0x1f
+#define QCA807X_BT_BX_REG_SEL					BIT(15)
+#define QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK		GENMASK(3, 0)
+#define QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII		4
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER		3
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER	0
+
+#define QCA807X_MEDIA_SELECT_STATUS				0x1a
+#define QCA807X_MEDIA_DETECTED_COPPER				BIT(5)
+#define QCA807X_MEDIA_DETECTED_1000_BASE_X			BIT(4)
+#define QCA807X_MEDIA_DETECTED_100_BASE_FX			BIT(3)
+
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION			0x807e
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN		BIT(0)
+
+#define QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH	0x801a
+#define QCA807X_CONTROL_DAC_MASK				GENMASK(2, 0)
+
+#define QCA807X_MMD7_LED_100N_1				0x8074
+#define QCA807X_MMD7_LED_100N_2				0x8075
+#define QCA807X_MMD7_LED_1000N_1			0x8076
+#define QCA807X_MMD7_LED_1000N_2			0x8077
+#define QCA807X_LED_TXACT_BLK_EN_2			BIT(10)
+#define QCA807X_LED_RXACT_BLK_EN_2			BIT(9)
+#define QCA807X_LED_GT_ON_EN_2				BIT(6)
+#define QCA807X_LED_HT_ON_EN_2				BIT(5)
+#define QCA807X_LED_BT_ON_EN_2				BIT(4)
+#define QCA807X_GPIO_FORCE_EN				BIT(15)
+#define QCA807X_GPIO_FORCE_MODE_MASK			GENMASK(14, 13)
+
+#define QCA807X_INTR_ENABLE				0x12
+#define QCA807X_INTR_STATUS				0x13
+#define QCA807X_INTR_ENABLE_AUTONEG_ERR			BIT(15)
+#define QCA807X_INTR_ENABLE_SPEED_CHANGED		BIT(14)
+#define QCA807X_INTR_ENABLE_DUPLEX_CHANGED		BIT(13)
+#define QCA807X_INTR_ENABLE_LINK_FAIL			BIT(11)
+#define QCA807X_INTR_ENABLE_LINK_SUCCESS		BIT(10)
+
+#define QCA807X_FUNCTION_CONTROL			0x10
+#define QCA807X_FC_MDI_CROSSOVER_MODE_MASK		GENMASK(6, 5)
+#define QCA807X_FC_MDI_CROSSOVER_AUTO			3
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX		1
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI		0
+
+#define QCA807X_PHY_SPECIFIC_STATUS			0x11
+#define QCA807X_SS_SPEED_AND_DUPLEX_RESOLVED		BIT(11)
+#define QCA807X_SS_SPEED_MASK				GENMASK(15, 14)
+#define QCA807X_SS_SPEED_1000				2
+#define QCA807X_SS_SPEED_100				1
+#define QCA807X_SS_SPEED_10				0
+#define QCA807X_SS_DUPLEX				BIT(13)
+#define QCA807X_SS_MDIX					BIT(6)
+
+/* PSGMII PHY specific */
+#define PSGMII_QSGMII_DRIVE_CONTROL_1			0xb
+#define PSGMII_QSGMII_TX_DRIVER_MASK			GENMASK(7, 4)
+#define PSGMII_MODE_CTRL				0x6d
+#define PSGMII_MODE_CTRL_AZ_WORKAROUND_MASK		GENMASK(3, 0)
+#define PSGMII_MMD3_SERDES_CONTROL			0x805a
+
+struct qca807x_gpio_priv {
+	struct phy_device *phy;
+};
+
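+/*
+ * Downshift (SmartSpeed) state is read from the MII_NWAYTEST register: one
+ * enable bit plus a retry-limit field stored with an offset of two.
+ */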
+static int qca807x_get_downshift(struct phy_device *phydev, u8 *data)
+{
+	int val, cnt, enable;
+
+	val = phy_read(phydev, MII_NWAYTEST);
+	if (val < 0)
+		return val;
+
+	enable = FIELD_GET(QCA807X_SMARTSPEED_EN, val);
+	cnt = FIELD_GET(QCA807X_SMARTSPEED_RETRY_LIMIT_MASK, val) + 2;
+
+	*data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+	return 0;
+}
+
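+/* Program the SmartSpeed retry limit, or disable downshift when cnt is 0 */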
+static int qca807x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+	int ret, val;
+
+	if (cnt > QCA807X_SMARTSPEED_RETRY_LIMIT_MAX ||
+	    (cnt < QCA807X_SMARTSPEED_RETRY_LIMIT_MIN && cnt != DOWNSHIFT_DEV_DISABLE))
+		return -EINVAL;
+
+	if (!cnt) {
+		ret = phy_clear_bits(phydev, MII_NWAYTEST, QCA807X_SMARTSPEED_EN);
+	} else {
+		val = QCA807X_SMARTSPEED_EN;
+		val |= FIELD_PREP(QCA807X_SMARTSPEED_RETRY_LIMIT_MASK, cnt - 2);
+
+		ret = phy_modify(phydev, MII_NWAYTEST,
+				 QCA807X_SMARTSPEED_EN |
+				 QCA807X_SMARTSPEED_RETRY_LIMIT_MASK,
+				 val);
+	}
+	if (ret < 0)
+		return ret;
+
+	/* The new setting is applied by issuing a soft reset */
+	return genphy_soft_reset(phydev);
+}
+
+static int qca807x_get_tunable(struct phy_device *phydev,
+			       struct ethtool_tunable *tuna, void *data)
+{
+	switch (tuna->id) {
+	case ETHTOOL_PHY_DOWNSHIFT:
+		return qca807x_get_downshift(phydev, data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int qca807x_set_tunable(struct phy_device *phydev,
+			       struct ethtool_tunable *tuna, const void *data)
+{
+	switch (tuna->id) {
+	case ETHTOOL_PHY_DOWNSHIFT:
+		return qca807x_set_downshift(phydev, *(const u8 *)data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+static bool qca807x_distance_valid(int result)
+{
+	switch (result) {
+	case QCA807X_CDT_RESULTS_OPEN:
+	case QCA807X_CDT_RESULTS_SAME_SHORT:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OK:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OK:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OK:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+		return true;
+	}
+	return false;
+}
+
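+/*
+ * Read the per-pair MDI length register and report the fault distance in the
+ * centimetre unit expected by ethnl_cable_test_fault_length().
+ */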
+static int qca807x_report_length(struct phy_device *phydev,
+				 int pair, int result)
+{
+	int length;
+	int ret;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA807X_CDT_MMD3_MDI0_LENGTH + pair);
+	if (ret < 0)
+		return ret;
+
+	switch (result) {
+	case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT:
+		length = (FIELD_GET(QCA807X_CDT_SAME_SHORT_LENGTH_MASK, ret) * 800) / 10;
+		break;
+	case ETHTOOL_A_CABLE_RESULT_CODE_OPEN:
+	case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT:
+		length = (FIELD_GET(QCA807X_CDT_CROSS_SHORT_LENGTH_MASK, ret) * 800) / 10;
+		break;
+	default:
+		/* Callers only pass results validated by qca807x_distance_valid() */
+		return -EINVAL;
+	}
+
+	ethnl_cable_test_fault_length(phydev, pair, length);
+
+	return 0;
+}
+
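+/* Translate a raw CDT pair status code into an ethtool result code */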
+static int qca807x_cable_test_report_trans(int result)
+{
+	switch (result) {
+	case QCA807X_CDT_RESULTS_OK:
+		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+	case QCA807X_CDT_RESULTS_OPEN:
+		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+	case QCA807X_CDT_RESULTS_SAME_SHORT:
+		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OK:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OK:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OK:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+	case QCA807X_CDT_RESULTS_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+	default:
+		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+	}
+}
+
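+/* Collect the CDT status of all four MDI pairs and report it to ethtool */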
+static int qca807x_cable_test_report(struct phy_device *phydev)
+{
+	int pair0, pair1, pair2, pair3;
+	int ret;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA807X_CDT_MMD3_STATUS);
+	if (ret < 0)
+		return ret;
+
+	pair0 = FIELD_GET(QCA807X_CDT_MDI0_STATUS_MASK, ret);
+	pair1 = FIELD_GET(QCA807X_CDT_MDI1_STATUS_MASK, ret);
+	pair2 = FIELD_GET(QCA807X_CDT_MDI2_STATUS_MASK, ret);
+	pair3 = FIELD_GET(QCA807X_CDT_MDI3_STATUS_MASK, ret);
+
+	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+				qca807x_cable_test_report_trans(pair0));
+	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+				qca807x_cable_test_report_trans(pair1));
+	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+				qca807x_cable_test_report_trans(pair2));
+	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+				qca807x_cable_test_report_trans(pair3));
+
+	if (qca807x_distance_valid(pair0))
+		qca807x_report_length(phydev, 0, qca807x_cable_test_report_trans(pair0));
+	if (qca807x_distance_valid(pair1))
+		qca807x_report_length(phydev, 1, qca807x_cable_test_report_trans(pair1));
+	if (qca807x_distance_valid(pair2))
+		qca807x_report_length(phydev, 2, qca807x_cable_test_report_trans(pair2));
+	if (qca807x_distance_valid(pair3))
+		qca807x_report_length(phydev, 3, qca807x_cable_test_report_trans(pair3));
+
+	return 0;
+}
+
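+/* The test is considered finished once the CDT enable and status bits are no longer both set */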
+static int qca807x_cable_test_get_status(struct phy_device *phydev,
+					 bool *finished)
+{
+	int val;
+
+	*finished = false;
+
+	val = phy_read(phydev, QCA807X_CDT);
+	if (val < 0)
+		return val;
+
+	if (!((val & QCA807X_CDT_ENABLE) && (val & QCA807X_CDT_STATUS))) {
+		*finished = true;
+
+		return qca807x_cable_test_report(phydev);
+	}
+
+	return 0;
+}
+
+static int qca807x_cable_test_start(struct phy_device *phydev)
+{
+	int val, ret;
+
+	val = phy_read(phydev, QCA807X_CDT);
+	if (val < 0)
+		return val;
+
+	/* Clear the inter-pair short check bit and enable the cable test */
+	val &= ~QCA807X_CDT_ENABLE_INTER_PAIR_SHORT;
+	val |= QCA807X_CDT_ENABLE;
+	ret = phy_write(phydev, QCA807X_CDT, val);
+
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_GPIOLIB
+static int qca807x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0)
+	return GPIO_LINE_DIRECTION_OUT;
+#else
+	return GPIOF_DIR_OUT;
+#endif
+}
+
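+/* The two GPIO lines are backed by the LED_100N_2 and LED_1000N_2 force-control registers */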
+static int qca807x_gpio_get_reg(unsigned int offset)
+{
+	return QCA807X_MMD7_LED_100N_2 + (offset % 2) * 2;
+}
+
+static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+	struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+	int val;
+
+	val = phy_read_mmd(priv->phy, MDIO_MMD_AN, qca807x_gpio_get_reg(offset));
+	if (val < 0)
+		return val;
+
+	return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
+}
+
+static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+	struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+	int val;
+
+	val = phy_read_mmd(priv->phy, MDIO_MMD_AN, qca807x_gpio_get_reg(offset));
+	val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
+	val |= QCA807X_GPIO_FORCE_EN;
+	val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
+
+	phy_write_mmd(priv->phy, MDIO_MMD_AN, qca807x_gpio_get_reg(offset), val);
+}
+
+static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
+{
+	qca807x_gpio_set(gc, offset, value);
+
+	return 0;
+}
+
+static int qca807x_gpio(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct qca807x_gpio_priv *priv;
+	struct gpio_chip *gc;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->phy = phydev;
+
+	gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+	if (!gc)
+		return -ENOMEM;
+
+	gc->label = dev_name(dev);
+	gc->base = -1;
+	gc->ngpio = 2;
+	gc->parent = dev;
+	gc->owner = THIS_MODULE;
+	gc->can_sleep = true;
+	gc->get_direction = qca807x_gpio_get_direction;
+	gc->direction_output = qca807x_gpio_dir_out;
+	gc->get = qca807x_gpio_get;
+	gc->set = qca807x_gpio_set;
+
+	return devm_gpiochip_add_data(dev, gc, priv);
+}
+#endif
+
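+/*
+ * Read link, speed and duplex from the copper page; on a combo port the
+ * copper page is selected first via the chip configuration register.
+ */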
+static int qca807x_read_copper_status(struct phy_device *phydev, bool combo_port)
+{
+	int ss, err, page, old_link = phydev->link;
+
+	/* Only combo port has dual pages */
+	if (combo_port) {
+		/* Check whether copper page is set and set if needed */
+		page = phy_read(phydev, QCA807X_CHIP_CONFIGURATION);
+		if (!(page & QCA807X_BT_BX_REG_SEL)) {
+			page |= QCA807X_BT_BX_REG_SEL;
+			phy_write(phydev, QCA807X_CHIP_CONFIGURATION, page);
+		}
+	}
+
+	/* Update the link, but return if there was an error */
+	err = genphy_update_link(phydev);
+	if (err)
+		return err;
+
+	/* why bother the PHY if nothing can have changed */
+	if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+		return 0;
+
+	phydev->speed = SPEED_UNKNOWN;
+	phydev->duplex = DUPLEX_UNKNOWN;
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	err = genphy_read_lpa(phydev);
+	if (err < 0)
+		return err;
+
+	/* Read the QCA807x PHY-Specific Status register copper page,
+	 * which indicates the speed and duplex that the PHY is actually
+	 * using, irrespective of whether we are in autoneg mode or not.
+	 */
+	ss = phy_read(phydev, QCA807X_PHY_SPECIFIC_STATUS);
+	if (ss < 0)
+		return ss;
+
+	if (ss & QCA807X_SS_SPEED_AND_DUPLEX_RESOLVED) {
+		int sfc;
+
+		sfc = phy_read(phydev, QCA807X_FUNCTION_CONTROL);
+		if (sfc < 0)
+			return sfc;
+
+		switch (FIELD_GET(QCA807X_SS_SPEED_MASK, ss)) {
+		case QCA807X_SS_SPEED_10:
+			phydev->speed = SPEED_10;
+			break;
+		case QCA807X_SS_SPEED_100:
+			phydev->speed = SPEED_100;
+			break;
+		case QCA807X_SS_SPEED_1000:
+			phydev->speed = SPEED_1000;
+			break;
+		}
+		if (ss & QCA807X_SS_DUPLEX)
+			phydev->duplex = DUPLEX_FULL;
+		else
+			phydev->duplex = DUPLEX_HALF;
+
+		if (ss & QCA807X_SS_MDIX)
+			phydev->mdix = ETH_TP_MDI_X;
+		else
+			phydev->mdix = ETH_TP_MDI;
+
+		switch (FIELD_GET(QCA807X_FC_MDI_CROSSOVER_MODE_MASK, sfc)) {
+		case QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI:
+			phydev->mdix_ctrl = ETH_TP_MDI;
+			break;
+		case QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX:
+			phydev->mdix_ctrl = ETH_TP_MDI_X;
+			break;
+		case QCA807X_FC_MDI_CROSSOVER_AUTO:
+			phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+			break;
+		}
+	}
+
+	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+		phy_resolve_aneg_pause(phydev);
+
+	return 0;
+}
+
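+/*
+ * Read link, speed and duplex from the fiber page (1000Base-X/100Base-FX);
+ * the fiber page is selected first via the chip configuration register.
+ */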
+static int qca807x_read_fiber_status(struct phy_device *phydev, bool combo_port)
+{
+	int ss, err, page, lpa, old_link = phydev->link;
+
+	/* Check whether fiber page is set and set if needed */
+	page = phy_read(phydev, QCA807X_CHIP_CONFIGURATION);
+	if (page & QCA807X_BT_BX_REG_SEL) {
+		page &= ~QCA807X_BT_BX_REG_SEL;
+		phy_write(phydev, QCA807X_CHIP_CONFIGURATION, page);
+	}
+
+	/* Update the link, but return if there was an error */
+	err = genphy_update_link(phydev);
+	if (err)
+		return err;
+
+	/* why bother the PHY if nothing can have changed */
+	if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+		return 0;
+
+	phydev->speed = SPEED_UNKNOWN;
+	phydev->duplex = DUPLEX_UNKNOWN;
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+		lpa = phy_read(phydev, MII_LPA);
+		if (lpa < 0)
+			return lpa;
+
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				 phydev->lp_advertising, lpa & LPA_LPACK);
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+				 phydev->lp_advertising, lpa & LPA_1000XFULL);
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+				 phydev->lp_advertising, lpa & LPA_1000XPAUSE);
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+				 phydev->lp_advertising,
+				 lpa & LPA_1000XPAUSE_ASYM);
+
+		phy_resolve_aneg_linkmode(phydev);
+	}
+
+	/* Read the QCA807x PHY-Specific Status register fiber page,
+	 * which indicates the speed and duplex that the PHY is actually
+	 * using, irrespective of whether we are in autoneg mode or not.
+	 */
+	ss = phy_read(phydev, QCA807X_PHY_SPECIFIC_STATUS);
+	if (ss < 0)
+		return ss;
+
+	if (ss & QCA807X_SS_SPEED_AND_DUPLEX_RESOLVED) {
+		switch (FIELD_GET(QCA807X_SS_SPEED_MASK, ss)) {
+		case QCA807X_SS_SPEED_100:
+			phydev->speed = SPEED_100;
+			break;
+		case QCA807X_SS_SPEED_1000:
+			phydev->speed = SPEED_1000;
+			break;
+		}
+
+		if (ss & QCA807X_SS_DUPLEX)
+			phydev->duplex = DUPLEX_FULL;
+		else
+			phydev->duplex = DUPLEX_HALF;
+	}
+
+	return 0;
+}
+
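+/* Dispatch to the copper or fiber status reader based on the port type and detected media */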
+static int qca807x_read_status(struct phy_device *phydev)
+{
+	int val;
+
+	/* Check for Combo port */
+	if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+		/* Check for fiber mode first */
+		if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+			/* Check for actual detected media */
+			val = phy_read(phydev, QCA807X_MEDIA_SELECT_STATUS);
+			if (val < 0)
+				return val;
+
+			if (val & QCA807X_MEDIA_DETECTED_COPPER)
+				return qca807x_read_copper_status(phydev, true);
+
+			if ((val & QCA807X_MEDIA_DETECTED_1000_BASE_X) ||
+			    (val & QCA807X_MEDIA_DETECTED_100_BASE_FX))
+				return qca807x_read_fiber_status(phydev, true);
+
+			return 0;
+		}
+
+		return qca807x_read_copper_status(phydev, true);
+	}
+
+	return qca807x_read_copper_status(phydev, false);
+}
+
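+/* Enable interrupt sources for link/speed/duplex changes; combo ports support a reduced set */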
+static int qca807x_config_intr(struct phy_device *phydev)
+{
+	int ret, val;
+
+	val = phy_read(phydev, QCA807X_INTR_ENABLE);
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		/* Check for combo port as it has fewer interrupts */
+		if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+			val |= QCA807X_INTR_ENABLE_SPEED_CHANGED;
+			val |= QCA807X_INTR_ENABLE_LINK_FAIL;
+			val |= QCA807X_INTR_ENABLE_LINK_SUCCESS;
+		} else {
+			val |= QCA807X_INTR_ENABLE_AUTONEG_ERR;
+			val |= QCA807X_INTR_ENABLE_SPEED_CHANGED;
+			val |= QCA807X_INTR_ENABLE_DUPLEX_CHANGED;
+			val |= QCA807X_INTR_ENABLE_LINK_FAIL;
+			val |= QCA807X_INTR_ENABLE_LINK_SUCCESS;
+		}
+		ret = phy_write(phydev, QCA807X_INTR_ENABLE, val);
+	} else {
+		ret = phy_write(phydev, QCA807X_INTR_ENABLE, 0);
+	}
+
+	return ret;
+}
+
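+/* Pending interrupts are acknowledged by reading the interrupt status register */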
+static int qca807x_ack_intr(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_read(phydev, QCA807X_INTR_STATUS);
+
+	return (ret < 0) ? ret : 0;
+}
+
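+/* Configure single-LED operation for the speeds selected by the qcom,single-led-* DT properties */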
+static int qca807x_led_config(struct phy_device *phydev)
+{
+	struct device_node *node = phydev->mdio.dev.of_node;
+	bool led_config = false;
+	int val;
+
+	val = phy_read_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_LED_1000N_1);
+	if (val < 0)
+		return val;
+
+	if (of_property_read_bool(node, "qcom,single-led-1000")) {
+		val |= QCA807X_LED_TXACT_BLK_EN_2;
+		val |= QCA807X_LED_RXACT_BLK_EN_2;
+		val |= QCA807X_LED_GT_ON_EN_2;
+
+		led_config = true;
+	}
+
+	if (of_property_read_bool(node, "qcom,single-led-100")) {
+		val |= QCA807X_LED_HT_ON_EN_2;
+
+		led_config = true;
+	}
+
+	if (of_property_read_bool(node, "qcom,single-led-10")) {
+		val |= QCA807X_LED_BT_ON_EN_2;
+
+		led_config = true;
+	}
+
+	if (led_config)
+		return phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_LED_1000N_1, val);
+	else
+		return 0;
+}
+
+static const struct sfp_upstream_ops qca807x_sfp_ops = {
+	.attach = phy_sfp_attach,
+	.detach = phy_sfp_detach,
+};
+
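+/*
+ * One-time init: optional fiber auto-detection and a PSGMII serdes tweak on
+ * combo ports, DT-driven DAC control, and LED setup unless the pins are GPIOs.
+ */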
+static int qca807x_config(struct phy_device *phydev)
+{
+	struct device_node *node = phydev->mdio.dev.of_node;
+	int control_dac, ret = 0;
+	u32 of_control_dac;
+
+	/* Check for Combo port */
+	if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+		int fiber_mode_autodect;
+		int psgmii_serdes;
+		int chip_config;
+
+		if (of_property_read_bool(node, "qcom,fiber-enable")) {
+			/* Enable fiber mode auto-detection (1000Base-X or 100Base-FX) */
+			fiber_mode_autodect = phy_read_mmd(phydev, MDIO_MMD_AN,
+							   QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION);
+			fiber_mode_autodect |= QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN;
+			phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
+				      fiber_mode_autodect);
+
+			/* Enable 4 copper + combo port mode */
+			chip_config = phy_read(phydev, QCA807X_CHIP_CONFIGURATION);
+			chip_config &= ~QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK;
+			chip_config |= FIELD_PREP(QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
+						  QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
+			phy_write(phydev, QCA807X_CHIP_CONFIGURATION, chip_config);
+
+			linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+			linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+		}
+
+		/* Prevent PSGMII going into hibernation via PSGMII self test */
+		psgmii_serdes = phy_read_mmd(phydev, MDIO_MMD_PCS, PSGMII_MMD3_SERDES_CONTROL);
+		psgmii_serdes &= ~BIT(1);
+		ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+				    PSGMII_MMD3_SERDES_CONTROL,
+				    psgmii_serdes);
+	}
+
+	if (!of_property_read_u32(node, "qcom,control-dac", &of_control_dac)) {
+		control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
+					   QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
+		control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+		control_dac |= FIELD_PREP(QCA807X_CONTROL_DAC_MASK, of_control_dac);
+		ret = phy_write_mmd(phydev, MDIO_MMD_AN,
+				    QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH,
+				    control_dac);
+	}
+
+	/* Optionally configure LED-s */
+	if (IS_ENABLED(CONFIG_GPIOLIB)) {
+		/* Check whether PHY-s pins are used as GPIO-s */
+		if (!of_property_read_bool(node, "gpio-controller"))
+			ret = qca807x_led_config(phydev);
+	} else {
+		ret = qca807x_led_config(phydev);
+	}
+
+	return ret;
+}
+
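+/* Register the LED pins as a GPIO controller and/or attach an SFP bus when the DT asks for it */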
+static int qca807x_probe(struct phy_device *phydev)
+{
+	struct device_node *node = phydev->mdio.dev.of_node;
+	int ret = 0;
+
+	if (IS_ENABLED(CONFIG_GPIOLIB)) {
+		/* Do not register a GPIO controller unless flagged for it */
+		if (of_property_read_bool(node, "gpio-controller"))
+			ret = qca807x_gpio(phydev);
+	}
+
+	/* Attach SFP bus on combo port */
+	if (of_property_read_bool(node, "qcom,fiber-enable")) {
+		if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION))
+			ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
+	}
+
+	return ret;
+}
+
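+/*
+ * The PSGMII/QSGMII block is registered as its own PHY; apply the AZ
+ * workaround and the DT-specified TX driver strength to it.
+ */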
+static int qca807x_psgmii_config(struct phy_device *phydev)
+{
+	struct device_node *node = phydev->mdio.dev.of_node;
+	int psgmii_az, tx_amp, ret = 0;
+	u32 tx_driver_strength;
+
+	/* Workaround to enable AZ transmitting ability */
+	if (of_property_read_bool(node, "qcom,psgmii-az")) {
+		psgmii_az = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PSGMII_MODE_CTRL);
+		psgmii_az &= ~PSGMII_MODE_CTRL_AZ_WORKAROUND_MASK;
+		psgmii_az |= FIELD_PREP(PSGMII_MODE_CTRL_AZ_WORKAROUND_MASK, 0xc);
+		ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PSGMII_MODE_CTRL, psgmii_az);
+		psgmii_az = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PSGMII_MODE_CTRL);
+	}
+
+	/* PSGMII/QSGMII TX amp set to DT defined value instead of default 600mV */
+	if (!of_property_read_u32(node, "qcom,tx-driver-strength", &tx_driver_strength)) {
+		tx_amp = phy_read(phydev, PSGMII_QSGMII_DRIVE_CONTROL_1);
+		tx_amp &= ~PSGMII_QSGMII_TX_DRIVER_MASK;
+		tx_amp |= FIELD_PREP(PSGMII_QSGMII_TX_DRIVER_MASK, tx_driver_strength);
+		ret = phy_write(phydev, PSGMII_QSGMII_DRIVE_CONTROL_1, tx_amp);
+	}
+
+	return ret;
+}
+
+static struct phy_driver qca807x_drivers[] = {
+	{
+		PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
+		.name           = "Qualcomm QCA8072",
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+		.flags		= PHY_POLL_CABLE_TEST,
+#endif
+		/* PHY_GBIT_FEATURES */
+		.probe		= qca807x_probe,
+		.config_init	= qca807x_config,
+		.read_status	= qca807x_read_status,
+		.config_intr	= qca807x_config_intr,
+		.ack_interrupt	= qca807x_ack_intr,
+		.soft_reset	= genphy_soft_reset,
+		.get_tunable	= qca807x_get_tunable,
+		.set_tunable	= qca807x_set_tunable,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+		.cable_test_start	= qca807x_cable_test_start,
+		.cable_test_get_status	= qca807x_cable_test_get_status,
+#endif
+	},
+	{
+		PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
+		.name           = "Qualcomm QCA8075",
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+		.flags		= PHY_POLL_CABLE_TEST,
+#endif
+		/* PHY_GBIT_FEATURES */
+		.probe		= qca807x_probe,
+		.config_init	= qca807x_config,
+		.read_status	= qca807x_read_status,
+		.config_intr	= qca807x_config_intr,
+		.ack_interrupt	= qca807x_ack_intr,
+		.soft_reset	= genphy_soft_reset,
+		.get_tunable	= qca807x_get_tunable,
+		.set_tunable	= qca807x_set_tunable,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+		.cable_test_start	= qca807x_cable_test_start,
+		.cable_test_get_status	= qca807x_cable_test_get_status,
+#endif
+	},
+	{
+		PHY_ID_MATCH_EXACT(PHY_ID_QCA807X_PSGMII),
+		.name           = "Qualcomm QCA807x PSGMII",
+		.probe		= qca807x_psgmii_config,
+	},
+};
+module_phy_driver(qca807x_drivers);
+
+static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+	{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
+	{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
+	{ PHY_ID_MATCH_MODEL(PHY_ID_QCA807X_PSGMII) },
+	{ }
+};
+
+MODULE_AUTHOR("Robert Marko");
+MODULE_DESCRIPTION("Qualcomm QCA807x PHY driver");
+MODULE_DEVICE_TABLE(mdio, qca807x_tbl);
+MODULE_LICENSE("GPL");