ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/uboot/drivers/qe/Makefile b/marvell/uboot/drivers/qe/Makefile
new file mode 100644
index 0000000..7f1bd06
--- /dev/null
+++ b/marvell/uboot/drivers/qe/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) 2006 Freescale Semiconductor, Inc.
+#
+# SPDX-License-Identifier:	GPL-2.0+
+#
+
+obj-y := qe.o uccf.o uec.o uec_phy.o
+obj-$(CONFIG_OF_LIBFDT) += fdt.o
diff --git a/marvell/uboot/drivers/qe/fdt.c b/marvell/uboot/drivers/qe/fdt.c
new file mode 100644
index 0000000..d9a7d82
--- /dev/null
+++ b/marvell/uboot/drivers/qe/fdt.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * (C) Copyright 2000
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <common.h>
+#include <libfdt.h>
+#include <fdt_support.h>
+#include "qe.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * If a QE firmware has been uploaded, then add the 'firmware' node under
+ * the 'qe' node.
+ */
+void fdt_fixup_qe_firmware(void *blob)
+{
+	struct qe_firmware_info *qe_fw_info;
+	int node, ret;
+
+	qe_fw_info = qe_get_firmware_info();
+	if (!qe_fw_info)
+		return;
+
+	node = fdt_path_offset(blob, "/qe");
+	if (node < 0)
+		return;
+
+	/* We assume the node doesn't exist yet */
+	node = fdt_add_subnode(blob, node, "firmware");
+	if (node < 0)
+		return;
+
+	ret = fdt_setprop(blob, node, "extended-modes",
+		&qe_fw_info->extended_modes, sizeof(u64));
+	if (ret < 0)
+		goto error;
+
+	ret = fdt_setprop_string(blob, node, "id", qe_fw_info->id);
+	if (ret < 0)
+		goto error;
+
+	ret = fdt_setprop(blob, node, "virtual-traps", qe_fw_info->vtraps,
+		sizeof(qe_fw_info->vtraps));
+	if (ret < 0)
+		goto error;
+
+	return;
+
+error:
+	fdt_del_node(blob, node);
+}
+
+void ft_qe_setup(void *blob)
+{
+	do_fixup_by_prop_u32(blob, "device_type", "qe", 4,
+		"bus-frequency", gd->arch.qe_clk, 1);
+	do_fixup_by_prop_u32(blob, "device_type", "qe", 4,
+		"brg-frequency", gd->arch.brg_clk, 1);
+	do_fixup_by_compat_u32(blob, "fsl,qe",
+		"clock-frequency", gd->arch.qe_clk, 1);
+	do_fixup_by_compat_u32(blob, "fsl,qe",
+		"bus-frequency", gd->arch.qe_clk, 1);
+	do_fixup_by_compat_u32(blob, "fsl,qe",
+		"brg-frequency", gd->arch.brg_clk, 1);
+	do_fixup_by_compat_u32(blob, "fsl,qe-gtm",
+		"clock-frequency", gd->arch.qe_clk / 2, 1);
+	fdt_fixup_qe_firmware(blob);
+}
diff --git a/marvell/uboot/drivers/qe/qe.c b/marvell/uboot/drivers/qe/qe.c
new file mode 100644
index 0000000..b5ddc4b
--- /dev/null
+++ b/marvell/uboot/drivers/qe/qe.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include "common.h"
+#include <command.h>
+#include "asm/errno.h"
+#include "asm/io.h"
+#include "asm/immap_qe.h"
+#include "qe.h"
+
+qe_map_t		*qe_immr = NULL;
+static qe_snum_t	snums[QE_NUM_OF_SNUM];
+
+DECLARE_GLOBAL_DATA_PTR;
+
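+/*
+ * Issue a host command to the QE Communication Processor and busy-wait
+ * until the CECR flag bit clears, i.e. until the CP has accepted it.
+ */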
+void qe_issue_cmd(uint cmd, uint sbc, u8 mcn, u32 cmd_data)
+{
+	u32 cecr;
+
+	if (cmd == QE_RESET) {
+		out_be32(&qe_immr->cp.cecr,(u32) (cmd | QE_CR_FLG));
+	} else {
+		out_be32(&qe_immr->cp.cecdr, cmd_data);
+		out_be32(&qe_immr->cp.cecr, (sbc | QE_CR_FLG |
+			 ((u32) mcn<<QE_CR_PROTOCOL_SHIFT) | cmd));
+	}
+	/* Wait for the QE_CR_FLG to clear */
+	do {
+		cecr = in_be32(&qe_immr->cp.cecr);
+	} while (cecr & QE_CR_FLG);
+
+	return;
+}
+
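+/*
+ * Allocate a zeroed block of multi-user RAM (MURAM) and return its offset.
+ * Both the current allocation base and the size are rounded up to 'align',
+ * which is expected to be a power of two.
+ */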
+uint qe_muram_alloc(uint size, uint align)
+{
+	uint	retloc;
+	uint	align_mask, off;
+	uint	savebase;
+
+	align_mask = align - 1;
+	savebase = gd->arch.mp_alloc_base;
+
+	off = gd->arch.mp_alloc_base & align_mask;
+	if (off != 0)
+		gd->arch.mp_alloc_base += (align - off);
+
+	if ((off = size & align_mask) != 0)
+		size += (align - off);
+
+	if ((gd->arch.mp_alloc_base + size) >= gd->arch.mp_alloc_top) {
+		gd->arch.mp_alloc_base = savebase;
+		printf("%s: ran out of ram.\n",  __FUNCTION__);
+	}
+
+	retloc = gd->arch.mp_alloc_base;
+	gd->arch.mp_alloc_base += size;
+
+	memset((void *)&qe_immr->muram[retloc], 0, size);
+
+	__asm__ __volatile__("sync");
+
+	return retloc;
+}
+
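+/* Translate a MURAM offset into a CPU-addressable pointer. */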
+void *qe_muram_addr(uint offset)
+{
+	return (void *)&qe_immr->muram[offset];
+}
+
+static void qe_sdma_init(void)
+{
+	volatile sdma_t	*p;
+	uint		sdma_buffer_base;
+
+	p = (volatile sdma_t *)&qe_immr->sdma;
+
+	/* All of DMA transaction in bus 1 */
+	out_be32(&p->sdaqr, 0);
+	out_be32(&p->sdaqmr, 0);
+
+	/* Allocate 2KB temporary buffer for sdma */
+	sdma_buffer_base = qe_muram_alloc(2048, 4096);
+	out_be32(&p->sdwbcr, sdma_buffer_base & QE_SDEBCR_BA_MASK);
+
+	/* Clear sdma status */
+	out_be32(&p->sdsr, 0x03000000);
+
+	/* Enable global mode on bus 1, and 2KB buffer size */
+	out_be32(&p->sdmr, QE_SDMR_GLB_1_MSK | (0x3 << QE_SDMR_CEN_SHIFT));
+}
+
+/* This table is a list of the serial numbers of the threads, taken from the
+ * "SNUM Table" chart in the QE Reference Manual. The order is not important;
+ * we just need to know what the SNUMs are for the threads.
+ */
+static u8 thread_snum[] = {
+/* Evthreads 16-29 are not supported in MPC8309 */
+#if !defined(CONFIG_MPC8309)
+	0x04, 0x05, 0x0c, 0x0d,
+	0x14, 0x15, 0x1c, 0x1d,
+	0x24, 0x25, 0x2c, 0x2d,
+	0x34, 0x35,
+#endif
+	0x88, 0x89, 0x98, 0x99,
+	0xa8, 0xa9, 0xb8, 0xb9,
+	0xc8, 0xc9, 0xd8, 0xd9,
+	0xe8, 0xe9, 0x08, 0x09,
+	0x18, 0x19, 0x28, 0x29,
+	0x38, 0x39, 0x48, 0x49,
+	0x58, 0x59, 0x68, 0x69,
+	0x78, 0x79, 0x80, 0x81
+};
+
+static void qe_snums_init(void)
+{
+	int	i;
+
+	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+		snums[i].state = QE_SNUM_STATE_FREE;
+		snums[i].num   = thread_snum[i];
+	}
+}
+
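+/*
+ * Reserve a free thread serial number (SNUM); returns the SNUM value, or
+ * -EBUSY if none is available.
+ */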
+int qe_get_snum(void)
+{
+	int	snum = -EBUSY;
+	int	i;
+
+	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+		if (snums[i].state == QE_SNUM_STATE_FREE) {
+			snums[i].state = QE_SNUM_STATE_USED;
+			snum = snums[i].num;
+			break;
+		}
+	}
+
+	return snum;
+}
+
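+/* Return a previously reserved SNUM to the free pool. */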
+void qe_put_snum(u8 snum)
+{
+	int	i;
+
+	for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+		if (snums[i].num == snum) {
+			snums[i].state = QE_SNUM_STATE_FREE;
+			break;
+		}
+	}
+}
+
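+/*
+ * Basic QE setup: record the QE register map, optionally upload the
+ * microcode from NOR, set up the MURAM allocator, the SDMA and the
+ * SNUM table.
+ */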
+void qe_init(uint qe_base)
+{
+	/* Init the QE IMMR base */
+	qe_immr = (qe_map_t *)qe_base;
+
+#ifdef CONFIG_SYS_QE_FMAN_FW_IN_NOR
+	/*
+	 * Upload microcode to IRAM for those SOCs which do not have ROM in QE.
+	 */
+	qe_upload_firmware((const void *)CONFIG_SYS_QE_FMAN_FW_ADDR);
+
+	/* enable the microcode in IRAM */
+	out_be32(&qe_immr->iram.iready,QE_IRAM_READY);
+#endif
+
+	gd->arch.mp_alloc_base = QE_DATAONLY_BASE;
+	gd->arch.mp_alloc_top = gd->arch.mp_alloc_base + QE_DATAONLY_SIZE;
+
+	qe_sdma_init();
+	qe_snums_init();
+}
+
+void qe_reset(void)
+{
+	qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+			 (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+
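+/*
+ * Bind a parameter RAM page to the thread identified by 'snum' using the
+ * ASSIGN_PAGE host command, and wait for the command to complete.
+ */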
+void qe_assign_page(uint snum, uint para_ram_base)
+{
+	u32	cecr;
+
+	out_be32(&qe_immr->cp.cecdr, para_ram_base);
+	out_be32(&qe_immr->cp.cecr, ((u32) snum<<QE_CR_ASSIGN_PAGE_SNUM_SHIFT)
+					 | QE_CR_FLG | QE_ASSIGN_PAGE);
+
+	/* Wait for the QE_CR_FLG to clear */
+	do {
+		cecr = in_be32(&qe_immr->cp.cecr);
+	} while (cecr & QE_CR_FLG );
+
+	return;
+}
+
+/*
+ * brg:  0~15, selecting BRG1~BRG16
+ * rate: baud rate
+ *
+ * The BRG input clock comes from the BRGCLK (an internal clock derived from
+ * the QE clock; it is one-half of the QE clock). If the clock source has to
+ * come from a CLKn pin instead, this function must be changed.
+ */
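+/*
+ * For example, assuming a 240 MHz BRG clock, a request for 9600 baud gives
+ * a raw divisor of 240000000 / 9600 = 25000. That exceeds
+ * QE_BRGC_DIVISOR_MAX + 1, so the divide-by-16 prescaler is enabled and the
+ * programmed divisor becomes 25000 / 16.
+ */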
+
+#define BRG_CLK		(gd->arch.brg_clk)
+
+int qe_set_brg(uint brg, uint rate)
+{
+	volatile uint	*bp;
+	u32		divisor;
+	int		div16 = 0;
+
+	if (brg >= QE_NUM_OF_BRGS)
+		return -EINVAL;
+	bp = (uint *)&qe_immr->brg.brgc1;
+	bp += brg;
+
+	divisor = (BRG_CLK / rate);
+	if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+		div16 = 1;
+		divisor /= 16;
+	}
+
+	*bp = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
+	__asm__ __volatile__("sync");
+
+	if (div16) {
+		*bp |= QE_BRGC_DIV16;
+		__asm__ __volatile__("sync");
+	}
+
+	return 0;
+}
+
+/* Select which UCC acts as the Ethernet MII management clock master. */
+int qe_set_mii_clk_src(int ucc_num)
+{
+	u32	cmxgcr;
+
+	/* check if the UCC number is in range. */
+	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
+		printf("%s: UCC number out of range\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	cmxgcr = in_be32(&qe_immr->qmx.cmxgcr);
+	cmxgcr &= ~QE_CMXGCR_MII_ENET_MNG_MASK;
+	cmxgcr |= (ucc_num <<QE_CMXGCR_MII_ENET_MNG_SHIFT);
+	out_be32(&qe_immr->qmx.cmxgcr, cmxgcr);
+
+	return 0;
+}
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware().  It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+	const struct qe_microcode *ucode)
+{
+	const u32 *code = base + be32_to_cpu(ucode->code_offset);
+	unsigned int i;
+
+	if (ucode->major || ucode->minor || ucode->revision)
+		printf("QE: uploading microcode '%s' version %u.%u.%u\n",
+			ucode->id, ucode->major, ucode->minor, ucode->revision);
+	else
+		printf("QE: uploading microcode '%s'\n", ucode->id);
+
+	/* Use auto-increment */
+	out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+		QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+
+	for (i = 0; i < be32_to_cpu(ucode->count); i++)
+		out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See docs/README.qe_firmware for information on QE microcode uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+	unsigned int i;
+	unsigned int j;
+	u32 crc;
+	size_t calc_size = sizeof(struct qe_firmware);
+	size_t length;
+	const struct qe_header *hdr;
+
+	if (!firmware) {
+		printf("Invalid address\n");
+		return -EINVAL;
+	}
+
+	hdr = &firmware->header;
+	length = be32_to_cpu(hdr->length);
+
+	/* Check the magic */
+	if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+	    (hdr->magic[2] != 'F')) {
+		printf("Not a microcode\n");
+		return -EPERM;
+	}
+
+	/* Check the version */
+	if (hdr->version != 1) {
+		printf("Unsupported version\n");
+		return -EPERM;
+	}
+
+	/* Validate some of the fields */
+	if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+		printf("Invalid data\n");
+		return -EINVAL;
+	}
+
+	/* Validate the length and check if there's a CRC */
+	calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+
+	for (i = 0; i < firmware->count; i++)
+		/*
+		 * For situations where the second RISC uses the same microcode
+		 * as the first, the 'code_offset' and 'count' fields will be
+		 * zero, so it's okay to add those.
+		 */
+		calc_size += sizeof(u32) *
+			be32_to_cpu(firmware->microcode[i].count);
+
+	/* Validate the length */
+	if (length != calc_size + sizeof(u32)) {
+		printf("Invalid length\n");
+		return -EPERM;
+	}
+
+	/*
+	 * Validate the CRC.  We would normally call crc32_no_comp(), but that
+	 * function isn't available unless you turn on JFFS support.
+	 */
+	crc = be32_to_cpu(*(u32 *)((void *)firmware + calc_size));
+	if (crc != (crc32(-1, (const void *) firmware, calc_size) ^ -1)) {
+		printf("Firmware CRC is invalid\n");
+		return -EIO;
+	}
+
+	/*
+	 * If the microcode calls for it, split the I-RAM.
+	 */
+	if (!firmware->split) {
+		out_be16(&qe_immr->cp.cercr,
+			in_be16(&qe_immr->cp.cercr) | QE_CP_CERCR_CIR);
+	}
+
+	if (firmware->soc.model)
+		printf("Firmware '%s' for %u V%u.%u\n",
+			firmware->id, be16_to_cpu(firmware->soc.model),
+			firmware->soc.major, firmware->soc.minor);
+	else
+		printf("Firmware '%s'\n", firmware->id);
+
+	/*
+	 * The QE only supports one microcode per RISC, so clear out all the
+	 * saved microcode information and put in the new.
+	 */
+	memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+	strcpy(qe_firmware_info.id, (char *)firmware->id);
+	qe_firmware_info.extended_modes = firmware->extended_modes;
+	memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+		sizeof(firmware->vtraps));
+	qe_firmware_uploaded = 1;
+
+	/* Loop through each microcode. */
+	for (i = 0; i < firmware->count; i++) {
+		const struct qe_microcode *ucode = &firmware->microcode[i];
+
+		/* Upload a microcode if it's present */
+		if (ucode->code_offset)
+			qe_upload_microcode(firmware, ucode);
+
+		/* Program the traps for this processor */
+		for (j = 0; j < 16; j++) {
+			u32 trap = be32_to_cpu(ucode->traps[j]);
+
+			if (trap)
+				out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+		}
+
+		/* Enable traps */
+		out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+	}
+
+	return 0;
+}
+
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+	return qe_firmware_uploaded ? &qe_firmware_info : NULL;
+}
+
+static int qe_cmd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
+{
+	ulong addr;
+
+	if (argc < 3)
+		return cmd_usage(cmdtp);
+
+	if (strcmp(argv[1], "fw") == 0) {
+		addr = simple_strtoul(argv[2], NULL, 16);
+
+		if (!addr) {
+			printf("Invalid address\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * If a length was supplied, compare that with the 'length'
+		 * field.
+		 */
+
+		if (argc > 3) {
+			ulong length = simple_strtoul(argv[3], NULL, 16);
+			struct qe_firmware *firmware = (void *) addr;
+
+			if (length != be32_to_cpu(firmware->header.length)) {
+				printf("Length mismatch\n");
+				return -EINVAL;
+			}
+		}
+
+		return qe_upload_firmware((const struct qe_firmware *) addr);
+	}
+
+	return cmd_usage(cmdtp);
+}
+
+U_BOOT_CMD(
+	qe, 4, 0, qe_cmd,
+	"QUICC Engine commands",
+	"fw <addr> [<length>] - Upload firmware binary at address <addr> to "
+		"the QE,\n"
+	"\twith optional length <length> verification."
+);
diff --git a/marvell/uboot/drivers/qe/qe.h b/marvell/uboot/drivers/qe/qe.h
new file mode 100644
index 0000000..c82ac7b
--- /dev/null
+++ b/marvell/uboot/drivers/qe/qe.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef __QE_H__
+#define __QE_H__
+
+#include "common.h"
+
+#define QE_NUM_OF_BRGS	16
+#define UCC_MAX_NUM	8
+
+#define QE_DATAONLY_BASE	0
+#define QE_DATAONLY_SIZE	(QE_MURAM_SIZE - QE_DATAONLY_BASE)
+
+/* QE threads SNUM
+*/
+typedef enum qe_snum_state {
+	QE_SNUM_STATE_USED,   /* used */
+	QE_SNUM_STATE_FREE    /* free */
+} qe_snum_state_e;
+
+typedef struct qe_snum {
+	u8		num;   /* snum	*/
+	qe_snum_state_e	state; /* state */
+} qe_snum_t;
+
+/* QE RISC allocation
+*/
+#define	QE_RISC_ALLOCATION_RISC1	0x1  /* RISC 1 */
+#define	QE_RISC_ALLOCATION_RISC2	0x2  /* RISC 2 */
+#define	QE_RISC_ALLOCATION_RISC3	0x4  /* RISC 3 */
+#define	QE_RISC_ALLOCATION_RISC4	0x8  /* RISC 4 */
+#define	QE_RISC_ALLOCATION_RISC1_AND_RISC2 	(QE_RISC_ALLOCATION_RISC1 | \
+						 QE_RISC_ALLOCATION_RISC2)
+#define	QE_RISC_ALLOCATION_FOUR_RISCS	(QE_RISC_ALLOCATION_RISC1 | \
+					 QE_RISC_ALLOCATION_RISC2 | \
+					 QE_RISC_ALLOCATION_RISC3 | \
+					 QE_RISC_ALLOCATION_RISC4)
+
+/* QE CECR commands for UCC fast.
+*/
+#define QE_CR_FLG			0x00010000
+#define QE_RESET			0x80000000
+#define QE_INIT_TX_RX			0x00000000
+#define QE_INIT_RX			0x00000001
+#define QE_INIT_TX			0x00000002
+#define QE_ENTER_HUNT_MODE		0x00000003
+#define QE_STOP_TX			0x00000004
+#define QE_GRACEFUL_STOP_TX		0x00000005
+#define QE_RESTART_TX			0x00000006
+#define QE_SWITCH_COMMAND		0x00000007
+#define QE_SET_GROUP_ADDRESS		0x00000008
+#define QE_INSERT_CELL			0x00000009
+#define QE_ATM_TRANSMIT			0x0000000a
+#define QE_CELL_POOL_GET		0x0000000b
+#define QE_CELL_POOL_PUT		0x0000000c
+#define QE_IMA_HOST_CMD			0x0000000d
+#define QE_ATM_MULTI_THREAD_INIT	0x00000011
+#define QE_ASSIGN_PAGE			0x00000012
+#define QE_START_FLOW_CONTROL		0x00000014
+#define QE_STOP_FLOW_CONTROL		0x00000015
+#define QE_ASSIGN_PAGE_TO_DEVICE	0x00000016
+#define QE_GRACEFUL_STOP_RX		0x0000001a
+#define QE_RESTART_RX			0x0000001b
+
+/* QE CECR Sub Block Code - sub block code of QE command.
+*/
+#define QE_CR_SUBBLOCK_INVALID		0x00000000
+#define QE_CR_SUBBLOCK_USB		0x03200000
+#define QE_CR_SUBBLOCK_UCCFAST1		0x02000000
+#define QE_CR_SUBBLOCK_UCCFAST2		0x02200000
+#define QE_CR_SUBBLOCK_UCCFAST3		0x02400000
+#define QE_CR_SUBBLOCK_UCCFAST4		0x02600000
+#define QE_CR_SUBBLOCK_UCCFAST5		0x02800000
+#define QE_CR_SUBBLOCK_UCCFAST6		0x02a00000
+#define QE_CR_SUBBLOCK_UCCFAST7		0x02c00000
+#define QE_CR_SUBBLOCK_UCCFAST8		0x02e00000
+#define QE_CR_SUBBLOCK_UCCSLOW1		0x00000000
+#define QE_CR_SUBBLOCK_UCCSLOW2		0x00200000
+#define QE_CR_SUBBLOCK_UCCSLOW3		0x00400000
+#define QE_CR_SUBBLOCK_UCCSLOW4		0x00600000
+#define QE_CR_SUBBLOCK_UCCSLOW5		0x00800000
+#define QE_CR_SUBBLOCK_UCCSLOW6		0x00a00000
+#define QE_CR_SUBBLOCK_UCCSLOW7		0x00c00000
+#define QE_CR_SUBBLOCK_UCCSLOW8		0x00e00000
+#define QE_CR_SUBBLOCK_MCC1		0x03800000
+#define QE_CR_SUBBLOCK_MCC2		0x03a00000
+#define QE_CR_SUBBLOCK_MCC3		0x03000000
+#define QE_CR_SUBBLOCK_IDMA1		0x02800000
+#define QE_CR_SUBBLOCK_IDMA2		0x02a00000
+#define QE_CR_SUBBLOCK_IDMA3		0x02c00000
+#define QE_CR_SUBBLOCK_IDMA4		0x02e00000
+#define QE_CR_SUBBLOCK_HPAC		0x01e00000
+#define QE_CR_SUBBLOCK_SPI1		0x01400000
+#define QE_CR_SUBBLOCK_SPI2		0x01600000
+#define QE_CR_SUBBLOCK_RAND		0x01c00000
+#define QE_CR_SUBBLOCK_TIMER		0x01e00000
+#define QE_CR_SUBBLOCK_GENERAL		0x03c00000
+
+/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command.
+*/
+#define QE_CR_PROTOCOL_UNSPECIFIED	0x00 /* For all other protocols */
+#define QE_CR_PROTOCOL_HDLC_TRANSPARENT	0x00
+#define QE_CR_PROTOCOL_ATM_POS		0x0A
+#define QE_CR_PROTOCOL_ETHERNET		0x0C
+#define QE_CR_PROTOCOL_L2_SWITCH	0x0D
+#define QE_CR_PROTOCOL_SHIFT		6
+
+/* QE ASSIGN PAGE command
+*/
+#define QE_CR_ASSIGN_PAGE_SNUM_SHIFT	17
+
+/* Communication Direction.
+*/
+typedef enum comm_dir {
+	COMM_DIR_NONE		= 0,
+	COMM_DIR_RX		= 1,
+	COMM_DIR_TX		= 2,
+	COMM_DIR_RX_AND_TX	= 3
+} comm_dir_e;
+
+/* Clocks and BRG's
+*/
+typedef enum qe_clock {
+	QE_CLK_NONE = 0,
+	QE_BRG1,     /* Baud Rate Generator  1 */
+	QE_BRG2,     /* Baud Rate Generator  2 */
+	QE_BRG3,     /* Baud Rate Generator  3 */
+	QE_BRG4,     /* Baud Rate Generator  4 */
+	QE_BRG5,     /* Baud Rate Generator  5 */
+	QE_BRG6,     /* Baud Rate Generator  6 */
+	QE_BRG7,     /* Baud Rate Generator  7 */
+	QE_BRG8,     /* Baud Rate Generator  8 */
+	QE_BRG9,     /* Baud Rate Generator  9 */
+	QE_BRG10,    /* Baud Rate Generator 10 */
+	QE_BRG11,    /* Baud Rate Generator 11 */
+	QE_BRG12,    /* Baud Rate Generator 12 */
+	QE_BRG13,    /* Baud Rate Generator 13 */
+	QE_BRG14,    /* Baud Rate Generator 14 */
+	QE_BRG15,    /* Baud Rate Generator 15 */
+	QE_BRG16,    /* Baud Rate Generator 16 */
+	QE_CLK1,     /* Clock  1	       */
+	QE_CLK2,     /* Clock  2	       */
+	QE_CLK3,     /* Clock  3	       */
+	QE_CLK4,     /* Clock  4	       */
+	QE_CLK5,     /* Clock  5	       */
+	QE_CLK6,     /* Clock  6	       */
+	QE_CLK7,     /* Clock  7	       */
+	QE_CLK8,     /* Clock  8	       */
+	QE_CLK9,     /* Clock  9	       */
+	QE_CLK10,    /* Clock 10	       */
+	QE_CLK11,    /* Clock 11	       */
+	QE_CLK12,    /* Clock 12	       */
+	QE_CLK13,    /* Clock 13	       */
+	QE_CLK14,    /* Clock 14	       */
+	QE_CLK15,    /* Clock 15	       */
+	QE_CLK16,    /* Clock 16	       */
+	QE_CLK17,    /* Clock 17	       */
+	QE_CLK18,    /* Clock 18	       */
+	QE_CLK19,    /* Clock 19	       */
+	QE_CLK20,    /* Clock 20	       */
+	QE_CLK21,    /* Clock 21	       */
+	QE_CLK22,    /* Clock 22	       */
+	QE_CLK23,    /* Clock 23	       */
+	QE_CLK24,    /* Clock 24	       */
+	QE_CLK_DUMMY
+} qe_clock_e;
+
+/* QE CMXGCR register
+*/
+#define QE_CMXGCR_MII_ENET_MNG_MASK	0x00007000
+#define QE_CMXGCR_MII_ENET_MNG_SHIFT	12
+
+/* QE CMXUCR registers
+ */
+#define QE_CMXUCR_TX_CLK_SRC_MASK	0x0000000F
+
+/* QE BRG configuration register
+*/
+#define QE_BRGC_ENABLE			0x00010000
+#define QE_BRGC_DIVISOR_SHIFT		1
+#define QE_BRGC_DIVISOR_MAX		0xFFF
+#define QE_BRGC_DIV16			1
+
+/* QE SDMA registers
+*/
+#define QE_SDSR_BER1			0x02000000
+#define QE_SDSR_BER2			0x01000000
+
+#define QE_SDMR_GLB_1_MSK		0x80000000
+#define QE_SDMR_ADR_SEL			0x20000000
+#define QE_SDMR_BER1_MSK		0x02000000
+#define QE_SDMR_BER2_MSK		0x01000000
+#define QE_SDMR_EB1_MSK			0x00800000
+#define QE_SDMR_ER1_MSK			0x00080000
+#define QE_SDMR_ER2_MSK			0x00040000
+#define QE_SDMR_CEN_MASK		0x0000E000
+#define QE_SDMR_SBER_1			0x00000200
+#define QE_SDMR_SBER_2			0x00000200
+#define QE_SDMR_EB1_PR_MASK		0x000000C0
+#define QE_SDMR_ER1_PR			0x00000008
+
+#define QE_SDMR_CEN_SHIFT		13
+#define QE_SDMR_EB1_PR_SHIFT		6
+
+#define QE_SDTM_MSNUM_SHIFT		24
+
+#define QE_SDEBCR_BA_MASK		0x01FFFFFF
+
+/* Communication Processor */
+#define QE_CP_CERCR_MEE		0x8000	/* Multi-user RAM ECC enable */
+#define QE_CP_CERCR_IEE		0x4000	/* Instruction RAM ECC enable */
+#define QE_CP_CERCR_CIR		0x0800	/* Common instruction RAM */
+
+/* I-RAM */
+#define QE_IRAM_IADD_AIE	0x80000000	/* Auto Increment Enable */
+#define QE_IRAM_IADD_BADDR	0x00080000	/* Base Address */
+#define QE_IRAM_READY		0x80000000
+
+/* Structure that defines QE firmware binary files.
+ *
+ * See doc/README.qe_firmware for a description of these fields.
+ */
+struct qe_firmware {
+	struct qe_header {
+		u32 length;	/* Length of the entire structure, in bytes */
+		u8 magic[3];	/* Set to { 'Q', 'E', 'F' } */
+		u8 version;	/* Version of this layout. First ver is '1' */
+	} header;
+	u8 id[62];		/* Null-terminated identifier string */
+	u8 split;		/* 0 = shared I-RAM, 1 = split I-RAM */
+	u8 count;		/* Number of microcode[] structures */
+	struct {
+		u16 model;	/* The SOC model  */
+		u8 major;	/* The SOC revision major */
+		u8 minor;	/* The SOC revision minor */
+	} __attribute__ ((packed)) soc;
+	u8 padding[4];		/* Reserved, for alignment */
+	u64 extended_modes;	/* Extended modes */
+	u32 vtraps[8];		/* Virtual trap addresses */
+	u8 reserved[4];		/* Reserved, for future expansion */
+	struct qe_microcode {
+		u8 id[32];	/* Null-terminated identifier */
+		u32 traps[16];	/* Trap addresses, 0 == ignore */
+		u32 eccr;	/* The value for the ECCR register */
+		u32 iram_offset;/* Offset into I-RAM for the code */
+		u32 count;	/* Number of 32-bit words of the code */
+		u32 code_offset;/* Offset of the actual microcode */
+		u8 major;	/* The microcode version major */
+		u8 minor;	/* The microcode version minor */
+		u8 revision;	/* The microcode version revision */
+		u8 padding;	/* Reserved, for alignment */
+		u8 reserved[4];	/* Reserved, for future expansion */
+	} __attribute__ ((packed)) microcode[1];
+	/* All microcode binaries should be located here */
+	/* CRC32 should be located here, after the microcode binaries */
+} __attribute__ ((packed));
+
+struct qe_firmware_info {
+	char id[64];		/* Firmware name */
+	u32 vtraps[8];		/* Virtual trap addresses */
+	u64 extended_modes;	/* Extended modes */
+};
+
+void qe_config_iopin(u8 port, u8 pin, int dir, int open_drain, int assign);
+void qe_issue_cmd(uint cmd, uint sbc, u8 mcn, u32 cmd_data);
+uint qe_muram_alloc(uint size, uint align);
+void *qe_muram_addr(uint offset);
+int qe_get_snum(void);
+void qe_put_snum(u8 snum);
+void qe_init(uint qe_base);
+void qe_reset(void);
+void qe_assign_page(uint snum, uint para_ram_base);
+int qe_set_brg(uint brg, uint rate);
+int qe_set_mii_clk_src(int ucc_num);
+int qe_upload_firmware(const struct qe_firmware *firmware);
+struct qe_firmware_info *qe_get_firmware_info(void);
+void ft_qe_setup(void *blob);
+
+#endif /* __QE_H__ */
diff --git a/marvell/uboot/drivers/qe/uccf.c b/marvell/uboot/drivers/qe/uccf.c
new file mode 100644
index 0000000..593d96d
--- /dev/null
+++ b/marvell/uboot/drivers/qe/uccf.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include "common.h"
+#include "malloc.h"
+#include "asm/errno.h"
+#include "asm/io.h"
+#include "asm/immap_qe.h"
+#include "qe.h"
+#include "uccf.h"
+
+void ucc_fast_transmit_on_demand(ucc_fast_private_t *uccf)
+{
+	out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+}
+
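+/*
+ * Map a zero-based UCC number onto the CECR sub-block code used in host
+ * commands.
+ */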
+u32 ucc_fast_get_qe_cr_subblock(int ucc_num)
+{
+	switch (ucc_num) {
+		case 0:	return QE_CR_SUBBLOCK_UCCFAST1;
+		case 1:	return QE_CR_SUBBLOCK_UCCFAST2;
+		case 2:	return QE_CR_SUBBLOCK_UCCFAST3;
+		case 3:	return QE_CR_SUBBLOCK_UCCFAST4;
+		case 4:	return QE_CR_SUBBLOCK_UCCFAST5;
+		case 5:	return QE_CR_SUBBLOCK_UCCFAST6;
+		case 6:	return QE_CR_SUBBLOCK_UCCFAST7;
+		case 7:	return QE_CR_SUBBLOCK_UCCFAST8;
+		default:	return QE_CR_SUBBLOCK_INVALID;
+	}
+}
+
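+/*
+ * Find the CMXUCR register that holds the clock-routing fields of the given
+ * UCC. Each CMXUCR serves two UCCs, one per 16-bit half, so the register
+ * pointer, the register number and the bit shift are all returned.
+ */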
+static void ucc_get_cmxucr_reg(int ucc_num, volatile u32 **p_cmxucr,
+				 u8 *reg_num, u8 *shift)
+{
+	switch (ucc_num) {
+		case 0:	/* UCC1 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr1);
+			*reg_num = 1;
+			*shift  = 16;
+			break;
+		case 2:	/* UCC3 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr1);
+			*reg_num = 1;
+			*shift  = 0;
+			break;
+		case 4:	/* UCC5 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr2);
+			*reg_num = 2;
+			*shift  = 16;
+			break;
+		case 6:	/* UCC7 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr2);
+			*reg_num = 2;
+			*shift  = 0;
+			break;
+		case 1:	/* UCC2 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr3);
+			*reg_num = 3;
+			*shift  = 16;
+			break;
+		case 3:	/* UCC4 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr3);
+			*reg_num = 3;
+			*shift  = 0;
+			break;
+		case 5:	/* UCC6 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr4);
+			*reg_num = 4;
+			*shift  = 16;
+			break;
+		case 7:	/* UCC8 */
+			*p_cmxucr  = &(qe_immr->qmx.cmxucr4);
+			*reg_num = 4;
+			*shift  = 0;
+			break;
+		default:
+			break;
+	}
+}
+
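+/*
+ * Route a BRG or CLK pin to the Rx or Tx clock of a UCC by programming the
+ * 4-bit clock-source field in the corresponding CMXUCR register.
+ */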
+static int ucc_set_clk_src(int ucc_num, qe_clock_e clock, comm_dir_e mode)
+{
+	volatile u32	*p_cmxucr = NULL;
+	u8		reg_num = 0;
+	u8		shift = 0;
+	u32		clockBits;
+	u32		clockMask;
+	int		source = -1;
+
+	/* check if the UCC number is in range. */
+	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+		return -EINVAL;
+
+	if (! ((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
+		printf("%s: bad comm mode type passed\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	ucc_get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+
+	switch (reg_num) {
+		case 1:
+			switch (clock) {
+				case QE_BRG1:	source = 1; break;
+				case QE_BRG2:	source = 2; break;
+				case QE_BRG7:	source = 3; break;
+				case QE_BRG8:	source = 4; break;
+				case QE_CLK9:	source = 5; break;
+				case QE_CLK10:	source = 6; break;
+				case QE_CLK11:	source = 7; break;
+				case QE_CLK12:	source = 8; break;
+				case QE_CLK15:	source = 9; break;
+				case QE_CLK16:	source = 10; break;
+				default:	source = -1; break;
+			}
+			break;
+		case 2:
+			switch (clock) {
+				case QE_BRG5:	source = 1; break;
+				case QE_BRG6:	source = 2; break;
+				case QE_BRG7:	source = 3; break;
+				case QE_BRG8:	source = 4; break;
+				case QE_CLK13:	source = 5; break;
+				case QE_CLK14:	source = 6; break;
+				case QE_CLK19:	source = 7; break;
+				case QE_CLK20:	source = 8; break;
+				case QE_CLK15:	source = 9; break;
+				case QE_CLK16:	source = 10; break;
+				default:	source = -1; break;
+			}
+			break;
+		case 3:
+			switch (clock) {
+				case QE_BRG9:	source = 1; break;
+				case QE_BRG10:	source = 2; break;
+				case QE_BRG15:	source = 3; break;
+				case QE_BRG16:	source = 4; break;
+				case QE_CLK3:	source = 5; break;
+				case QE_CLK4:	source = 6; break;
+				case QE_CLK17:	source = 7; break;
+				case QE_CLK18:	source = 8; break;
+				case QE_CLK7:	source = 9; break;
+				case QE_CLK8:	source = 10; break;
+				case QE_CLK16:	source = 11; break;
+				default:	source = -1; break;
+			}
+			break;
+		case 4:
+			switch (clock) {
+				case QE_BRG13:	source = 1; break;
+				case QE_BRG14:	source = 2; break;
+				case QE_BRG15:	source = 3; break;
+				case QE_BRG16:	source = 4; break;
+				case QE_CLK5:	source = 5; break;
+				case QE_CLK6:	source = 6; break;
+				case QE_CLK21:	source = 7; break;
+				case QE_CLK22:	source = 8; break;
+				case QE_CLK7:	source = 9; break;
+				case QE_CLK8:	source = 10; break;
+				case QE_CLK16:	source = 11; break;
+				default:	source = -1; break;
+			}
+			break;
+		default:
+			source = -1;
+			break;
+	}
+
+	if (source == -1) {
+		printf("%s: Bad combination of clock and UCC\n", __FUNCTION__);
+		return -ENOENT;
+	}
+
+	clockBits = (u32) source;
+	clockMask = QE_CMXUCR_TX_CLK_SRC_MASK;
+	if (mode == COMM_DIR_RX) {
+		clockBits <<= 4; /* Rx field is 4 bits to left of Tx field */
+		clockMask <<= 4; /* Rx field is 4 bits to left of Tx field */
+	}
+	clockBits <<= shift;
+	clockMask <<= shift;
+
+	out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clockMask) | clockBits);
+
+	return 0;
+}
+
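+/* Return the address of the given UCC's register block within the QE IMMR. */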
+static uint ucc_get_reg_baseaddr(int ucc_num)
+{
+	uint base = 0;
+
+	/* check if the UCC number is in range */
+	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) {
+		printf("%s: UCC number out of range\n", __FUNCTION__);
+		return 0;
+	}
+
+	switch (ucc_num) {
+		case 0:	base = 0x00002000; break;
+		case 1:	base = 0x00003000; break;
+		case 2:	base = 0x00002200; break;
+		case 3:	base = 0x00003200; break;
+		case 4:	base = 0x00002400; break;
+		case 5:	base = 0x00003400; break;
+		case 6:	base = 0x00002600; break;
+		case 7:	base = 0x00003600; break;
+		default: break;
+	}
+
+	base = (uint)qe_immr + base;
+	return base;
+}
+
+void ucc_fast_enable(ucc_fast_private_t *uccf, comm_dir_e mode)
+{
+	ucc_fast_t	*uf_regs;
+	u32		gumr;
+
+	uf_regs = uccf->uf_regs;
+
+	/* Enable reception and/or transmission on this UCC. */
+	gumr = in_be32(&uf_regs->gumr);
+	if (mode & COMM_DIR_TX) {
+		gumr |= UCC_FAST_GUMR_ENT;
+		uccf->enabled_tx = 1;
+	}
+	if (mode & COMM_DIR_RX) {
+		gumr |= UCC_FAST_GUMR_ENR;
+		uccf->enabled_rx = 1;
+	}
+	out_be32(&uf_regs->gumr, gumr);
+}
+
+void ucc_fast_disable(ucc_fast_private_t *uccf, comm_dir_e mode)
+{
+	ucc_fast_t	*uf_regs;
+	u32		gumr;
+
+	uf_regs = uccf->uf_regs;
+
+	/* Disable reception and/or transmission on this UCC. */
+	gumr = in_be32(&uf_regs->gumr);
+	if (mode & COMM_DIR_TX) {
+		gumr &= ~UCC_FAST_GUMR_ENT;
+		uccf->enabled_tx = 0;
+	}
+	if (mode & COMM_DIR_RX) {
+		gumr &= ~UCC_FAST_GUMR_ENR;
+		uccf->enabled_rx = 0;
+	}
+	out_be32(&uf_regs->gumr, gumr);
+}
+
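+/*
+ * Allocate and initialize a fast UCC: map its registers, select the fast
+ * Ethernet protocol, carve the Tx/Rx virtual FIFOs out of MURAM, route the
+ * Rx/Tx clocks and mask all interrupts.
+ */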
+int ucc_fast_init(ucc_fast_info_t *uf_info, ucc_fast_private_t  **uccf_ret)
+{
+	ucc_fast_private_t	*uccf;
+	ucc_fast_t		*uf_regs;
+
+	if (!uf_info)
+		return -EINVAL;
+
+	if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+		printf("%s: Illegal UCC number!\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	uccf = (ucc_fast_private_t *)malloc(sizeof(ucc_fast_private_t));
+	if (!uccf) {
+		printf("%s: No memory for UCC fast data structure!\n",
+			 __FUNCTION__);
+		return -ENOMEM;
+	}
+	memset(uccf, 0, sizeof(ucc_fast_private_t));
+
+	/* Save fast UCC structure */
+	uccf->uf_info	= uf_info;
+	uccf->uf_regs	= (ucc_fast_t *)ucc_get_reg_baseaddr(uf_info->ucc_num);
+
+	if (uccf->uf_regs == NULL) {
+		printf("%s: No memory map for UCC fast controller!\n",
+			 __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	uccf->enabled_tx	= 0;
+	uccf->enabled_rx	= 0;
+
+	uf_regs			= uccf->uf_regs;
+	uccf->p_ucce		= (u32 *) &(uf_regs->ucce);
+	uccf->p_uccm		= (u32 *) &(uf_regs->uccm);
+
+	/* Init GUEMR register, UCC both Rx and Tx is Fast protocol */
+	out_8(&uf_regs->guemr, UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_FAST_RX
+				 | UCC_GUEMR_MODE_FAST_TX);
+
+	/* Set GUMR, disable UCC both Rx and Tx, Ethernet protocol */
+	out_be32(&uf_regs->gumr, UCC_FAST_GUMR_ETH);
+
+	/* Set the Giga ethernet VFIFO stuff */
+	if (uf_info->eth_type == GIGA_ETH) {
+		/* Allocate memory for Tx Virtual Fifo */
+		uccf->ucc_fast_tx_virtual_fifo_base_offset =
+		qe_muram_alloc(UCC_GETH_UTFS_GIGA_INIT,
+				 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+		/* Allocate memory for Rx Virtual Fifo */
+		uccf->ucc_fast_rx_virtual_fifo_base_offset =
+		qe_muram_alloc(UCC_GETH_URFS_GIGA_INIT +
+				 UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD,
+				UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+		/* utfb, urfb are offsets from MURAM base */
+		out_be32(&uf_regs->utfb,
+			 uccf->ucc_fast_tx_virtual_fifo_base_offset);
+		out_be32(&uf_regs->urfb,
+			 uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+		/* Set Virtual Fifo registers */
+		out_be16(&uf_regs->urfs, UCC_GETH_URFS_GIGA_INIT);
+		out_be16(&uf_regs->urfet, UCC_GETH_URFET_GIGA_INIT);
+		out_be16(&uf_regs->urfset, UCC_GETH_URFSET_GIGA_INIT);
+		out_be16(&uf_regs->utfs, UCC_GETH_UTFS_GIGA_INIT);
+		out_be16(&uf_regs->utfet, UCC_GETH_UTFET_GIGA_INIT);
+		out_be16(&uf_regs->utftt, UCC_GETH_UTFTT_GIGA_INIT);
+	}
+
+	/* Set the Fast ethernet VFIFO stuff */
+	if (uf_info->eth_type == FAST_ETH) {
+		/* Allocate memory for Tx Virtual Fifo */
+		uccf->ucc_fast_tx_virtual_fifo_base_offset =
+		qe_muram_alloc(UCC_GETH_UTFS_INIT,
+				 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+		/* Allocate memory for Rx Virtual Fifo */
+		uccf->ucc_fast_rx_virtual_fifo_base_offset =
+		qe_muram_alloc(UCC_GETH_URFS_INIT +
+				 UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD,
+				UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+
+		/* utfb, urfb are offsets from MURAM base */
+		out_be32(&uf_regs->utfb,
+			 uccf->ucc_fast_tx_virtual_fifo_base_offset);
+		out_be32(&uf_regs->urfb,
+			 uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+		/* Set Virtual Fifo registers */
+		out_be16(&uf_regs->urfs, UCC_GETH_URFS_INIT);
+		out_be16(&uf_regs->urfet, UCC_GETH_URFET_INIT);
+		out_be16(&uf_regs->urfset, UCC_GETH_URFSET_INIT);
+		out_be16(&uf_regs->utfs, UCC_GETH_UTFS_INIT);
+		out_be16(&uf_regs->utfet, UCC_GETH_UTFET_INIT);
+		out_be16(&uf_regs->utftt, UCC_GETH_UTFTT_INIT);
+	}
+
+	/* Rx clock routing */
+	if (uf_info->rx_clock != QE_CLK_NONE) {
+		if (ucc_set_clk_src(uf_info->ucc_num,
+			 uf_info->rx_clock, COMM_DIR_RX)) {
+			printf("%s: Illegal value for parameter 'RxClock'.\n",
+				 __FUNCTION__);
+			return -EINVAL;
+		}
+	}
+
+	/* Tx clock routing */
+	if (uf_info->tx_clock != QE_CLK_NONE) {
+		if (ucc_set_clk_src(uf_info->ucc_num,
+			 uf_info->tx_clock, COMM_DIR_TX)) {
+			printf("%s: Illegal value for parameter 'TxClock'.\n",
+				 __FUNCTION__);
+			return -EINVAL;
+		}
+	}
+
+	/* Clear the interrupt mask register to disable all interrupts */
+	out_be32(&uf_regs->uccm, 0x0);
+
+	/* Write '1' to clear all events */
+	out_be32(&uf_regs->ucce, 0xffffffff);
+
+	*uccf_ret = uccf;
+	return 0;
+}
diff --git a/marvell/uboot/drivers/qe/uccf.h b/marvell/uboot/drivers/qe/uccf.h
new file mode 100644
index 0000000..0b57e2f
--- /dev/null
+++ b/marvell/uboot/drivers/qe/uccf.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef __UCCF_H__
+#define __UCCF_H__
+
+#include "common.h"
+#include "qe.h"
+#include "asm/immap_qe.h"
+
+/* Fast or Giga ethernet
+*/
+typedef enum enet_type {
+	FAST_ETH,
+	GIGA_ETH,
+} enet_type_e;
+
+/* General UCC Extended Mode Register
+*/
+#define UCC_GUEMR_MODE_MASK_RX		0x02
+#define UCC_GUEMR_MODE_MASK_TX		0x01
+#define UCC_GUEMR_MODE_FAST_RX		0x02
+#define UCC_GUEMR_MODE_FAST_TX		0x01
+#define UCC_GUEMR_MODE_SLOW_RX		0x00
+#define UCC_GUEMR_MODE_SLOW_TX		0x00
+#define UCC_GUEMR_SET_RESERVED3		0x10 /* Bit 3 must be set 1 */
+
+/* General UCC FAST Mode Register
+*/
+#define UCC_FAST_GUMR_TCI		0x20000000
+#define UCC_FAST_GUMR_TRX		0x10000000
+#define UCC_FAST_GUMR_TTX		0x08000000
+#define UCC_FAST_GUMR_CDP		0x04000000
+#define UCC_FAST_GUMR_CTSP		0x02000000
+#define UCC_FAST_GUMR_CDS		0x01000000
+#define UCC_FAST_GUMR_CTSS		0x00800000
+#define UCC_FAST_GUMR_TXSY		0x00020000
+#define UCC_FAST_GUMR_RSYN		0x00010000
+#define UCC_FAST_GUMR_RTSM		0x00002000
+#define UCC_FAST_GUMR_REVD		0x00000400
+#define UCC_FAST_GUMR_ENR		0x00000020
+#define UCC_FAST_GUMR_ENT		0x00000010
+
+/* GUMR [MODE] bit maps
+*/
+#define UCC_FAST_GUMR_HDLC		0x00000000
+#define UCC_FAST_GUMR_QMC		0x00000002
+#define UCC_FAST_GUMR_UART		0x00000004
+#define UCC_FAST_GUMR_BISYNC		0x00000008
+#define UCC_FAST_GUMR_ATM		0x0000000a
+#define UCC_FAST_GUMR_ETH		0x0000000c
+
+/* Transmit On Demand (UTORD)
+*/
+#define UCC_SLOW_TOD			0x8000
+#define UCC_FAST_TOD			0x8000
+
+/* Fast Ethernet (10/100 Mbps)
+*/
+#define UCC_GETH_URFS_INIT		512        /* Rx virtual FIFO size */
+#define UCC_GETH_URFET_INIT		256        /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT		384        /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT		512        /* Tx virtual FIFO size */
+#define UCC_GETH_UTFET_INIT		256        /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT		128
+
+/* Gigabit Ethernet (1000 Mbps)
+*/
+#define UCC_GETH_URFS_GIGA_INIT		4096/*2048*/    /* Rx virtual FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT	2048/*1024*/    /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT	3072/*1536*/    /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT		8192/*2048*/    /* Tx virtual FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT	4096/*1024*/    /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT	0x400/*0x40*/
+
+/* UCC fast alignment
+*/
+#define UCC_FAST_RX_ALIGN			4
+#define UCC_FAST_MRBLR_ALIGNMENT		4
+#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT	8
+
+/* Sizes
+*/
+#define UCC_FAST_RX_VIRTUAL_FIFO_SIZE_PAD	8
+
+/* UCC fast structure.
+*/
+typedef struct ucc_fast_info {
+	int		ucc_num;
+	qe_clock_e	rx_clock;
+	qe_clock_e	tx_clock;
+	enet_type_e	eth_type;
+} ucc_fast_info_t;
+
+typedef struct ucc_fast_private {
+	ucc_fast_info_t	*uf_info;
+	ucc_fast_t	*uf_regs; /* a pointer to memory map of UCC regs */
+	u32		*p_ucce; /* a pointer to the event register */
+	u32		*p_uccm; /* a pointer to the mask register */
+	int		enabled_tx; /* whether UCC is enabled for Tx (ENT) */
+	int		enabled_rx; /* whether UCC is enabled for Rx (ENR) */
+	u32		ucc_fast_tx_virtual_fifo_base_offset;
+	u32		ucc_fast_rx_virtual_fifo_base_offset;
+} ucc_fast_private_t;
+
+void ucc_fast_transmit_on_demand(ucc_fast_private_t *uccf);
+u32 ucc_fast_get_qe_cr_subblock(int ucc_num);
+void ucc_fast_enable(ucc_fast_private_t *uccf, comm_dir_e mode);
+void ucc_fast_disable(ucc_fast_private_t *uccf, comm_dir_e mode);
+int ucc_fast_init(ucc_fast_info_t *uf_info, ucc_fast_private_t **uccf_ret);
+
+#endif /* __UCCF_H__ */
diff --git a/marvell/uboot/drivers/qe/uec.c b/marvell/uboot/drivers/qe/uec.c
new file mode 100644
index 0000000..6804573
--- /dev/null
+++ b/marvell/uboot/drivers/qe/uec.c
@@ -0,0 +1,1421 @@
+/*
+ * Copyright (C) 2006-2011 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include "common.h"
+#include "net.h"
+#include "malloc.h"
+#include "asm/errno.h"
+#include "asm/io.h"
+#include "asm/immap_qe.h"
+#include "qe.h"
+#include "uccf.h"
+#include "uec.h"
+#include "uec_phy.h"
+#include "miiphy.h"
+#include <phy.h>
+
+/* Default UTBIPAR SMI address */
+#ifndef CONFIG_UTBIPAR_INIT_TBIPA
+#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
+#endif
+
+static uec_info_t uec_info[] = {
+#ifdef CONFIG_UEC_ETH1
+	STD_UEC_INFO(1),	/* UEC1 */
+#endif
+#ifdef CONFIG_UEC_ETH2
+	STD_UEC_INFO(2),	/* UEC2 */
+#endif
+#ifdef CONFIG_UEC_ETH3
+	STD_UEC_INFO(3),	/* UEC3 */
+#endif
+#ifdef CONFIG_UEC_ETH4
+	STD_UEC_INFO(4),	/* UEC4 */
+#endif
+#ifdef CONFIG_UEC_ETH5
+	STD_UEC_INFO(5),	/* UEC5 */
+#endif
+#ifdef CONFIG_UEC_ETH6
+	STD_UEC_INFO(6),	/* UEC6 */
+#endif
+#ifdef CONFIG_UEC_ETH7
+	STD_UEC_INFO(7),	/* UEC7 */
+#endif
+#ifdef CONFIG_UEC_ETH8
+	STD_UEC_INFO(8),	/* UEC8 */
+#endif
+};
+
+#define MAXCONTROLLERS	(8)
+
+static struct eth_device *devlist[MAXCONTROLLERS];
+
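+/* Enable the MAC transmitter and/or receiver via the MACCFG1 enable bits. */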
+static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
+{
+	uec_t		*uec_regs;
+	u32		maccfg1;
+
+	if (!uec) {
+		printf("%s: uec not initialized\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	uec_regs = uec->uec_regs;
+
+	maccfg1 = in_be32(&uec_regs->maccfg1);
+
+	if (mode & COMM_DIR_TX)	{
+		maccfg1 |= MACCFG1_ENABLE_TX;
+		out_be32(&uec_regs->maccfg1, maccfg1);
+		uec->mac_tx_enabled = 1;
+	}
+
+	if (mode & COMM_DIR_RX)	{
+		maccfg1 |= MACCFG1_ENABLE_RX;
+		out_be32(&uec_regs->maccfg1, maccfg1);
+		uec->mac_rx_enabled = 1;
+	}
+
+	return 0;
+}
+
+static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
+{
+	uec_t		*uec_regs;
+	u32		maccfg1;
+
+	if (!uec) {
+		printf("%s: uec not initialized\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	uec_regs = uec->uec_regs;
+
+	maccfg1 = in_be32(&uec_regs->maccfg1);
+
+	if (mode & COMM_DIR_TX)	{
+		maccfg1 &= ~MACCFG1_ENABLE_TX;
+		out_be32(&uec_regs->maccfg1, maccfg1);
+		uec->mac_tx_enabled = 0;
+	}
+
+	if (mode & COMM_DIR_RX)	{
+		maccfg1 &= ~MACCFG1_ENABLE_RX;
+		out_be32(&uec_regs->maccfg1, maccfg1);
+		uec->mac_rx_enabled = 0;
+	}
+
+	return 0;
+}
+
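+/*
+ * Issue the GRACEFUL_STOP_TX host command and wait for the UCCE[GRA] event
+ * that signals the transmitter has stopped.
+ */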
+static int uec_graceful_stop_tx(uec_private_t *uec)
+{
+	ucc_fast_t		*uf_regs;
+	u32			cecr_subblock;
+	u32			ucce;
+
+	if (!uec || !uec->uccf) {
+		printf("%s: No handle passed.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	uf_regs = uec->uccf->uf_regs;
+
+	/* Clear the grace stop event */
+	out_be32(&uf_regs->ucce, UCCE_GRA);
+
+	/* Issue host command */
+	cecr_subblock =
+		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+
+	/* Wait for command to complete */
+	do {
+		ucce = in_be32(&uf_regs->ucce);
+	} while (! (ucce & UCCE_GRA));
+
+	uec->grace_stopped_tx = 1;
+
+	return 0;
+}
+
+static int uec_graceful_stop_rx(uec_private_t *uec)
+{
+	u32		cecr_subblock;
+	u8		ack;
+
+	if (!uec) {
+		printf("%s: No handle passed.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	if (!uec->p_rx_glbl_pram) {
+		printf("%s: Rx global parameter RAM not initialized\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* Clear acknowledge bit */
+	ack = uec->p_rx_glbl_pram->rxgstpack;
+	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+	uec->p_rx_glbl_pram->rxgstpack = ack;
+
+	/* Keep issuing cmd and checking ack bit until it is asserted */
+	do {
+		/* Issue host command */
+		cecr_subblock =
+		 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+				 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+		ack = uec->p_rx_glbl_pram->rxgstpack;
+	} while (! (ack & GRACEFUL_STOP_ACKNOWLEDGE_RX ));
+
+	uec->grace_stopped_rx = 1;
+
+	return 0;
+}
+
+static int uec_restart_tx(uec_private_t *uec)
+{
+	u32		cecr_subblock;
+
+	if (!uec || !uec->uec_info) {
+		printf("%s: No handle passed.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	cecr_subblock =
+	 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
+			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+
+	uec->grace_stopped_tx = 0;
+
+	return 0;
+}
+
+static int uec_restart_rx(uec_private_t *uec)
+{
+	u32		cecr_subblock;
+
+	if (!uec || !uec->uec_info) {
+		printf("%s: No handle passed.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	cecr_subblock =
+	 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
+	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
+			 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
+
+	uec->grace_stopped_rx = 0;
+
+	return 0;
+}
+
+static int uec_open(uec_private_t *uec, comm_dir_e mode)
+{
+	ucc_fast_private_t	*uccf;
+
+	if (!uec || !uec->uccf) {
+		printf("%s: No handle passed.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	uccf = uec->uccf;
+
+	/* check if the UCC number is in range. */
+	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+		printf("%s: ucc_num out of range.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* Enable MAC */
+	uec_mac_enable(uec, mode);
+
+	/* Enable UCC fast */
+	ucc_fast_enable(uccf, mode);
+
+	/* RISC microcode start */
+	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
+		uec_restart_tx(uec);
+	}
+	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
+		uec_restart_rx(uec);
+	}
+
+	return 0;
+}
+
+static int uec_stop(uec_private_t *uec, comm_dir_e mode)
+{
+	if (!uec || !uec->uccf) {
+		printf("%s: No handle passed.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* check if the UCC number is in range. */
+	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+		printf("%s: ucc_num out of range.\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	/* Stop any transmissions */
+	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
+		uec_graceful_stop_tx(uec);
+	}
+	/* Stop any receptions */
+	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
+		uec_graceful_stop_rx(uec);
+	}
+
+	/* Disable the UCC fast */
+	ucc_fast_disable(uec->uccf, mode);
+
+	/* Disable the MAC */
+	uec_mac_disable(uec, mode);
+
+	return 0;
+}
+
+static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
+{
+	uec_t		*uec_regs;
+	u32		maccfg2;
+
+	if (!uec) {
+		printf("%s: uec not initialized\n", __FUNCTION__);
+		return -EINVAL;
+	}
+	uec_regs = uec->uec_regs;
+
+	if (duplex == DUPLEX_HALF) {
+		maccfg2 = in_be32(&uec_regs->maccfg2);
+		maccfg2 &= ~MACCFG2_FDX;
+		out_be32(&uec_regs->maccfg2, maccfg2);
+	}
+
+	if (duplex == DUPLEX_FULL) {
+		maccfg2 = in_be32(&uec_regs->maccfg2);
+		maccfg2 |= MACCFG2_FDX;
+		out_be32(&uec_regs->maccfg2, maccfg2);
+	}
+
+	return 0;
+}
+
+static int uec_set_mac_if_mode(uec_private_t *uec,
+		phy_interface_t if_mode, int speed)
+{
+	phy_interface_t		enet_if_mode;
+	uec_t			*uec_regs;
+	u32			upsmr;
+	u32			maccfg2;
+
+	if (!uec) {
+		printf("%s: uec not initialized\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	uec_regs = uec->uec_regs;
+	enet_if_mode = if_mode;
+
+	maccfg2 = in_be32(&uec_regs->maccfg2);
+	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+
+	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
+	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);
+
+	switch (speed) {
+		case SPEED_10:
+			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+			switch (enet_if_mode) {
+				case PHY_INTERFACE_MODE_MII:
+					break;
+				case PHY_INTERFACE_MODE_RGMII:
+					upsmr |= (UPSMR_RPM | UPSMR_R10M);
+					break;
+				case PHY_INTERFACE_MODE_RMII:
+					upsmr |= (UPSMR_R10M | UPSMR_RMM);
+					break;
+				default:
+					return -EINVAL;
+					break;
+			}
+			break;
+		case SPEED_100:
+			maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+			switch (enet_if_mode) {
+				case PHY_INTERFACE_MODE_MII:
+					break;
+				case PHY_INTERFACE_MODE_RGMII:
+					upsmr |= UPSMR_RPM;
+					break;
+				case PHY_INTERFACE_MODE_RMII:
+					upsmr |= UPSMR_RMM;
+					break;
+				default:
+					return -EINVAL;
+					break;
+			}
+			break;
+		case SPEED_1000:
+			maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+			switch (enet_if_mode) {
+				case PHY_INTERFACE_MODE_GMII:
+					break;
+				case PHY_INTERFACE_MODE_TBI:
+					upsmr |= UPSMR_TBIM;
+					break;
+				case PHY_INTERFACE_MODE_RTBI:
+					upsmr |= (UPSMR_RPM | UPSMR_TBIM);
+					break;
+				case PHY_INTERFACE_MODE_RGMII_RXID:
+				case PHY_INTERFACE_MODE_RGMII_TXID:
+				case PHY_INTERFACE_MODE_RGMII_ID:
+				case PHY_INTERFACE_MODE_RGMII:
+					upsmr |= UPSMR_RPM;
+					break;
+				case PHY_INTERFACE_MODE_SGMII:
+					upsmr |= UPSMR_SGMM;
+					break;
+				default:
+					return -EINVAL;
+					break;
+			}
+			break;
+		default:
+			return -EINVAL;
+			break;
+	}
+
+	out_be32(&uec_regs->maccfg2, maccfg2);
+	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);
+
+	return 0;
+}
+
+static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
+{
+	uint		timeout = 0x1000;
+	u32		miimcfg = 0;
+
+	miimcfg = in_be32(&uec_mii_regs->miimcfg);
+	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
+	out_be32(&uec_mii_regs->miimcfg, miimcfg);
+
+	/* Wait until the bus is free */
+	while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--);
+	if (timeout <= 0) {
+		printf("%s: The MII Bus is stuck!", __FUNCTION__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int init_phy(struct eth_device *dev)
+{
+	uec_private_t		*uec;
+	uec_mii_t		*umii_regs;
+	struct uec_mii_info	*mii_info;
+	struct phy_info		*curphy;
+	int			err;
+
+	uec = (uec_private_t *)dev->priv;
+	umii_regs = uec->uec_mii_regs;
+
+	uec->oldlink = 0;
+	uec->oldspeed = 0;
+	uec->oldduplex = -1;
+
+	mii_info = malloc(sizeof(*mii_info));
+	if (!mii_info) {
+		printf("%s: Could not allocate mii_info", dev->name);
+		return -ENOMEM;
+	}
+	memset(mii_info, 0, sizeof(*mii_info));
+
+	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
+		mii_info->speed = SPEED_1000;
+	} else {
+		mii_info->speed = SPEED_100;
+	}
+
+	mii_info->duplex = DUPLEX_FULL;
+	mii_info->pause = 0;
+	mii_info->link = 1;
+
+	mii_info->advertising = (ADVERTISED_10baseT_Half |
+				ADVERTISED_10baseT_Full |
+				ADVERTISED_100baseT_Half |
+				ADVERTISED_100baseT_Full |
+				ADVERTISED_1000baseT_Full);
+	mii_info->autoneg = 1;
+	mii_info->mii_id = uec->uec_info->phy_address;
+	mii_info->dev = dev;
+
+	mii_info->mdio_read = &uec_read_phy_reg;
+	mii_info->mdio_write = &uec_write_phy_reg;
+
+	uec->mii_info = mii_info;
+
+	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);
+
+	if (init_mii_management_configuration(umii_regs)) {
+		printf("%s: The MII Bus is stuck!", dev->name);
+		err = -1;
+		goto bus_fail;
+	}
+
+	/* get info for this PHY */
+	curphy = uec_get_phy_info(uec->mii_info);
+	if (!curphy) {
+		printf("%s: No PHY found", dev->name);
+		err = -1;
+		goto no_phy;
+	}
+
+	mii_info->phyinfo = curphy;
+
+	/* Run the commands which initialize the PHY */
+	if (curphy->init) {
+		err = curphy->init(uec->mii_info);
+		if (err)
+			goto phy_init_fail;
+	}
+
+	return 0;
+
+phy_init_fail:
+no_phy:
+bus_fail:
+	free(mii_info);
+	return err;
+}
+
+static void adjust_link(struct eth_device *dev)
+{
+	uec_private_t		*uec = (uec_private_t *)dev->priv;
+	struct uec_mii_info	*mii_info = uec->mii_info;
+
+	extern void change_phy_interface_mode(struct eth_device *dev,
+				 phy_interface_t mode, int speed);
+
+	if (mii_info->link) {
+		/* Now we make sure that we can be in full duplex mode;
+		 * if not, we operate in half-duplex mode. */
+		if (mii_info->duplex != uec->oldduplex) {
+			if (!(mii_info->duplex)) {
+				uec_set_mac_duplex(uec, DUPLEX_HALF);
+				printf("%s: Half Duplex\n", dev->name);
+			} else {
+				uec_set_mac_duplex(uec, DUPLEX_FULL);
+				printf("%s: Full Duplex\n", dev->name);
+			}
+			uec->oldduplex = mii_info->duplex;
+		}
+
+		if (mii_info->speed != uec->oldspeed) {
+			phy_interface_t mode =
+				uec->uec_info->enet_interface_type;
+			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
+				switch (mii_info->speed) {
+				case SPEED_1000:
+					break;
+				case SPEED_100:
+					printf ("switching to rgmii 100\n");
+					mode = PHY_INTERFACE_MODE_RGMII;
+					break;
+				case SPEED_10:
+					printf ("switching to rgmii 10\n");
+					mode = PHY_INTERFACE_MODE_RGMII;
+					break;
+				default:
+					printf("%s: Ack, Speed (%d) is illegal\n",
+						dev->name, mii_info->speed);
+					break;
+				}
+			}
+
+			/* change phy */
+			change_phy_interface_mode(dev, mode, mii_info->speed);
+			/* change the MAC interface mode */
+			uec_set_mac_if_mode(uec, mode, mii_info->speed);
+
+			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
+			uec->oldspeed = mii_info->speed;
+		}
+
+		if (!uec->oldlink) {
+			printf("%s: Link is up\n", dev->name);
+			uec->oldlink = 1;
+		}
+
+	} else { /* if (mii_info->link) */
+		if (uec->oldlink) {
+			printf("%s: Link is down\n", dev->name);
+			uec->oldlink = 0;
+			uec->oldspeed = 0;
+			uec->oldduplex = -1;
+		}
+	}
+}
+
+static void phy_change(struct eth_device *dev)
+{
+	uec_private_t	*uec = (uec_private_t *)dev->priv;
+
+#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
+	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
+
+	/* QE9 and QE12 need to be set for enabling QE MII management signals */
+	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
+	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
+#endif
+
+	/* Update the link, speed, duplex */
+	uec->mii_info->phyinfo->read_status(uec->mii_info);
+
+#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
+	/*
+	 * QE12 is muxed with LBCTL, it needs to be released for enabling
+	 * LBCTL signal for LBC usage.
+	 */
+	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
+#endif
+
+	/* Adjust the interface according to speed */
+	adjust_link(dev);
+}
+
+#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
+
+/*
+ * Find a device index from the devlist by name
+ *
+ * Returns:
+ *  The index where the device is located, -1 on error
+ */
+static int uec_miiphy_find_dev_by_name(const char *devname)
+{
+	int i;
+
+	for (i = 0; i < MAXCONTROLLERS; i++) {
+		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0) {
+			break;
+		}
+	}
+
+	/* If device cannot be found, returns -1 */
+	if (i == MAXCONTROLLERS) {
+		debug ("%s: device %s not found in devlist\n", __FUNCTION__, devname);
+		i = -1;
+	}
+
+	return i;
+}
+
+/*
+ * Read a MII PHY register.
+ *
+ * Returns:
+ *  0 on success
+ */
+static int uec_miiphy_read(const char *devname, unsigned char addr,
+			    unsigned char reg, unsigned short *value)
+{
+	int devindex = 0;
+
+	if (devname == NULL || value == NULL) {
+		debug("%s: NULL pointer given\n", __FUNCTION__);
+	} else {
+		devindex = uec_miiphy_find_dev_by_name(devname);
+		if (devindex >= 0) {
+			*value = uec_read_phy_reg(devlist[devindex], addr, reg);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Write a MII PHY register.
+ *
+ * Returns:
+ *  0 on success
+ */
+static int uec_miiphy_write(const char *devname, unsigned char addr,
+			     unsigned char reg, unsigned short value)
+{
+	int devindex = 0;
+
+	if (devname == NULL) {
+		debug("%s: NULL pointer given\n", __FUNCTION__);
+	} else {
+		devindex = uec_miiphy_find_dev_by_name(devname);
+		if (devindex >= 0) {
+			uec_write_phy_reg(devlist[devindex], addr, reg, value);
+		}
+	}
+	return 0;
+}
+#endif
+
+static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
+{
+	uec_t		*uec_regs;
+	u32		mac_addr1;
+	u32		mac_addr2;
+
+	if (!uec) {
+		printf("%s: uec not initialized\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	uec_regs = uec->uec_regs;
+
+	/* For a station address of 0x12345678ABCD, write 0xCDAB7856 to
+	 * MACSTNADDR1 and 0x34120000 to MACSTNADDR2.
+	 */
+
+	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
+			(mac_addr[3] << 8)  | (mac_addr[2]);
+	out_be32(&uec_regs->macstnaddr1, mac_addr1);
+
+	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
+	out_be32(&uec_regs->macstnaddr2, mac_addr2);
+
+	return 0;
+}
+
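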
+static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
+					 int *threads_num_ret)
+{
+	int	num_threads_numerica;
+
+	switch (threads_num) {
+		case UEC_NUM_OF_THREADS_1:
+			num_threads_numerica = 1;
+			break;
+		case UEC_NUM_OF_THREADS_2:
+			num_threads_numerica = 2;
+			break;
+		case UEC_NUM_OF_THREADS_4:
+			num_threads_numerica = 4;
+			break;
+		case UEC_NUM_OF_THREADS_6:
+			num_threads_numerica = 6;
+			break;
+		case UEC_NUM_OF_THREADS_8:
+			num_threads_numerica = 8;
+			break;
+		default:
+			printf("%s: Bad number of threads value.",
+				 __FUNCTION__);
+			return -EINVAL;
+	}
+
+	*threads_num_ret = num_threads_numerica;
+
+	return 0;
+}
+
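+/*
+ * Allocate the global Tx parameter RAM in MURAM and program it: send queue
+ * descriptor, TxBD ring pointers, bus mode, IPH offsets, VTAG table and the
+ * per-thread data area.
+ */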
+static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
+{
+	uec_info_t	*uec_info;
+	u32		end_bd;
+	u8		bmrx = 0;
+	int		i;
+
+	uec_info = uec->uec_info;
+
+	/* Alloc global Tx parameter RAM page */
+	uec->tx_glbl_pram_offset = qe_muram_alloc(
+				sizeof(uec_tx_global_pram_t),
+				 UEC_TX_GLOBAL_PRAM_ALIGNMENT);
+	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
+				qe_muram_addr(uec->tx_glbl_pram_offset);
+
+	/* Zero the global Tx parameter RAM */
+	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));
+
+	/* Init global Tx parameter RAM */
+
+	/* TEMODER, RMON statistics disable, one Tx queue */
+	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);
+
+	/* SQPTR */
+	uec->send_q_mem_reg_offset = qe_muram_alloc(
+				sizeof(uec_send_queue_qd_t),
+				 UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
+				qe_muram_addr(uec->send_q_mem_reg_offset);
+	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);
+
+	/* Setup the table with TxBDs ring */
+	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
+					 * SIZEOFBD;
+	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
+				 (u32)(uec->p_tx_bd_ring));
+	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
+						 end_bd);
+
+	/* Scheduler Base Pointer: we have only one Tx queue, so it is not needed */
+	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);
+
+	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
+	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);
+
+	/* TSTATE, global snooping, big endian, the CSB bus selected */
+	bmrx = BMR_INIT_VALUE;
+	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));
+
+	/* IPH_Offset */
+	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
+		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
+	}
+
+	/* VTAG table */
+	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
+		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
+	}
+
+	/* TQPTR */
+	uec->thread_dat_tx_offset = qe_muram_alloc(
+		num_threads_tx * sizeof(uec_thread_data_tx_t) +
+		 32 *(num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);
+
+	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
+				qe_muram_addr(uec->thread_dat_tx_offset);
+	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
+}
+
+static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
+{
+	u8	bmrx = 0;
+	int	i;
+	uec_82xx_address_filtering_pram_t	*p_af_pram;
+
+	/* Allocate global Rx parameter RAM page */
+	uec->rx_glbl_pram_offset = qe_muram_alloc(
+		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
+	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
+				qe_muram_addr(uec->rx_glbl_pram_offset);
+
+	/* Zero Global Rx parameter RAM */
+	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));
+
+	/* Init global Rx parameter RAM */
+	/* REMODER: extended feature mode disable, VLAN disable,
+	 * lossless flow control disable, receive firmware statistics disable,
+	 * extended address parsing mode disable, one Rx queue,
+	 * dynamic maximum/minimum frame length disable, IP checksum check
+	 * disable, IP address alignment disable
+	 */
+	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);
+
+	/* RQPTR */
+	uec->thread_dat_rx_offset = qe_muram_alloc(
+			num_threads_rx * sizeof(uec_thread_data_rx_t),
+			 UEC_THREAD_DATA_ALIGNMENT);
+	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
+				qe_muram_addr(uec->thread_dat_rx_offset);
+	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);
+
+	/* Type_or_Len */
+	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);
+
+	/* RxRMON base pointer, we don't need it */
+	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);
+
+	/* IntCoalescingPTR, we don't need it, no interrupt */
+	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);
+
+	/* RSTATE, global snooping, big endian, the CSB bus selected */
+	bmrx = BMR_INIT_VALUE;
+	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);
+
+	/* MRBLR */
+	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);
+
+	/* RBDQPTR */
+	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
+				sizeof(uec_rx_bd_queues_entry_t) + \
+				sizeof(uec_rx_prefetched_bds_t),
+				 UEC_RX_BD_QUEUES_ALIGNMENT);
+	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
+				qe_muram_addr(uec->rx_bd_qs_tbl_offset);
+
+	/* Zero it */
+	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
+					sizeof(uec_rx_prefetched_bds_t));
+	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
+	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
+		 (u32)uec->p_rx_bd_ring);
+
+	/* MFLR */
+	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
+	/* MINFLR */
+	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
+	/* MAXD1 */
+	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
+	/* MAXD2 */
+	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
+	/* ECAM_PTR */
+	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
+	/* L2QT */
+	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
+	/* L3QT */
+	for (i = 0; i < 8; i++)	{
+		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
+	}
+
+	/* VLAN_TYPE */
+	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
+	/* TCI */
+	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);
+
+	/* Clear PQ2 style address filtering hash table */
+	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
+			uec->p_rx_glbl_pram->addressfiltering;
+
+	p_af_pram->iaddr_h = 0;
+	p_af_pram->iaddr_l = 0;
+	p_af_pram->gaddr_h = 0;
+	p_af_pram->gaddr_l = 0;
+}
+
+static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
+					 int thread_tx, int thread_rx)
+{
+	uec_init_cmd_pram_t		*p_init_enet_param;
+	u32				init_enet_param_offset;
+	uec_info_t			*uec_info;
+	int				i;
+	int				snum;
+	u32				init_enet_offset;
+	u32				entry_val;
+	u32				command;
+	u32				cecr_subblock;
+
+	uec_info = uec->uec_info;
+
+	/* Allocate init enet command parameter */
+	uec->init_enet_param_offset = qe_muram_alloc(
+					sizeof(uec_init_cmd_pram_t), 4);
+	init_enet_param_offset = uec->init_enet_param_offset;
+	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
+				qe_muram_addr(uec->init_enet_param_offset);
+
+	/* Zero init enet command struct */
+	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));
+
+	/* Init the command struct */
+	p_init_enet_param = uec->p_init_enet_param;
+	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
+	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
+	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
+	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
+	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
+	p_init_enet_param->largestexternallookupkeysize = 0;
+
+	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
+					 << ENET_INIT_PARAM_RGF_SHIFT;
+	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
+					 << ENET_INIT_PARAM_TGF_SHIFT;
+
+	/* Init Rx global parameter pointer */
+	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
+						 (u32)uec_info->risc_rx;
+
+	/* Init Rx threads */
+	for (i = 0; i < (thread_rx + 1); i++) {
+		if ((snum = qe_get_snum()) < 0) {
+			printf("%s can not get snum\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		if (i==0) {
+			init_enet_offset = 0;
+		} else {
+			init_enet_offset = qe_muram_alloc(
+					sizeof(uec_thread_rx_pram_t),
+					 UEC_THREAD_RX_PRAM_ALIGNMENT);
+		}
+
+		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
+				 init_enet_offset | (u32)uec_info->risc_rx;
+		p_init_enet_param->rxthread[i] = entry_val;
+	}
+
+	/* Init Tx global parameter pointer */
+	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
+					 (u32)uec_info->risc_tx;
+
+	/* Init Tx threads */
+	for (i = 0; i < thread_tx; i++) {
+		if ((snum = qe_get_snum()) < 0)	{
+			printf("%s can not get snum\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
+						 UEC_THREAD_TX_PRAM_ALIGNMENT);
+
+		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
+				 init_enet_offset | (u32)uec_info->risc_tx;
+		p_init_enet_param->txthread[i] = entry_val;
+	}
+
+	__asm__ __volatile__("sync");
+
+	/* Issue QE command */
+	command = QE_INIT_TX_RX;
+	cecr_subblock =	ucc_fast_get_qe_cr_subblock(
+				uec->uec_info->uf_info.ucc_num);
+	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+						 init_enet_param_offset);
+
+	return 0;
+}
+
+static int uec_startup(uec_private_t *uec)
+{
+	uec_info_t			*uec_info;
+	ucc_fast_info_t			*uf_info;
+	ucc_fast_private_t		*uccf;
+	ucc_fast_t			*uf_regs;
+	uec_t				*uec_regs;
+	int				num_threads_tx;
+	int				num_threads_rx;
+	u32				utbipar;
+	u32				length;
+	u32				align;
+	qe_bd_t				*bd;
+	u8				*buf;
+	int				i;
+
+	if (!uec || !uec->uec_info) {
+		printf("%s: uec or uec_info not initial\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	uec_info = uec->uec_info;
+	uf_info = &(uec_info->uf_info);
+
+	/* Check if Rx BD ring len is illegal */
+	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
+		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
+		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
+			 __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* Check if Tx BD ring len is illegal */
+	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
+		printf("%s: Tx BD ring length must not be smaller than 2.\n",
+			 __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* Check if MRBLR is illegal */
+	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN  % UEC_MRBLR_ALIGNMENT)) {
+		printf("%s: max rx buffer length must be mutliple of 128.\n",
+			 __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* Both Rx and Tx are stopped */
+	uec->grace_stopped_rx = 1;
+	uec->grace_stopped_tx = 1;
+
+	/* Init UCC fast */
+	if (ucc_fast_init(uf_info, &uccf)) {
+		printf("%s: failed to init ucc fast\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	/* Save uccf */
+	uec->uccf = uccf;
+
+	/* Convert the Tx threads number */
+	if (uec_convert_threads_num(uec_info->num_threads_tx,
+					 &num_threads_tx)) {
+		return -EINVAL;
+	}
+
+	/* Convert the Rx threads number */
+	if (uec_convert_threads_num(uec_info->num_threads_rx,
+					 &num_threads_rx)) {
+		return -EINVAL;
+	}
+
+	uf_regs = uccf->uf_regs;
+
+	/* The UEC registers follow the UCC fast registers */
+	uec_regs = (uec_t *)(&uf_regs->ucc_eth);
+
+	/* Save the UEC register pointer to UEC private struct */
+	uec->uec_regs = uec_regs;
+
+	/* Init UPSMR, enable hardware statistics (UCC) */
+	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);
+
+	/* Init MACCFG1, flow control disable, disable Tx and Rx */
+	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);
+
+	/* Init MACCFG2, length check, MAC PAD and CRC enable */
+	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);
+
+	/* Setup MAC interface mode */
+	uec_set_mac_if_mode(uec, uec_info->enet_interface_type, uec_info->speed);
+
+	/* Setup MII management base */
+#ifndef CONFIG_eTSEC_MDIO_BUS
+	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
+#else
+	uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
+#endif
+
+	/* Setup MII master clock source */
+	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);
+
+	/* Setup UTBIPAR */
+	utbipar = in_be32(&uec_regs->utbipar);
+	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
+
+	/* Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
+	 * This frees up the remaining SMI addresses for use.
+	 */
+	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
+	out_be32(&uec_regs->utbipar, utbipar);
+
+	/* Configure the TBI for SGMII operation */
+	if ((uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII) &&
+	   (uec->uec_info->speed == SPEED_1000)) {
+		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
+			ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+
+		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
+			ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+
+		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
+			ENET_TBI_MII_CR, TBICR_SETTINGS);
+	}
+
+	/* Allocate Tx BDs */
+	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
+		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
+		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
+		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
+		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+	}
+
+	align = UEC_TX_BD_RING_ALIGNMENT;
+	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
+	if (uec->tx_bd_ring_offset != 0) {
+		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
+						 & ~(align - 1));
+	}
+
+	/* Zero all of Tx BDs */
+	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);
+
+	/* Allocate Rx BDs */
+	length = uec_info->rx_bd_ring_len * SIZEOFBD;
+	align = UEC_RX_BD_RING_ALIGNMENT;
+	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
+	if (uec->rx_bd_ring_offset != 0) {
+		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
+							 & ~(align - 1));
+	}
+
+	/* Zero all of Rx BDs */
+	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);
+
+	/* Allocate Rx buffer */
+	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
+	align = UEC_RX_DATA_BUF_ALIGNMENT;
+	uec->rx_buf_offset = (u32)malloc(length + align);
+	if (uec->rx_buf_offset != 0) {
+		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
+						 & ~(align - 1));
+	}
+
+	/* Zero all of the Rx buffer */
+	memset((void *)(uec->rx_buf_offset), 0, length + align);
+
+	/* Init TxBD ring */
+	bd = (qe_bd_t *)uec->p_tx_bd_ring;
+	uec->txBd = bd;
+
+	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
+		BD_DATA_CLEAR(bd);
+		BD_STATUS_SET(bd, 0);
+		BD_LENGTH_SET(bd, 0);
+		bd ++;
+	}
+	BD_STATUS_SET((--bd), TxBD_WRAP);
+
+	/* Init RxBD ring */
+	bd = (qe_bd_t *)uec->p_rx_bd_ring;
+	uec->rxBd = bd;
+	buf = uec->p_rx_buf;
+	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
+		BD_DATA_SET(bd, buf);
+		BD_LENGTH_SET(bd, 0);
+		BD_STATUS_SET(bd, RxBD_EMPTY);
+		buf += MAX_RXBUF_LEN;
+		bd ++;
+	}
+	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);
+
+	/* Init global Tx parameter RAM */
+	uec_init_tx_parameter(uec, num_threads_tx);
+
+	/* Init global Rx parameter RAM */
+	uec_init_rx_parameter(uec, num_threads_rx);
+
+	/* Init ethernet Tx and Rx parameter command */
+	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
+					 num_threads_rx)) {
+		printf("%s issue init enet cmd failed\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int uec_init(struct eth_device* dev, bd_t *bd)
+{
+	uec_private_t		*uec;
+	int			err, i;
+	struct phy_info         *curphy;
+#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
+	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
+#endif
+
+	uec = (uec_private_t *)dev->priv;
+
+	if (uec->the_first_run == 0) {
+#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
+		/*
+		 * QE9 and QE12 need to be set for enabling QE MII
+		 * management signals
+		 */
+		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
+		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
+#endif
+
+		err = init_phy(dev);
+		if (err) {
+			printf("%s: Cannot initialize PHY, aborting.\n",
+			       dev->name);
+			return err;
+		}
+
+		curphy = uec->mii_info->phyinfo;
+
+		if (curphy->config_aneg) {
+			err = curphy->config_aneg(uec->mii_info);
+			if (err) {
+				printf("%s: Can't negotiate PHY\n", dev->name);
+				return err;
+			}
+		}
+
+		/* Give PHYs up to 5 sec to report a link */
+		i = 50;
+		do {
+			err = curphy->read_status(uec->mii_info);
+			if (!(((i-- > 0) && !uec->mii_info->link) || err))
+				break;
+			udelay(100000);
+		} while (1);
+
+#if defined(CONFIG_P1012) || defined(CONFIG_P1021) || defined(CONFIG_P1025)
+		/* QE12 needs to be released for enabling LBCTL signal */
+		clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
+#endif
+
+		if (err || i <= 0)
+			printf("warning: %s: timeout on PHY link\n", dev->name);
+
+		adjust_link(dev);
+		uec->the_first_run = 1;
+	}
+
+	/* Set up the MAC address */
+	if (dev->enetaddr[0] & 0x01) {
+		printf("%s: MacAddress is multcast address\n",
+			 __FUNCTION__);
+		return -1;
+	}
+	uec_set_mac_address(uec, dev->enetaddr);
+
+	err = uec_open(uec, COMM_DIR_RX_AND_TX);
+	if (err) {
+		printf("%s: cannot enable UEC device\n", dev->name);
+		return -1;
+	}
+
+	phy_change(dev);
+
+	return (uec->mii_info->link ? 0 : -1);
+}
+
+static void uec_halt(struct eth_device* dev)
+{
+	uec_private_t	*uec = (uec_private_t *)dev->priv;
+	uec_stop(uec, COMM_DIR_RX_AND_TX);
+}
+
+static int uec_send(struct eth_device *dev, void *buf, int len)
+{
+	uec_private_t		*uec;
+	ucc_fast_private_t	*uccf;
+	volatile qe_bd_t	*bd;
+	u16			status;
+	int			i;
+	int			result = 0;
+
+	uec = (uec_private_t *)dev->priv;
+	uccf = uec->uccf;
+	bd = uec->txBd;
+
+	/* Find an empty TxBD */
+	for (i = 0; bd->status & TxBD_READY; i++) {
+		if (i > 0x100000) {
+			printf("%s: tx buffer not ready\n", dev->name);
+			return result;
+		}
+	}
+
+	/* Init TxBD */
+	BD_DATA_SET(bd, buf);
+	BD_LENGTH_SET(bd, len);
+	status = bd->status;
+	status &= BD_WRAP;
+	status |= (TxBD_READY | TxBD_LAST);
+	BD_STATUS_SET(bd, status);
+
+	/* Tell UCC to transmit the buffer */
+	ucc_fast_transmit_on_demand(uccf);
+
+	/* Wait for buffer to be transmitted */
+	for (i = 0; bd->status & TxBD_READY; i++) {
+		if (i > 0x100000) {
+			printf("%s: tx error\n", dev->name);
+			return result;
+		}
+	}
+
+	/* OK, the buffer has been transmitted */
+	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
+	uec->txBd = bd;
+	result = 1;
+
+	return result;
+}
+
+static int uec_recv(struct eth_device* dev)
+{
+	uec_private_t		*uec = dev->priv;
+	volatile qe_bd_t	*bd;
+	u16			status;
+	u16			len;
+	u8			*data;
+
+	bd = uec->rxBd;
+	status = bd->status;
+
+	while (!(status & RxBD_EMPTY)) {
+		if (!(status & RxBD_ERROR)) {
+			data = BD_DATA(bd);
+			len = BD_LENGTH(bd);
+			NetReceive(data, len);
+		} else {
+			printf("%s: Rx error\n", dev->name);
+		}
+		status &= BD_CLEAN;
+		BD_LENGTH_SET(bd, 0);
+		BD_STATUS_SET(bd, status | RxBD_EMPTY);
+		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
+		status = bd->status;
+	}
+	uec->rxBd = bd;
+
+	return 1;
+}
+
+int uec_initialize(bd_t *bis, uec_info_t *uec_info)
+{
+	struct eth_device	*dev;
+	int			i;
+	uec_private_t		*uec;
+	int			err;
+
+	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
+	if (!dev)
+		return 0;
+	memset(dev, 0, sizeof(struct eth_device));
+
+	/* Allocate the UEC private struct */
+	uec = (uec_private_t *)malloc(sizeof(uec_private_t));
+	if (!uec) {
+		return -ENOMEM;
+	}
+	memset(uec, 0, sizeof(uec_private_t));
+
+	/* Adjust uec_info */
+#if (MAX_QE_RISC == 4)
+	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
+	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
+#endif
+
+	devlist[uec_info->uf_info.ucc_num] = dev;
+
+	uec->uec_info = uec_info;
+	uec->dev = dev;
+
+	sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);
+	dev->iobase = 0;
+	dev->priv = (void *)uec;
+	dev->init = uec_init;
+	dev->halt = uec_halt;
+	dev->send = uec_send;
+	dev->recv = uec_recv;
+
+	/* Clear the ethernet address */
+	for (i = 0; i < 6; i++)
+		dev->enetaddr[i] = 0;
+
+	eth_register(dev);
+
+	err = uec_startup(uec);
+	if (err) {
+		printf("%s: Cannot configure net device, aborting.",dev->name);
+		return err;
+	}
+
+#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
+	miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write);
+#endif
+
+	return 1;
+}
+
+int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++)
+		uec_initialize(bis, &uecs[i]);
+
+	return 0;
+}
+
+int uec_standard_init(bd_t *bis)
+{
+	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
+}
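+
+/*
+ * Bring-up sketch (illustrative only, not part of the driver): board code
+ * typically hooks this driver up through the generic U-Boot ethernet layer,
+ * e.g.
+ *
+ *	int board_eth_init(bd_t *bis)
+ *	{
+ *		return uec_standard_init(bis);
+ *	}
+ *
+ * uec_initialize() registers an eth_device whose init/send/recv/halt hooks
+ * map to uec_init(), uec_send(), uec_recv() and uec_halt() above.
+ */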
diff --git a/marvell/uboot/drivers/qe/uec.h b/marvell/uboot/drivers/qe/uec.h
new file mode 100644
index 0000000..48a1634
--- /dev/null
+++ b/marvell/uboot/drivers/qe/uec.h
@@ -0,0 +1,743 @@
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc.
+ *
+ * Dave Liu <daveliu@freescale.com>
+ * based on source code of Shlomi Gridish
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef __UEC_H__
+#define __UEC_H__
+
+#include "qe.h"
+#include "uccf.h"
+#include <phy.h>
+#include <asm/fsl_enet.h>
+
+#define MAX_TX_THREADS				8
+#define MAX_RX_THREADS				8
+#define MAX_TX_QUEUES				8
+#define MAX_RX_QUEUES				8
+#define MAX_PREFETCHED_BDS			4
+#define MAX_IPH_OFFSET_ENTRY			8
+#define MAX_ENET_INIT_PARAM_ENTRIES_RX		9
+#define MAX_ENET_INIT_PARAM_ENTRIES_TX		8
+
+/* UEC UPSMR (Protocol Specific Mode Register)
+ */
+#define UPSMR_ECM	0x04000000 /* Enable CAM Miss               */
+#define UPSMR_HSE	0x02000000 /* Hardware Statistics Enable    */
+#define UPSMR_PRO	0x00400000 /* Promiscuous                   */
+#define UPSMR_CAP	0x00200000 /* CAM polarity                  */
+#define UPSMR_RSH	0x00100000 /* Receive Short Frames          */
+#define UPSMR_RPM	0x00080000 /* Reduced Pin Mode interfaces   */
+#define UPSMR_R10M	0x00040000 /* RGMII/RMII 10 Mode            */
+#define UPSMR_RLPB	0x00020000 /* RMII Loopback Mode            */
+#define UPSMR_TBIM	0x00010000 /* Ten-bit Interface Mode        */
+#define UPSMR_RMM	0x00001000 /* RMII/RGMII Mode               */
+#define UPSMR_CAM	0x00000400 /* CAM Address Matching          */
+#define UPSMR_BRO	0x00000200 /* Broadcast Address             */
+#define UPSMR_RES1	0x00002000 /* Reserved field - must be 1    */
+#define UPSMR_SGMM	0x00000020 /* SGMII mode    */
+
+#define UPSMR_INIT_VALUE	(UPSMR_HSE | UPSMR_RES1)
+
+/* UEC MACCFG1 (MAC Configuration 1 Register)
+ */
+#define MACCFG1_FLOW_RX			0x00000020 /* Flow Control Rx */
+#define MACCFG1_FLOW_TX			0x00000010 /* Flow Control Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX	0x00000008 /* Enable Rx Sync  */
+#define MACCFG1_ENABLE_RX		0x00000004 /* Enable Rx       */
+#define MACCFG1_ENABLE_SYNCHED_TX	0x00000002 /* Enable Tx Sync  */
+#define MACCFG1_ENABLE_TX		0x00000001 /* Enable Tx       */
+
+#define MACCFG1_INIT_VALUE		(0)
+
+/* UEC MACCFG2 (MAC Configuration 2 Register)
+ */
+#define MACCFG2_PREL				0x00007000
+#define MACCFG2_PREL_SHIFT			(31 - 19)
+#define MACCFG2_PREL_MASK			0x0000f000
+#define MACCFG2_SRP				0x00000080
+#define MACCFG2_STP				0x00000040
+#define MACCFG2_RESERVED_1			0x00000020 /* must be set  */
+#define MACCFG2_LC				0x00000010 /* Length Check */
+#define MACCFG2_MPE				0x00000008
+#define MACCFG2_FDX				0x00000001 /* Full Duplex  */
+#define MACCFG2_FDX_MASK			0x00000001
+#define MACCFG2_PAD_CRC				0x00000004
+#define MACCFG2_CRC_EN				0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE		0x00000000
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY	0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC	0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE		0x00000100
+#define MACCFG2_INTERFACE_MODE_BYTE		0x00000200
+#define MACCFG2_INTERFACE_MODE_MASK		0x00000300
+
+#define MACCFG2_INIT_VALUE	(MACCFG2_PREL | MACCFG2_RESERVED_1 | \
+				 MACCFG2_LC | MACCFG2_PAD_CRC | MACCFG2_FDX)
+
+/* UEC Event Register
+*/
+#define UCCE_MPD				0x80000000
+#define UCCE_SCAR				0x40000000
+#define UCCE_GRA				0x20000000
+#define UCCE_CBPR				0x10000000
+#define UCCE_BSY				0x08000000
+#define UCCE_RXC				0x04000000
+#define UCCE_TXC				0x02000000
+#define UCCE_TXE				0x01000000
+#define UCCE_TXB7				0x00800000
+#define UCCE_TXB6				0x00400000
+#define UCCE_TXB5				0x00200000
+#define UCCE_TXB4				0x00100000
+#define UCCE_TXB3				0x00080000
+#define UCCE_TXB2				0x00040000
+#define UCCE_TXB1				0x00020000
+#define UCCE_TXB0				0x00010000
+#define UCCE_RXB7				0x00008000
+#define UCCE_RXB6				0x00004000
+#define UCCE_RXB5				0x00002000
+#define UCCE_RXB4				0x00001000
+#define UCCE_RXB3				0x00000800
+#define UCCE_RXB2				0x00000400
+#define UCCE_RXB1				0x00000200
+#define UCCE_RXB0				0x00000100
+#define UCCE_RXF7				0x00000080
+#define UCCE_RXF6				0x00000040
+#define UCCE_RXF5				0x00000020
+#define UCCE_RXF4				0x00000010
+#define UCCE_RXF3				0x00000008
+#define UCCE_RXF2				0x00000004
+#define UCCE_RXF1				0x00000002
+#define UCCE_RXF0				0x00000001
+
+#define UCCE_TXB	(UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 | \
+			 UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
+#define UCCE_RXB	(UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 | \
+			 UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
+#define UCCE_RXF	(UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 | \
+			 UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
+#define UCCE_OTHER	(UCCE_SCAR | UCCE_GRA  | UCCE_CBPR | UCCE_BSY  | \
+			 UCCE_RXC  | UCCE_TXC  | UCCE_TXE)
+
+/* UEC TEMODR Register
+*/
+#define TEMODER_SCHEDULER_ENABLE		0x2000
+#define TEMODER_IP_CHECKSUM_GENERATE		0x0400
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1	0x0200
+#define TEMODER_RMON_STATISTICS			0x0100
+#define TEMODER_NUM_OF_QUEUES_SHIFT		(15-15)
+
+#define TEMODER_INIT_VALUE			0xc000
+
+/* UEC REMODR Register
+*/
+#define REMODER_RX_RMON_STATISTICS_ENABLE	0x00001000
+#define REMODER_RX_EXTENDED_FEATURES		0x80000000
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT	(31-9)
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT	(31-10)
+#define REMODER_RX_QOS_MODE_SHIFT		(31-15)
+#define REMODER_RMON_STATISTICS			0x00001000
+#define REMODER_RX_EXTENDED_FILTERING		0x00000800
+#define REMODER_NUM_OF_QUEUES_SHIFT		(31-23)
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH	0x00000008
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH	0x00000004
+#define REMODER_IP_CHECKSUM_CHECK		0x00000002
+#define REMODER_IP_ADDRESS_ALIGNMENT		0x00000001
+
+#define REMODER_INIT_VALUE			0
+
+/* BMRx - Bus Mode Register */
+#define BMR_GLB					0x20
+#define BMR_BO_BE				0x10
+#define BMR_DTB_SECONDARY_BUS			0x02
+#define BMR_BDB_SECONDARY_BUS			0x01
+
+#define BMR_SHIFT				24
+#define BMR_INIT_VALUE				(BMR_GLB | BMR_BO_BE)
+
+/* UEC UCCS (Ethernet Status Register)
+ */
+#define UCCS_BPR				0x02
+#define UCCS_PAU				0x02
+#define UCCS_MPD				0x01
+
+/* UEC MIIMCFG (MII Management Configuration Register)
+ */
+#define MIIMCFG_RESET_MANAGEMENT		0x80000000
+#define MIIMCFG_NO_PREAMBLE			0x00000010
+#define MIIMCFG_CLOCK_DIVIDE_SHIFT		(31 - 31)
+#define MIIMCFG_CLOCK_DIVIDE_MASK		0x0000000f
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4	0x00000001
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6	0x00000002
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8	0x00000003
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10	0x00000004
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14	0x00000005
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20	0x00000006
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28	0x00000007
+
+#define MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE	\
+	MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10
+
+/* UEC MIIMCOM (MII Management Command Register)
+ */
+#define MIIMCOM_SCAN_CYCLE			0x00000002 /* Scan cycle */
+#define MIIMCOM_READ_CYCLE			0x00000001 /* Read cycle */
+
+/* UEC MIIMADD (MII Management Address Register)
+ */
+#define MIIMADD_PHY_ADDRESS_SHIFT		(31 - 23)
+#define MIIMADD_PHY_REGISTER_SHIFT		(31 - 31)
+
+/* UEC MIIMCON (MII Management Control Register)
+ */
+#define MIIMCON_PHY_CONTROL_SHIFT		(31 - 31)
+#define MIIMCON_PHY_STATUS_SHIFT		(31 - 31)
+
+/* UEC MIIMIND (MII Management Indicator Register)
+ */
+#define MIIMIND_NOT_VALID			0x00000004
+#define MIIMIND_SCAN				0x00000002
+#define MIIMIND_BUSY				0x00000001
+
+/* UEC UTBIPAR (Ten Bit Interface Physical Address Register)
+ */
+#define UTBIPAR_PHY_ADDRESS_SHIFT		(31 - 31)
+#define UTBIPAR_PHY_ADDRESS_MASK		0x0000001f
+
+/* UEC UESCR (Ethernet Statistics Control Register)
+ */
+#define UESCR_AUTOZ				0x8000
+#define UESCR_CLRCNT				0x4000
+#define UESCR_MAXCOV_SHIFT			(15 -  7)
+#define UESCR_SCOV_SHIFT			(15 - 15)
+
+/****** Tx data struct collection ******/
+/* Tx thread data, each Tx thread has one of these structs.
+*/
+typedef struct uec_thread_data_tx {
+	u8   res0[136];
+} __attribute__ ((packed)) uec_thread_data_tx_t;
+
+/* Tx thread parameter, each Tx thread has one of these structs.
+*/
+typedef struct uec_thread_tx_pram {
+	u8   res0[64];
+} __attribute__ ((packed)) uec_thread_tx_pram_t;
+
+/* Send queue queue-descriptor, each Tx queue has one of these QDs
+*/
+typedef struct uec_send_queue_qd {
+	u32    bd_ring_base; /* pointer to BD ring base address */
+	u8     res0[0x8];
+	u32    last_bd_completed_address; /* last entry in BD ring */
+	u8     res1[0x30];
+} __attribute__ ((packed)) uec_send_queue_qd_t;
+
+/* Send queue memory region */
+typedef struct uec_send_queue_mem_region {
+	uec_send_queue_qd_t   sqqd[MAX_TX_QUEUES];
+} __attribute__ ((packed)) uec_send_queue_mem_region_t;
+
+/* Scheduler struct
+*/
+typedef struct uec_scheduler {
+	u16  cpucount0;        /* CPU packet counter */
+	u16  cpucount1;        /* CPU packet counter */
+	u16  cecount0;         /* QE  packet counter */
+	u16  cecount1;         /* QE  packet counter */
+	u16  cpucount2;        /* CPU packet counter */
+	u16  cpucount3;        /* CPU packet counter */
+	u16  cecount2;         /* QE  packet counter */
+	u16  cecount3;         /* QE  packet counter */
+	u16  cpucount4;        /* CPU packet counter */
+	u16  cpucount5;        /* CPU packet counter */
+	u16  cecount4;         /* QE  packet counter */
+	u16  cecount5;         /* QE  packet counter */
+	u16  cpucount6;        /* CPU packet counter */
+	u16  cpucount7;        /* CPU packet counter */
+	u16  cecount6;         /* QE  packet counter */
+	u16  cecount7;         /* QE  packet counter */
+	u32  weightstatus[MAX_TX_QUEUES]; /* accumulated weight factor */
+	u32  rtsrshadow;       /* temporary variable handled by QE */
+	u32  time;             /* temporary variable handled by QE */
+	u32  ttl;              /* temporary variable handled by QE */
+	u32  mblinterval;      /* max burst length interval        */
+	u16  nortsrbytetime;   /* normalized value of byte time in tsr units */
+	u8   fracsiz;
+	u8   res0[1];
+	u8   strictpriorityq;  /* Strict Priority Mask register */
+	u8   txasap;           /* Transmit ASAP register        */
+	u8   extrabw;          /* Extra BandWidth register      */
+	u8   oldwfqmask;       /* temporary variable handled by QE */
+	u8   weightfactor[MAX_TX_QUEUES]; /**< weight factor for queues */
+	u32  minw;             /* temporary variable handled by QE */
+	u8   res1[0x70-0x64];
+} __attribute__ ((packed)) uec_scheduler_t;
+
+/* Tx firmware counters
+*/
+typedef struct uec_tx_firmware_statistics_pram {
+	u32  sicoltx;            /* single collision */
+	u32  mulcoltx;           /* multiple collision */
+	u32  latecoltxfr;        /* late collision */
+	u32  frabortduecol;      /* frames aborted due to tx collision */
+	u32  frlostinmactxer;    /* frames lost due to internal MAC error tx */
+	u32  carriersenseertx;   /* carrier sense error */
+	u32  frtxok;             /* frames transmitted OK */
+	u32  txfrexcessivedefer;
+	u32  txpkts256;          /* total packets(including bad) 256~511 B */
+	u32  txpkts512;          /* total packets(including bad) 512~1023B */
+	u32  txpkts1024;         /* total packets(including bad) 1024~1518B */
+	u32  txpktsjumbo;        /* total packets(including bad)  >1024 */
+} __attribute__ ((packed)) uec_tx_firmware_statistics_pram_t;
+
+/* Tx global parameter table
+*/
+typedef struct uec_tx_global_pram {
+	u16  temoder;
+	u8   res0[0x38-0x02];
+	u32  sqptr;
+	u32  schedulerbasepointer;
+	u32  txrmonbaseptr;
+	u32  tstate;
+	u8   iphoffset[MAX_IPH_OFFSET_ENTRY];
+	u32  vtagtable[0x8];
+	u32  tqptr;
+	u8   res2[0x80-0x74];
+} __attribute__ ((packed)) uec_tx_global_pram_t;
+
+
+/****** Rx data struct collection ******/
+/* Rx thread data, each Rx thread has one of these structs.
+*/
+typedef struct uec_thread_data_rx {
+	u8   res0[40];
+} __attribute__ ((packed)) uec_thread_data_rx_t;
+
+/* Rx thread parameter, each Rx thread has one of these structs.
+*/
+typedef struct uec_thread_rx_pram {
+	u8   res0[128];
+} __attribute__ ((packed)) uec_thread_rx_pram_t;
+
+/* Rx firmware counters
+*/
+typedef struct uec_rx_firmware_statistics_pram {
+	u32   frrxfcser;         /* frames with crc error */
+	u32   fraligner;         /* frames with alignment error */
+	u32   inrangelenrxer;    /* in range length error */
+	u32   outrangelenrxer;   /* out of range length error */
+	u32   frtoolong;         /* frame too long */
+	u32   runt;              /* runt */
+	u32   verylongevent;     /* very long event */
+	u32   symbolerror;       /* symbol error */
+	u32   dropbsy;           /* drop because of BD not ready */
+	u8    res0[0x8];
+	u32   mismatchdrop;      /* drop because of MAC filtering */
+	u32   underpkts;         /* total frames less than 64 octets */
+	u32   pkts256;           /* total frames(including bad)256~511 B */
+	u32   pkts512;           /* total frames(including bad)512~1023 B */
+	u32   pkts1024;          /* total frames(including bad)1024~1518 B */
+	u32   pktsjumbo;         /* total frames(including bad) >1024 B */
+	u32   frlossinmacer;
+	u32   pausefr;           /* pause frames */
+	u8    res1[0x4];
+	u32   removevlan;
+	u32   replacevlan;
+	u32   insertvlan;
+} __attribute__ ((packed)) uec_rx_firmware_statistics_pram_t;
+
+/* Rx interrupt coalescing entry, each Rx queue has one of these entries.
+*/
+typedef struct uec_rx_interrupt_coalescing_entry {
+	u32   maxvalue;
+	u32   counter;
+} __attribute__ ((packed)) uec_rx_interrupt_coalescing_entry_t;
+
+typedef struct uec_rx_interrupt_coalescing_table {
+	uec_rx_interrupt_coalescing_entry_t   entry[MAX_RX_QUEUES];
+} __attribute__ ((packed)) uec_rx_interrupt_coalescing_table_t;
+
+/* RxBD queue entry, each Rx queue has one of these entries.
+*/
+typedef struct uec_rx_bd_queues_entry {
+	u32   bdbaseptr;         /* BD base pointer          */
+	u32   bdptr;             /* BD pointer               */
+	u32   externalbdbaseptr; /* external BD base pointer */
+	u32   externalbdptr;     /* external BD pointer      */
+} __attribute__ ((packed)) uec_rx_bd_queues_entry_t;
+
+/* Rx global parameter table
+*/
+typedef struct uec_rx_global_pram {
+	u32  remoder;             /* ethernet mode reg. */
+	u32  rqptr;               /* base pointer to the Rx Queues */
+	u32  res0[0x1];
+	u8   res1[0x20-0xC];
+	u16  typeorlen;
+	u8   res2[0x1];
+	u8   rxgstpack;           /* ack on GRACEFUL STOP RX command */
+	u32  rxrmonbaseptr;       /* Rx RMON statistics base */
+	u8   res3[0x30-0x28];
+	u32  intcoalescingptr;    /* Interrupt coalescing table pointer */
+	u8   res4[0x36-0x34];
+	u8   rstate;
+	u8   res5[0x46-0x37];
+	u16  mrblr;               /* max receive buffer length reg. */
+	u32  rbdqptr;             /* RxBD parameter table description */
+	u16  mflr;                /* max frame length reg. */
+	u16  minflr;              /* min frame length reg. */
+	u16  maxd1;               /* max dma1 length reg. */
+	u16  maxd2;               /* max dma2 length reg. */
+	u32  ecamptr;             /* external CAM address */
+	u32  l2qt;                /* VLAN priority mapping table. */
+	u32  l3qt[0x8];           /* IP   priority mapping table. */
+	u16  vlantype;            /* vlan type */
+	u16  vlantci;             /* default vlan tci */
+	u8   addressfiltering[64];/* address filtering data structure */
+	u32  exfGlobalParam;      /* extended filtering global parameters */
+	u8   res6[0x100-0xC4];    /* Initialize to zero */
+} __attribute__ ((packed)) uec_rx_global_pram_t;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX            0x01
+
+
+/****** UEC common ******/
+/* UCC statistics - hardware counters
+*/
+typedef struct uec_hardware_statistics {
+	u32 tx64;
+	u32 tx127;
+	u32 tx255;
+	u32 rx64;
+	u32 rx127;
+	u32 rx255;
+	u32 txok;
+	u16 txcf;
+	u32 tmca;
+	u32 tbca;
+	u32 rxfok;
+	u32 rxbok;
+	u32 rbyt;
+	u32 rmca;
+	u32 rbca;
+} __attribute__ ((packed)) uec_hardware_statistics_t;
+
+/* InitEnet command parameter
+*/
+typedef struct uec_init_cmd_pram {
+	u8   resinit0;
+	u8   resinit1;
+	u8   resinit2;
+	u8   resinit3;
+	u16  resinit4;
+	u8   res1[0x1];
+	u8   largestexternallookupkeysize;
+	u32  rgftgfrxglobal;
+	u32  rxthread[MAX_ENET_INIT_PARAM_ENTRIES_RX]; /* rx threads */
+	u8   res2[0x38 - 0x30];
+	u32  txglobal;				   /* tx global  */
+	u32  txthread[MAX_ENET_INIT_PARAM_ENTRIES_TX]; /* tx threads */
+	u8   res3[0x1];
+} __attribute__ ((packed)) uec_init_cmd_pram_t;
+
+#define ENET_INIT_PARAM_RGF_SHIFT		(32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT		(32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK		0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK		0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK		0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT		24
+
+#define ENET_INIT_PARAM_MAGIC_RES_INIT0		0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1		0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2		0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3		0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4		0x0400
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM
+*/
+typedef struct uec_82xx_enet_address {
+	u8   res1[0x2];
+	u16  h;       /* address (MSB) */
+	u16  m;       /* address       */
+	u16  l;       /* address (LSB) */
+} __attribute__ ((packed)) uec_82xx_enet_address_t;
+
+/* structure representing 82xx Address Filtering PRAM
+*/
+typedef struct uec_82xx_address_filtering_pram {
+	u32  iaddr_h;        /* individual address filter, high */
+	u32  iaddr_l;        /* individual address filter, low  */
+	u32  gaddr_h;        /* group address filter, high      */
+	u32  gaddr_l;        /* group address filter, low       */
+	uec_82xx_enet_address_t    taddr;
+	uec_82xx_enet_address_t    paddr[4];
+	u8                         res0[0x40-0x38];
+} __attribute__ ((packed)) uec_82xx_address_filtering_pram_t;
+
+/* Buffer Descriptor
+*/
+typedef struct buffer_descriptor {
+	u16 status;
+	u16 len;
+	u32 data;
+} __attribute__ ((packed)) qe_bd_t, *p_bd_t;
+
+#define	SIZEOFBD		sizeof(qe_bd_t)
+
+/* Common BD flags
+*/
+#define BD_WRAP			0x2000
+#define BD_INT			0x1000
+#define BD_LAST			0x0800
+#define BD_CLEAN		0x3000
+
+/* TxBD status flags
+*/
+#define TxBD_READY		0x8000
+#define TxBD_PADCRC		0x4000
+#define TxBD_WRAP		BD_WRAP
+#define TxBD_INT		BD_INT
+#define TxBD_LAST		BD_LAST
+#define TxBD_TXCRC		0x0400
+#define TxBD_DEF		0x0200
+#define TxBD_PP			0x0100
+#define TxBD_LC			0x0080
+#define TxBD_RL			0x0040
+#define TxBD_RC			0x003C
+#define TxBD_UNDERRUN		0x0002
+#define TxBD_TRUNC		0x0001
+
+#define TxBD_ERROR		(TxBD_UNDERRUN | TxBD_TRUNC)
+
+/* RxBD status flags
+*/
+#define RxBD_EMPTY		0x8000
+#define RxBD_OWNER		0x4000
+#define RxBD_WRAP		BD_WRAP
+#define RxBD_INT		BD_INT
+#define RxBD_LAST		BD_LAST
+#define RxBD_FIRST		0x0400
+#define RxBD_CMR		0x0200
+#define RxBD_MISS		0x0100
+#define RxBD_BCAST		0x0080
+#define RxBD_MCAST		0x0040
+#define RxBD_LG			0x0020
+#define RxBD_NO			0x0010
+#define RxBD_SHORT		0x0008
+#define RxBD_CRCERR		0x0004
+#define RxBD_OVERRUN		0x0002
+#define RxBD_IPCH		0x0001
+
+#define RxBD_ERROR		(RxBD_LG | RxBD_NO | RxBD_SHORT | \
+				 RxBD_CRCERR | RxBD_OVERRUN)
+
+/* BD access macros
+*/
+#define BD_STATUS(_bd)			(((p_bd_t)(_bd))->status)
+#define BD_STATUS_SET(_bd, _val)	(((p_bd_t)(_bd))->status = _val)
+#define BD_LENGTH(_bd)			(((p_bd_t)(_bd))->len)
+#define BD_LENGTH_SET(_bd, _val)	(((p_bd_t)(_bd))->len = _val)
+#define BD_DATA_CLEAR(_bd)		(((p_bd_t)(_bd))->data = 0)
+#define BD_IS_DATA(_bd)			(((p_bd_t)(_bd))->data)
+#define BD_DATA(_bd)			((u8 *)(((p_bd_t)(_bd))->data))
+#define BD_DATA_SET(_bd, _data)		(((p_bd_t)(_bd))->data = (u32)(_data))
+#define BD_ADVANCE(_bd,_status,_base)	\
+	(((_status) & BD_WRAP) ? (_bd) = ((p_bd_t)(_base)) : ++(_bd))
+
+/* Rx Prefetched BDs
+*/
+typedef struct uec_rx_prefetched_bds {
+    qe_bd_t   bd[MAX_PREFETCHED_BDS]; /* prefetched bd */
+} __attribute__ ((packed)) uec_rx_prefetched_bds_t;
+
+/* Alignments
+ */
+#define UEC_RX_GLOBAL_PRAM_ALIGNMENT				64
+#define UEC_TX_GLOBAL_PRAM_ALIGNMENT				64
+#define UEC_THREAD_RX_PRAM_ALIGNMENT				128
+#define UEC_THREAD_TX_PRAM_ALIGNMENT				64
+#define UEC_THREAD_DATA_ALIGNMENT				256
+#define UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT		32
+#define UEC_SCHEDULER_ALIGNMENT					4
+#define UEC_TX_STATISTICS_ALIGNMENT				4
+#define UEC_RX_STATISTICS_ALIGNMENT				4
+#define UEC_RX_INTERRUPT_COALESCING_ALIGNMENT			4
+#define UEC_RX_BD_QUEUES_ALIGNMENT				8
+#define UEC_RX_PREFETCHED_BDS_ALIGNMENT				128
+#define UEC_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT	4
+#define UEC_RX_BD_RING_ALIGNMENT				32
+#define UEC_TX_BD_RING_ALIGNMENT				32
+#define UEC_MRBLR_ALIGNMENT					128
+#define UEC_RX_BD_RING_SIZE_ALIGNMENT				4
+#define UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT			32
+#define UEC_RX_DATA_BUF_ALIGNMENT				64
+
+#define UEC_VLAN_PRIORITY_MAX					8
+#define UEC_IP_PRIORITY_MAX					64
+#define UEC_TX_VTAG_TABLE_ENTRY_MAX				8
+#define UEC_RX_BD_RING_SIZE_MIN					8
+#define UEC_TX_BD_RING_SIZE_MIN					2
+
+/* Ethernet speed
+*/
+typedef enum enet_speed {
+	ENET_SPEED_10BT,   /* 10 Base T */
+	ENET_SPEED_100BT,  /* 100 Base T */
+	ENET_SPEED_1000BT  /* 1000 Base T */
+} enet_speed_e;
+
+/* Ethernet Address Type.
+*/
+typedef enum enet_addr_type {
+	ENET_ADDR_TYPE_INDIVIDUAL,
+	ENET_ADDR_TYPE_GROUP,
+	ENET_ADDR_TYPE_BROADCAST
+} enet_addr_type_e;
+
+/* TBI / MII Set Register
+*/
+typedef enum enet_tbi_mii_reg {
+	ENET_TBI_MII_CR        = 0x00,
+	ENET_TBI_MII_SR        = 0x01,
+	ENET_TBI_MII_ANA       = 0x04,
+	ENET_TBI_MII_ANLPBPA   = 0x05,
+	ENET_TBI_MII_ANEX      = 0x06,
+	ENET_TBI_MII_ANNPT     = 0x07,
+	ENET_TBI_MII_ANLPANP   = 0x08,
+	ENET_TBI_MII_EXST      = 0x0F,
+	ENET_TBI_MII_JD        = 0x10,
+	ENET_TBI_MII_TBICON    = 0x11
+} enet_tbi_mii_reg_e;
+
+/* TBI MDIO register bit fields*/
+#define TBICON_CLK_SELECT	0x0020
+#define TBIANA_ASYMMETRIC_PAUSE	0x0100
+#define TBIANA_SYMMETRIC_PAUSE	0x0080
+#define TBIANA_HALF_DUPLEX	0x0040
+#define TBIANA_FULL_DUPLEX	0x0020
+#define TBICR_PHY_RESET		0x8000
+#define TBICR_ANEG_ENABLE	0x1000
+#define TBICR_RESTART_ANEG	0x0200
+#define TBICR_FULL_DUPLEX	0x0100
+#define TBICR_SPEED1_SET	0x0040
+
+#define TBIANA_SETTINGS ( \
+		TBIANA_ASYMMETRIC_PAUSE \
+		| TBIANA_SYMMETRIC_PAUSE \
+		| TBIANA_FULL_DUPLEX \
+		)
+
+#define TBICR_SETTINGS ( \
+		TBICR_PHY_RESET \
+		| TBICR_ANEG_ENABLE \
+		| TBICR_FULL_DUPLEX \
+		| TBICR_SPEED1_SET \
+		)
+
+/* UEC number of threads
+*/
+typedef enum uec_num_of_threads {
+	UEC_NUM_OF_THREADS_1  = 0x1,  /* 1 */
+	UEC_NUM_OF_THREADS_2  = 0x2,  /* 2 */
+	UEC_NUM_OF_THREADS_4  = 0x0,  /* 4 */
+	UEC_NUM_OF_THREADS_6  = 0x3,  /* 6 */
+	UEC_NUM_OF_THREADS_8  = 0x4   /* 8 */
+} uec_num_of_threads_e;
+
+/* UEC initialization info struct
+*/
+#define STD_UEC_INFO(num) \
+{			\
+	.uf_info		= {	\
+		.ucc_num	= CONFIG_SYS_UEC##num##_UCC_NUM,\
+		.rx_clock	= CONFIG_SYS_UEC##num##_RX_CLK,	\
+		.tx_clock	= CONFIG_SYS_UEC##num##_TX_CLK,	\
+		.eth_type	= CONFIG_SYS_UEC##num##_ETH_TYPE,\
+	},	\
+	.num_threads_tx		= UEC_NUM_OF_THREADS_1,	\
+	.num_threads_rx		= UEC_NUM_OF_THREADS_1,	\
+	.risc_tx		= QE_RISC_ALLOCATION_RISC1_AND_RISC2, \
+	.risc_rx		= QE_RISC_ALLOCATION_RISC1_AND_RISC2, \
+	.tx_bd_ring_len		= 16,	\
+	.rx_bd_ring_len		= 16,	\
+	.phy_address		= CONFIG_SYS_UEC##num##_PHY_ADDR, \
+	.enet_interface_type	= CONFIG_SYS_UEC##num##_INTERFACE_TYPE, \
+	.speed			= CONFIG_SYS_UEC##num##_INTERFACE_SPEED, \
+}
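+
+/*
+ * Sketch of typical usage (an assumption about board/driver configuration,
+ * not mandated by this header): the uec_info[] array consumed by
+ * uec_standard_init() is usually built from this macro, one entry per
+ * enabled UEC, e.g.
+ *
+ *	static uec_info_t uec_info[] = {
+ *		STD_UEC_INFO(1),
+ *		STD_UEC_INFO(2),
+ *	};
+ */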
+
+typedef struct uec_info {
+	ucc_fast_info_t			uf_info;
+	uec_num_of_threads_e		num_threads_tx;
+	uec_num_of_threads_e		num_threads_rx;
+	unsigned int			risc_tx;
+	unsigned int			risc_rx;
+	u16				rx_bd_ring_len;
+	u16				tx_bd_ring_len;
+	u8				phy_address;
+	phy_interface_t			enet_interface_type;
+	int				speed;
+} uec_info_t;
+
+/* UEC driver initialized info
+*/
+#define MAX_RXBUF_LEN			1536
+#define MAX_FRAME_LEN			1518
+#define MIN_FRAME_LEN			64
+#define MAX_DMA1_LEN			1520
+#define MAX_DMA2_LEN			1520
+
+/* UEC driver private struct
+*/
+typedef struct uec_private {
+	uec_info_t			*uec_info;
+	ucc_fast_private_t		*uccf;
+	struct eth_device		*dev;
+	uec_t				*uec_regs;
+	uec_mii_t			*uec_mii_regs;
+	/* enet init command parameter */
+	uec_init_cmd_pram_t		*p_init_enet_param;
+	u32				init_enet_param_offset;
+	/* Rx and Tx parameters */
+	uec_rx_global_pram_t		*p_rx_glbl_pram;
+	u32				rx_glbl_pram_offset;
+	uec_tx_global_pram_t		*p_tx_glbl_pram;
+	u32				tx_glbl_pram_offset;
+	uec_send_queue_mem_region_t	*p_send_q_mem_reg;
+	u32				send_q_mem_reg_offset;
+	uec_thread_data_tx_t		*p_thread_data_tx;
+	u32				thread_dat_tx_offset;
+	uec_thread_data_rx_t		*p_thread_data_rx;
+	u32				thread_dat_rx_offset;
+	uec_rx_bd_queues_entry_t	*p_rx_bd_qs_tbl;
+	u32				rx_bd_qs_tbl_offset;
+	/* BDs specific */
+	u8				*p_tx_bd_ring;
+	u32				tx_bd_ring_offset;
+	u8				*p_rx_bd_ring;
+	u32				rx_bd_ring_offset;
+	u8				*p_rx_buf;
+	u32				rx_buf_offset;
+	volatile qe_bd_t		*txBd;
+	volatile qe_bd_t		*rxBd;
+	/* Status */
+	int				mac_tx_enabled;
+	int				mac_rx_enabled;
+	int				grace_stopped_tx;
+	int				grace_stopped_rx;
+	int				the_first_run;
+	/* PHY specific */
+	struct uec_mii_info		*mii_info;
+	int				oldspeed;
+	int				oldduplex;
+	int				oldlink;
+} uec_private_t;
+
+int uec_initialize(bd_t *bis, uec_info_t *uec_info);
+int uec_eth_init(bd_t *bis, uec_info_t *uecs, int num);
+int uec_standard_init(bd_t *bis);
+#endif /* __UEC_H__ */
diff --git a/marvell/uboot/drivers/qe/uec_phy.c b/marvell/uboot/drivers/qe/uec_phy.c
new file mode 100644
index 0000000..5dc4641
--- /dev/null
+++ b/marvell/uboot/drivers/qe/uec_phy.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright (C) 2005,2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Shlomi Gridish
+ *
+ * Description: UCC GETH Driver -- PHY handling
+ *		Driver for UEC on QE
+ *		Based on 8260_io/fcc_enet.c
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include "common.h"
+#include "net.h"
+#include "malloc.h"
+#include "asm/errno.h"
+#include "asm/immap_qe.h"
+#include "asm/io.h"
+#include "qe.h"
+#include "uccf.h"
+#include "uec.h"
+#include "uec_phy.h"
+#include "miiphy.h"
+#include <phy.h>
+
+#define ugphy_printk(format, arg...)  \
+	printf(format "\n", ## arg)
+
+#define ugphy_dbg(format, arg...)	     \
+	ugphy_printk(format , ## arg)
+#define ugphy_err(format, arg...)	     \
+	ugphy_printk(format , ## arg)
+#define ugphy_info(format, arg...)	     \
+	ugphy_printk(format , ## arg)
+#define ugphy_warn(format, arg...)	     \
+	ugphy_printk(format , ## arg)
+
+#ifdef UEC_VERBOSE_DEBUG
+#define ugphy_vdbg ugphy_dbg
+#else
+#define ugphy_vdbg(ugeth, fmt, args...) do { } while (0)
+#endif /* UEC_VERBOSE_DEBUG */
+
+/*--------------------------------------------------------------------+
+ * Fixed PHY (PHY-less) support for Ethernet Ports.
+ *
+ * Copied from arch/powerpc/cpu/ppc4xx/4xx_enet.c
+ *--------------------------------------------------------------------*/
+
+/*
+ * Some boards do not have a PHY for each ethernet port. These ports are known
+ * as Fixed PHY (or PHY-less) ports. For such ports, set the appropriate
+ * CONFIG_SYS_UECx_PHY_ADDR equal to CONFIG_FIXED_PHY_ADDR (an unused address).
+ * When the driver tries to identify the PHYs, CONFIG_FIXED_PHY will be returned
+ * and the driver will search CONFIG_SYS_FIXED_PHY_PORTS to find what network
+ * speed and duplex should be for the port.
+ *
+ * Example board header configuration file:
+ *     #define CONFIG_FIXED_PHY   0xFFFFFFFF
+ *     #define CONFIG_SYS_FIXED_PHY_ADDR 0x1E (pick an unused phy address)
+ *
+ *     #define CONFIG_SYS_UEC1_PHY_ADDR CONFIG_SYS_FIXED_PHY_ADDR
+ *     #define CONFIG_SYS_UEC2_PHY_ADDR 0x02
+ *     #define CONFIG_SYS_UEC3_PHY_ADDR CONFIG_SYS_FIXED_PHY_ADDR
+ *     #define CONFIG_SYS_UEC4_PHY_ADDR 0x04
+ *
+ *     #define CONFIG_SYS_FIXED_PHY_PORT(name,speed,duplex) \
+ *                 {name, speed, duplex},
+ *
+ *     #define CONFIG_SYS_FIXED_PHY_PORTS \
+ *                 CONFIG_SYS_FIXED_PHY_PORT("UEC0",SPEED_100,DUPLEX_FULL) \
+ *                 CONFIG_SYS_FIXED_PHY_PORT("UEC2",SPEED_100,DUPLEX_HALF)
+ */
+
+#ifndef CONFIG_FIXED_PHY
+#define CONFIG_FIXED_PHY	0xFFFFFFFF /* Fixed PHY (PHY-less) */
+#endif
+
+#ifndef CONFIG_SYS_FIXED_PHY_PORTS
+#define CONFIG_SYS_FIXED_PHY_PORTS	/* default is an empty array */
+#endif
+
+struct fixed_phy_port {
+	char name[16];	/* ethernet port name */
+	unsigned int speed;	/* specified speed 10,100 or 1000 */
+	unsigned int duplex;	/* specified duplex FULL or HALF */
+};
+
+static const struct fixed_phy_port fixed_phy_port[] = {
+	CONFIG_SYS_FIXED_PHY_PORTS /* defined in board configuration file */
+};
+
+/*--------------------------------------------------------------------+
+ * BitBang MII support for ethernet ports
+ *
+ * Based from MPC8560ADS implementation
+ *--------------------------------------------------------------------*/
+/*
+ * Example board header file to define bitbang ethernet ports:
+ *
+ * #define CONFIG_SYS_BITBANG_PHY_PORT(name) name,
+ * #define CONFIG_SYS_BITBANG_PHY_PORTS CONFIG_SYS_BITBANG_PHY_PORT("UEC0")
+*/
+#ifndef CONFIG_SYS_BITBANG_PHY_PORTS
+#define CONFIG_SYS_BITBANG_PHY_PORTS	/* default is an empty array */
+#endif
+
+#if defined(CONFIG_BITBANGMII)
+static const char *bitbang_phy_port[] = {
+	CONFIG_SYS_BITBANG_PHY_PORTS /* defined in board configuration file */
+};
+#endif /* CONFIG_BITBANGMII */
+
+static void config_genmii_advert (struct uec_mii_info *mii_info);
+static void genmii_setup_forced (struct uec_mii_info *mii_info);
+static void genmii_restart_aneg (struct uec_mii_info *mii_info);
+static int gbit_config_aneg (struct uec_mii_info *mii_info);
+static int genmii_config_aneg (struct uec_mii_info *mii_info);
+static int genmii_update_link (struct uec_mii_info *mii_info);
+static int genmii_read_status (struct uec_mii_info *mii_info);
+u16 uec_phy_read(struct uec_mii_info *mii_info, u16 regnum);
+void uec_phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val);
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns.  All PHY */
+/* configuration has to be done through the UEC MIIM regs */
+void uec_write_phy_reg (struct eth_device *dev, int mii_id, int regnum, int value)
+{
+	uec_private_t *ugeth = (uec_private_t *) dev->priv;
+	uec_mii_t *ug_regs;
+	enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+	u32 tmp_reg;
+
+
+#if defined(CONFIG_BITBANGMII)
+	u32 i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(bitbang_phy_port); i++) {
+		if (strncmp(dev->name, bitbang_phy_port[i],
+			sizeof(dev->name)) == 0) {
+			(void)bb_miiphy_write(NULL, mii_id, regnum, value);
+			return;
+		}
+	}
+#endif /* CONFIG_BITBANGMII */
+
+	ug_regs = ugeth->uec_mii_regs;
+
+	/* Stop the MII management read cycle */
+	out_be32 (&ug_regs->miimcom, 0);
+	/* Setting up the MII Management Address Register */
+	tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+	out_be32 (&ug_regs->miimadd, tmp_reg);
+
+	/* Setting up the MII Management Control Register with the value */
+	out_be32 (&ug_regs->miimcon, (u32) value);
+	sync();
+
+	/* Wait till MII management write is complete */
+	while ((in_be32 (&ug_regs->miimind)) & MIIMIND_BUSY);
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value.  Clears miimcom first.  All PHY */
+/* configuration has to be done through the UEC MIIM regs */
+int uec_read_phy_reg (struct eth_device *dev, int mii_id, int regnum)
+{
+	uec_private_t *ugeth = (uec_private_t *) dev->priv;
+	uec_mii_t *ug_regs;
+	enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+	u32 tmp_reg;
+	u16 value;
+
+
+#if defined(CONFIG_BITBANGMII)
+	u32 i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(bitbang_phy_port); i++) {
+		if (strncmp(dev->name, bitbang_phy_port[i],
+			sizeof(dev->name)) == 0) {
+			(void)bb_miiphy_read(NULL, mii_id, regnum, &value);
+			return (value);
+		}
+	}
+#endif /* CONFIG_BITBANGMII */
+
+	ug_regs = ugeth->uec_mii_regs;
+
+	/* Setting up the MII Management Address Register */
+	tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+	out_be32 (&ug_regs->miimadd, tmp_reg);
+
+	/* clear MII management command cycle */
+	out_be32 (&ug_regs->miimcom, 0);
+	sync();
+
+	/* Perform an MII management read cycle */
+	out_be32 (&ug_regs->miimcom, MIIMCOM_READ_CYCLE);
+
+	/* Wait till MII management read is complete */
+	while ((in_be32 (&ug_regs->miimind)) &
+	       (MIIMIND_NOT_VALID | MIIMIND_BUSY));
+
+	/* Read MII management status  */
+	value = (u16) in_be32 (&ug_regs->miimstat);
+	if (value == 0xffff)
+		ugphy_vdbg
+			("read wrong value : mii_id %d,mii_reg %d, base %08x",
+			 mii_id, mii_reg, (u32) & (ug_regs->miimcfg));
+
+	return (value);
+}
+
+void mii_clear_phy_interrupt (struct uec_mii_info *mii_info)
+{
+	if (mii_info->phyinfo->ack_interrupt)
+		mii_info->phyinfo->ack_interrupt (mii_info);
+}
+
+void mii_configure_phy_interrupt (struct uec_mii_info *mii_info,
+				  u32 interrupts)
+{
+	mii_info->interrupts = interrupts;
+	if (mii_info->phyinfo->config_intr)
+		mii_info->phyinfo->config_intr (mii_info);
+}
+
+/* Writes MII_ADVERTISE with the appropriate values, after
+ * sanitizing advertise to make sure only supported features
+ * are advertised
+ */
+static void config_genmii_advert (struct uec_mii_info *mii_info)
+{
+	u32 advertise;
+	u16 adv;
+
+	/* Only allow advertising what this PHY supports */
+	mii_info->advertising &= mii_info->phyinfo->features;
+	advertise = mii_info->advertising;
+
+	/* Setup standard advertisement */
+	adv = uec_phy_read(mii_info, MII_ADVERTISE);
+	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+	if (advertise & ADVERTISED_10baseT_Half)
+		adv |= ADVERTISE_10HALF;
+	if (advertise & ADVERTISED_10baseT_Full)
+		adv |= ADVERTISE_10FULL;
+	if (advertise & ADVERTISED_100baseT_Half)
+		adv |= ADVERTISE_100HALF;
+	if (advertise & ADVERTISED_100baseT_Full)
+		adv |= ADVERTISE_100FULL;
+	uec_phy_write(mii_info, MII_ADVERTISE, adv);
+}
+
+static void genmii_setup_forced (struct uec_mii_info *mii_info)
+{
+	u16 ctrl;
+	u32 features = mii_info->phyinfo->features;
+
+	ctrl = uec_phy_read(mii_info, MII_BMCR);
+
+	ctrl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+		  BMCR_SPEED1000 | BMCR_ANENABLE);
+	ctrl |= BMCR_RESET;
+
+	switch (mii_info->speed) {
+	case SPEED_1000:
+		if (features & (SUPPORTED_1000baseT_Half
+				| SUPPORTED_1000baseT_Full)) {
+			ctrl |= BMCR_SPEED1000;
+			break;
+		}
+		mii_info->speed = SPEED_100;
+	case SPEED_100:
+		if (features & (SUPPORTED_100baseT_Half
+				| SUPPORTED_100baseT_Full)) {
+			ctrl |= BMCR_SPEED100;
+			break;
+		}
+		mii_info->speed = SPEED_10;
+	case SPEED_10:
+		if (features & (SUPPORTED_10baseT_Half
+				| SUPPORTED_10baseT_Full))
+			break;
+	default:		/* Unsupported speed! */
+		ugphy_err ("%s: Bad speed!", mii_info->dev->name);
+		break;
+	}
+
+	uec_phy_write(mii_info, MII_BMCR, ctrl);
+}
+
+/* Enable and Restart Autonegotiation */
+static void genmii_restart_aneg (struct uec_mii_info *mii_info)
+{
+	u16 ctl;
+
+	ctl = uec_phy_read(mii_info, MII_BMCR);
+	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+	uec_phy_write(mii_info, MII_BMCR, ctl);
+}
+
+static int gbit_config_aneg (struct uec_mii_info *mii_info)
+{
+	u16 adv;
+	u32 advertise;
+
+	if (mii_info->autoneg) {
+		/* Configure the ADVERTISE register */
+		config_genmii_advert (mii_info);
+		advertise = mii_info->advertising;
+
+		adv = uec_phy_read(mii_info, MII_CTRL1000);
+		adv &= ~(ADVERTISE_1000FULL |
+			 ADVERTISE_1000HALF);
+		if (advertise & SUPPORTED_1000baseT_Half)
+			adv |= ADVERTISE_1000HALF;
+		if (advertise & SUPPORTED_1000baseT_Full)
+			adv |= ADVERTISE_1000FULL;
+		uec_phy_write(mii_info, MII_CTRL1000, adv);
+
+		/* Start/Restart aneg */
+		genmii_restart_aneg (mii_info);
+	} else
+		genmii_setup_forced (mii_info);
+
+	return 0;
+}
+
+static int marvell_config_aneg (struct uec_mii_info *mii_info)
+{
+	/* The Marvell PHY has an erratum which requires
+	 * that certain registers get written in order
+	 * to restart autonegotiation */
+	uec_phy_write(mii_info, MII_BMCR, BMCR_RESET);
+
+	uec_phy_write(mii_info, 0x1d, 0x1f);
+	uec_phy_write(mii_info, 0x1e, 0x200c);
+	uec_phy_write(mii_info, 0x1d, 0x5);
+	uec_phy_write(mii_info, 0x1e, 0);
+	uec_phy_write(mii_info, 0x1e, 0x100);
+
+	gbit_config_aneg (mii_info);
+
+	return 0;
+}
+
+static int genmii_config_aneg (struct uec_mii_info *mii_info)
+{
+	if (mii_info->autoneg) {
+		/* Speed up the common case: if the link is already up and the
+		   speed and duplex match, skip autonegotiation */
+		if (!genmii_read_status(mii_info) && mii_info->link)
+			if (mii_info->duplex == DUPLEX_FULL &&
+			    mii_info->speed == SPEED_100)
+				if (mii_info->advertising &
+				    ADVERTISED_100baseT_Full)
+					return 0;
+
+		config_genmii_advert (mii_info);
+		genmii_restart_aneg (mii_info);
+	} else
+		genmii_setup_forced (mii_info);
+
+	return 0;
+}
+
+static int genmii_update_link (struct uec_mii_info *mii_info)
+{
+	u16 status;
+
+	/* Status is read once to clear old link state */
+	uec_phy_read(mii_info, MII_BMSR);
+
+	/*
+	 * Wait if the link is up, and autonegotiation is in progress
+	 * (ie - we're capable and it's not done)
+	 */
+	status = uec_phy_read(mii_info, MII_BMSR);
+	if ((status & BMSR_LSTATUS) && (status & BMSR_ANEGCAPABLE)
+	    && !(status & BMSR_ANEGCOMPLETE)) {
+		int i = 0;
+
+		while (!(status & BMSR_ANEGCOMPLETE)) {
+			/*
+			 * Timeout reached ?
+			 */
+			if (i > UGETH_AN_TIMEOUT) {
+				mii_info->link = 0;
+				return 0;
+			}
+
+			i++;
+			udelay(1000);	/* 1 ms */
+			status = uec_phy_read(mii_info, MII_BMSR);
+		}
+		mii_info->link = 1;
+	} else {
+		if (status & BMSR_LSTATUS)
+			mii_info->link = 1;
+		else
+			mii_info->link = 0;
+	}
+
+	return 0;
+}
+
+static int genmii_read_status (struct uec_mii_info *mii_info)
+{
+	u16 status;
+	int err;
+
+	/* Update the link, but return if there
+	 * was an error */
+	err = genmii_update_link (mii_info);
+	if (err)
+		return err;
+
+	if (mii_info->autoneg) {
+		status = uec_phy_read(mii_info, MII_STAT1000);
+
+		if (status & (LPA_1000FULL | LPA_1000HALF)) {
+			mii_info->speed = SPEED_1000;
+			if (status & LPA_1000FULL)
+				mii_info->duplex = DUPLEX_FULL;
+			else
+				mii_info->duplex = DUPLEX_HALF;
+		} else {
+			status = uec_phy_read(mii_info, MII_LPA);
+
+			if (status & (LPA_10FULL | LPA_100FULL))
+				mii_info->duplex = DUPLEX_FULL;
+			else
+				mii_info->duplex = DUPLEX_HALF;
+			if (status & (LPA_100FULL | LPA_100HALF))
+				mii_info->speed = SPEED_100;
+			else
+				mii_info->speed = SPEED_10;
+		}
+		mii_info->pause = 0;
+	}
+	/* On non-aneg, we assume what we put in BMCR is the speed,
+	 * though magic-aneg shouldn't prevent this case from occurring
+	 */
+
+	return 0;
+}
+
+static int bcm_init(struct uec_mii_info *mii_info)
+{
+	struct eth_device *edev = mii_info->dev;
+	uec_private_t *uec = edev->priv;
+
+	gbit_config_aneg(mii_info);
+
+	if ((uec->uec_info->enet_interface_type ==
+				PHY_INTERFACE_MODE_RGMII_RXID) &&
+			(uec->uec_info->speed == SPEED_1000)) {
+		u16 val;
+		int cnt = 50;
+
+		/* Wait for aneg to complete. */
+		do
+			val = uec_phy_read(mii_info, MII_BMSR);
+		while (--cnt && !(val & BMSR_ANEGCOMPLETE));
+
+		/* Set RXD clock delay. */
+		uec_phy_write(mii_info, 0x18, 0x7 | (7 << 12));
+
+		val = uec_phy_read(mii_info, 0x18);
+		/* Set RXD-to-RXC skew. */
+		val |= (1 << 8);
+		val |= (7 | (7 << 12));
+		/* Set the write-enable bit so bits 14:0 are written. */
+		val |= (1 << 15);
+		uec_phy_write(mii_info, 0x18, val);
+	}
+
+	return 0;
+}
+
+static int uec_marvell_init(struct uec_mii_info *mii_info)
+{
+	struct eth_device *edev = mii_info->dev;
+	uec_private_t *uec = edev->priv;
+	phy_interface_t iface = uec->uec_info->enet_interface_type;
+	int	speed = uec->uec_info->speed;
+
+	if ((speed == SPEED_1000) &&
+	   (iface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    iface == PHY_INTERFACE_MODE_RGMII_RXID ||
+	    iface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+		int temp;
+
+		temp = uec_phy_read(mii_info, MII_M1111_PHY_EXT_CR);
+		if (iface == PHY_INTERFACE_MODE_RGMII_ID) {
+			temp |= MII_M1111_RX_DELAY | MII_M1111_TX_DELAY;
+		} else if (iface == PHY_INTERFACE_MODE_RGMII_RXID) {
+			temp &= ~MII_M1111_TX_DELAY;
+			temp |= MII_M1111_RX_DELAY;
+		} else if (iface == PHY_INTERFACE_MODE_RGMII_TXID) {
+			temp &= ~MII_M1111_RX_DELAY;
+			temp |= MII_M1111_TX_DELAY;
+		}
+		uec_phy_write(mii_info, MII_M1111_PHY_EXT_CR, temp);
+
+		temp = uec_phy_read(mii_info, MII_M1111_PHY_EXT_SR);
+		temp &= ~MII_M1111_HWCFG_MODE_MASK;
+		temp |= MII_M1111_HWCFG_MODE_RGMII;
+		uec_phy_write(mii_info, MII_M1111_PHY_EXT_SR, temp);
+
+		uec_phy_write(mii_info, MII_BMCR, BMCR_RESET);
+	}
+
+	return 0;
+}
+
+static int marvell_read_status (struct uec_mii_info *mii_info)
+{
+	u16 status;
+	int err;
+
+	/* Update the link, but return if there
+	 * was an error */
+	err = genmii_update_link (mii_info);
+	if (err)
+		return err;
+
+	/* If the link is up, read the speed and duplex */
+	/* If we aren't autonegotiating, assume speeds
+	 * are as set */
+	if (mii_info->autoneg && mii_info->link) {
+		int speed;
+
+		status = uec_phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
+
+		/* Get the duplex */
+		if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
+			mii_info->duplex = DUPLEX_FULL;
+		else
+			mii_info->duplex = DUPLEX_HALF;
+
+		/* Get the speed */
+		speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
+		switch (speed) {
+		case MII_M1011_PHY_SPEC_STATUS_1000:
+			mii_info->speed = SPEED_1000;
+			break;
+		case MII_M1011_PHY_SPEC_STATUS_100:
+			mii_info->speed = SPEED_100;
+			break;
+		default:
+			mii_info->speed = SPEED_10;
+			break;
+		}
+		mii_info->pause = 0;
+	}
+
+	return 0;
+}
+
+static int marvell_ack_interrupt (struct uec_mii_info *mii_info)
+{
+	/* Clear the interrupts by reading the reg */
+	uec_phy_read(mii_info, MII_M1011_IEVENT);
+
+	return 0;
+}
+
+static int marvell_config_intr (struct uec_mii_info *mii_info)
+{
+	if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+		uec_phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+	else
+		uec_phy_write(mii_info, MII_M1011_IMASK,
+				MII_M1011_IMASK_CLEAR);
+
+	return 0;
+}
+
+static int dm9161_init (struct uec_mii_info *mii_info)
+{
+	/* Reset the PHY */
+	uec_phy_write(mii_info, MII_BMCR, uec_phy_read(mii_info, MII_BMCR) |
+		   BMCR_RESET);
+	/* Clear isolate mode so the PHY connects to the MAC */
+	uec_phy_write(mii_info, MII_BMCR, uec_phy_read(mii_info, MII_BMCR) &
+		   ~BMCR_ISOLATE);
+
+	uec_phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
+
+	config_genmii_advert (mii_info);
+	/* Start/restart aneg */
+	genmii_config_aneg (mii_info);
+
+	return 0;
+}
+
+static int dm9161_config_aneg (struct uec_mii_info *mii_info)
+{
+	return 0;
+}
+
+static int dm9161_read_status (struct uec_mii_info *mii_info)
+{
+	u16 status;
+	int err;
+
+	/* Update the link, but return if there was an error */
+	err = genmii_update_link (mii_info);
+	if (err)
+		return err;
+	/* If the link is up, read the speed and duplex.
+	   If we aren't autonegotiating, assume speeds are as set. */
+	if (mii_info->autoneg && mii_info->link) {
+		status = uec_phy_read(mii_info, MII_DM9161_SCSR);
+		if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
+			mii_info->speed = SPEED_100;
+		else
+			mii_info->speed = SPEED_10;
+
+		if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
+			mii_info->duplex = DUPLEX_FULL;
+		else
+			mii_info->duplex = DUPLEX_HALF;
+	}
+
+	return 0;
+}
+
+static int dm9161_ack_interrupt (struct uec_mii_info *mii_info)
+{
+	/* Clear the interrupt by reading the reg */
+	uec_phy_read(mii_info, MII_DM9161_INTR);
+
+	return 0;
+}
+
+static int dm9161_config_intr (struct uec_mii_info *mii_info)
+{
+	if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+		uec_phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
+	else
+		uec_phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
+
+	return 0;
+}
+
+static void dm9161_close (struct uec_mii_info *mii_info)
+{
+}
+
+static int fixed_phy_aneg (struct uec_mii_info *mii_info)
+{
+	mii_info->autoneg = 0; /* Turn off autonegotiation for a fixed PHY */
+	return 0;
+}
+
+static int fixed_phy_read_status (struct uec_mii_info *mii_info)
+{
+	int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(fixed_phy_port); i++) {
+		if (strncmp(mii_info->dev->name, fixed_phy_port[i].name,
+				strlen(mii_info->dev->name)) == 0) {
+			mii_info->speed = fixed_phy_port[i].speed;
+			mii_info->duplex = fixed_phy_port[i].duplex;
+			mii_info->link = 1; /* Link is always UP */
+			mii_info->pause = 0;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int smsc_config_aneg (struct uec_mii_info *mii_info)
+{
+	return 0;
+}
+
+static int smsc_read_status (struct uec_mii_info *mii_info)
+{
+	u16 status;
+	int err;
+
+	/* Update the link, but return if there
+	 * was an error */
+	err = genmii_update_link (mii_info);
+	if (err)
+		return err;
+
+	/* If the link is up, read the speed and duplex */
+	/* If we aren't autonegotiating, assume speeds
+	 * are as set */
+	if (mii_info->autoneg && mii_info->link) {
+		int	val;
+
+		status = uec_phy_read(mii_info, 0x1f);
+		val = (status & 0x1c) >> 2;
+
+		switch (val) {
+		case 1:
+			mii_info->duplex = DUPLEX_HALF;
+			mii_info->speed = SPEED_10;
+			break;
+		case 5:
+			mii_info->duplex = DUPLEX_FULL;
+			mii_info->speed = SPEED_10;
+			break;
+		case 2:
+			mii_info->duplex = DUPLEX_HALF;
+			mii_info->speed = SPEED_100;
+			break;
+		case 6:
+			mii_info->duplex = DUPLEX_FULL;
+			mii_info->speed = SPEED_100;
+			break;
+		}
+		mii_info->pause = 0;
+	}
+
+	return 0;
+}
+
+static struct phy_info phy_info_dm9161 = {
+	.phy_id = 0x0181b880,
+	.phy_id_mask = 0x0ffffff0,
+	.name = "Davicom DM9161E",
+	.init = dm9161_init,
+	.config_aneg = dm9161_config_aneg,
+	.read_status = dm9161_read_status,
+	.close = dm9161_close,
+};
+
+static struct phy_info phy_info_dm9161a = {
+	.phy_id = 0x0181b8a0,
+	.phy_id_mask = 0x0ffffff0,
+	.name = "Davicom DM9161A",
+	.features = MII_BASIC_FEATURES,
+	.init = dm9161_init,
+	.config_aneg = dm9161_config_aneg,
+	.read_status = dm9161_read_status,
+	.ack_interrupt = dm9161_ack_interrupt,
+	.config_intr = dm9161_config_intr,
+	.close = dm9161_close,
+};
+
+static struct phy_info phy_info_marvell = {
+	.phy_id = 0x01410c00,
+	.phy_id_mask = 0xffffff00,
+	.name = "Marvell 88E11x1",
+	.features = MII_GBIT_FEATURES,
+	.init = &uec_marvell_init,
+	.config_aneg = &marvell_config_aneg,
+	.read_status = &marvell_read_status,
+	.ack_interrupt = &marvell_ack_interrupt,
+	.config_intr = &marvell_config_intr,
+};
+
+static struct phy_info phy_info_bcm5481 = {
+	.phy_id = 0x0143bca0,
+	.phy_id_mask = 0x0ffffff0,
+	.name = "Broadcom 5481",
+	.features = MII_GBIT_FEATURES,
+	.read_status = genmii_read_status,
+	.init = bcm_init,
+};
+
+static struct phy_info phy_info_fixedphy = {
+	.phy_id = CONFIG_FIXED_PHY,
+	.phy_id_mask = CONFIG_FIXED_PHY,
+	.name = "Fixed PHY",
+	.config_aneg = fixed_phy_aneg,
+	.read_status = fixed_phy_read_status,
+};
+
+static struct phy_info phy_info_smsclan8700 = {
+	.phy_id = 0x0007c0c0,
+	.phy_id_mask = 0xfffffff0,
+	.name = "SMSC LAN8700",
+	.features = MII_BASIC_FEATURES,
+	.config_aneg = smsc_config_aneg,
+	.read_status = smsc_read_status,
+};
+
+static struct phy_info phy_info_genmii = {
+	.phy_id = 0x00000000,
+	.phy_id_mask = 0x00000000,
+	.name = "Generic MII",
+	.features = MII_BASIC_FEATURES,
+	.config_aneg = genmii_config_aneg,
+	.read_status = genmii_read_status,
+};
+
+static struct phy_info *phy_info[] = {
+	&phy_info_dm9161,
+	&phy_info_dm9161a,
+	&phy_info_marvell,
+	&phy_info_bcm5481,
+	&phy_info_smsclan8700,
+	&phy_info_fixedphy,
+	&phy_info_genmii,
+	NULL
+};
+
+u16 uec_phy_read(struct uec_mii_info *mii_info, u16 regnum)
+{
+	return mii_info->mdio_read (mii_info->dev, mii_info->mii_id, regnum);
+}
+
+void uec_phy_write(struct uec_mii_info *mii_info, u16 regnum, u16 val)
+{
+	mii_info->mdio_write (mii_info->dev, mii_info->mii_id, regnum, val);
+}
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev.  Return a struct phy_info structure describing that PHY.
+ */
+struct phy_info *uec_get_phy_info (struct uec_mii_info *mii_info)
+{
+	u16 phy_reg;
+	u32 phy_ID;
+	int i;
+	struct phy_info *theInfo = NULL;
+
+	/* Grab the bits from PHYIR1, and put them in the upper half */
+	phy_reg = uec_phy_read(mii_info, MII_PHYSID1);
+	phy_ID = (phy_reg & 0xffff) << 16;
+
+	/* Grab the bits from PHYIR2, and put them in the lower half */
+	phy_reg = uec_phy_read(mii_info, MII_PHYSID2);
+	phy_ID |= (phy_reg & 0xffff);
+
+	/* Loop through all the known PHY types, and find one that
+	 * matches the ID we read from the PHY. */
+	for (i = 0; phy_info[i]; i++)
+		if (phy_info[i]->phy_id ==
+		    (phy_ID & phy_info[i]->phy_id_mask)) {
+			theInfo = phy_info[i];
+			break;
+		}
+
+	/* This shouldn't happen, as we have generic PHY support */
+	if (theInfo == NULL) {
+		ugphy_info ("UEC: PHY id %x is not supported!", phy_ID);
+		return NULL;
+	} else {
+		ugphy_info ("UEC: PHY is %s (%x)", theInfo->name, phy_ID);
+	}
+
+	return theInfo;
+}
+
+void marvell_phy_interface_mode(struct eth_device *dev, phy_interface_t type,
+		int speed)
+{
+	uec_private_t *uec = (uec_private_t *) dev->priv;
+	struct uec_mii_info *mii_info;
+	u16 status;
+
+	if (!uec->mii_info) {
+		printf ("%s: the PHY is not initialized\n", __FUNCTION__);
+		return;
+	}
+	mii_info = uec->mii_info;
+
+	if (type == PHY_INTERFACE_MODE_RGMII) {
+		if (speed == SPEED_100) {
+			uec_phy_write(mii_info, 0x00, 0x9140);
+			uec_phy_write(mii_info, 0x1d, 0x001f);
+			uec_phy_write(mii_info, 0x1e, 0x200c);
+			uec_phy_write(mii_info, 0x1d, 0x0005);
+			uec_phy_write(mii_info, 0x1e, 0x0000);
+			uec_phy_write(mii_info, 0x1e, 0x0100);
+			uec_phy_write(mii_info, 0x09, 0x0e00);
+			uec_phy_write(mii_info, 0x04, 0x01e1);
+			uec_phy_write(mii_info, 0x00, 0x9140);
+			uec_phy_write(mii_info, 0x00, 0x1000);
+			udelay (100000);
+			uec_phy_write(mii_info, 0x00, 0x2900);
+			uec_phy_write(mii_info, 0x14, 0x0cd2);
+			uec_phy_write(mii_info, 0x00, 0xa100);
+			uec_phy_write(mii_info, 0x09, 0x0000);
+			uec_phy_write(mii_info, 0x1b, 0x800b);
+			uec_phy_write(mii_info, 0x04, 0x05e1);
+			uec_phy_write(mii_info, 0x00, 0xa100);
+			uec_phy_write(mii_info, 0x00, 0x2100);
+			udelay (1000000);
+		} else if (speed == SPEED_10) {
+			uec_phy_write(mii_info, 0x14, 0x8e40);
+			uec_phy_write(mii_info, 0x1b, 0x800b);
+			uec_phy_write(mii_info, 0x14, 0x0c82);
+			uec_phy_write(mii_info, 0x00, 0x8100);
+			udelay (1000000);
+		}
+	}
+
+	/* handle 88e1111 rev.B2 erratum 5.6 */
+	if (mii_info->autoneg) {
+		status = uec_phy_read(mii_info, MII_BMCR);
+		uec_phy_write(mii_info, MII_BMCR, status | BMCR_ANENABLE);
+	}
+	/* now the B2 will correctly report autoneg completion status */
+}
+
+void change_phy_interface_mode (struct eth_device *dev,
+				phy_interface_t type, int speed)
+{
+#ifdef CONFIG_PHY_MODE_NEED_CHANGE
+	marvell_phy_interface_mode (dev, type, speed);
+#endif
+}
diff --git a/marvell/uboot/drivers/qe/uec_phy.h b/marvell/uboot/drivers/qe/uec_phy.h
new file mode 100644
index 0000000..11cbc25
--- /dev/null
+++ b/marvell/uboot/drivers/qe/uec_phy.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2005, 2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description: UCC ethernet driver -- PHY handling
+ *		Driver for UEC on QE
+ *		Based on 8260_io/fcc_enet.c
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+#ifndef __UEC_PHY_H__
+#define __UEC_PHY_H__
+
+#define MII_end ((u32)-2)
+#define MII_read ((u32)-1)
+
+#define MIIMIND_BUSY		0x00000001
+#define MIIMIND_NOTVALID	0x00000004
+
+#define UGETH_AN_TIMEOUT	2000
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1	    0x17
+#define MII_CIS8201_EXTCON1_INIT    0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK	    0x19
+#define MII_CIS8201_IMASK_IEN	    0x8000
+#define MII_CIS8201_IMASK_SPEED	    0x4000
+#define MII_CIS8201_IMASK_LINK	    0x2000
+#define MII_CIS8201_IMASK_DUPLEX    0x1000
+#define MII_CIS8201_IMASK_MASK	    0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT	    0x1a
+#define MII_CIS8201_ISTAT_STATUS    0x8000
+#define MII_CIS8201_ISTAT_SPEED	    0x4000
+#define MII_CIS8201_ISTAT_LINK	    0x2000
+#define MII_CIS8201_ISTAT_DUPLEX    0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT	       0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT    0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX  0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED   0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT    0x0010
+#define MII_CIS8201_AUXCONSTAT_100     0x0008
+
+/* 88E1011 PHY Status Register */
+#define MII_M1011_PHY_SPEC_STATUS		0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000		0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100		0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK	0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX	0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED	0x0800
+#define MII_M1011_PHY_SPEC_STATUS_LINK		0x0400
+
+#define MII_M1011_IEVENT		0x13
+#define MII_M1011_IEVENT_CLEAR		0x0000
+
+#define MII_M1011_IMASK			0x12
+#define MII_M1011_IMASK_INIT		0x6400
+#define MII_M1011_IMASK_CLEAR		0x0000
+
+/* 88E1111 PHY Register */
+#define MII_M1111_PHY_EXT_CR            0x14
+#define MII_M1111_RX_DELAY              0x80
+#define MII_M1111_TX_DELAY              0x2
+#define MII_M1111_PHY_EXT_SR            0x1b
+#define MII_M1111_HWCFG_MODE_MASK       0xf
+#define MII_M1111_HWCFG_MODE_RGMII      0xb
+
+#define MII_DM9161_SCR			0x10
+#define MII_DM9161_SCR_INIT		0x0610
+#define MII_DM9161_SCR_RMII_INIT	0x0710
+
+/* DM9161 Specified Configuration and Status Register */
+#define MII_DM9161_SCSR			0x11
+#define MII_DM9161_SCSR_100F		0x8000
+#define MII_DM9161_SCSR_100H		0x4000
+#define MII_DM9161_SCSR_10F		0x2000
+#define MII_DM9161_SCSR_10H		0x1000
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR			0x15
+#define MII_DM9161_INTR_PEND		0x8000
+#define MII_DM9161_INTR_DPLX_MASK	0x0800
+#define MII_DM9161_INTR_SPD_MASK	0x0400
+#define MII_DM9161_INTR_LINK_MASK	0x0200
+#define MII_DM9161_INTR_MASK		0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE	0x0010
+#define MII_DM9161_INTR_SPD_CHANGE	0x0008
+#define MII_DM9161_INTR_LINK_CHANGE	0x0004
+#define MII_DM9161_INTR_INIT		0x0000
+#define MII_DM9161_INTR_STOP	\
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR		0x12
+#define MII_DM9161_10BTCSR_INIT		0x7800
+
+#define MII_BASIC_FEATURES    (SUPPORTED_10baseT_Half | \
+		 SUPPORTED_10baseT_Full | \
+		 SUPPORTED_100baseT_Half | \
+		 SUPPORTED_100baseT_Full | \
+		 SUPPORTED_Autoneg | \
+		 SUPPORTED_TP | \
+		 SUPPORTED_MII)
+
+#define MII_GBIT_FEATURES    (MII_BASIC_FEATURES | \
+		 SUPPORTED_1000baseT_Half | \
+		 SUPPORTED_1000baseT_Full)
+
+#define MII_READ_COMMAND		0x00000001
+
+#define MII_INTERRUPT_DISABLED		0x0
+#define MII_INTERRUPT_ENABLED		0x1
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF		0x00
+#define DUPLEX_FULL		0x01
+
+/* Indicates what features are supported by the interface. */
+#define SUPPORTED_10baseT_Half		(1 << 0)
+#define SUPPORTED_10baseT_Full		(1 << 1)
+#define SUPPORTED_100baseT_Half		(1 << 2)
+#define SUPPORTED_100baseT_Full		(1 << 3)
+#define SUPPORTED_1000baseT_Half	(1 << 4)
+#define SUPPORTED_1000baseT_Full	(1 << 5)
+#define SUPPORTED_Autoneg		(1 << 6)
+#define SUPPORTED_TP			(1 << 7)
+#define SUPPORTED_AUI			(1 << 8)
+#define SUPPORTED_MII			(1 << 9)
+#define SUPPORTED_FIBRE			(1 << 10)
+#define SUPPORTED_BNC			(1 << 11)
+#define SUPPORTED_10000baseT_Full	(1 << 12)
+
+#define ADVERTISED_10baseT_Half		(1 << 0)
+#define ADVERTISED_10baseT_Full		(1 << 1)
+#define ADVERTISED_100baseT_Half	(1 << 2)
+#define ADVERTISED_100baseT_Full	(1 << 3)
+#define ADVERTISED_1000baseT_Half	(1 << 4)
+#define ADVERTISED_1000baseT_Full	(1 << 5)
+#define ADVERTISED_Autoneg		(1 << 6)
+#define ADVERTISED_TP			(1 << 7)
+#define ADVERTISED_AUI			(1 << 8)
+#define ADVERTISED_MII			(1 << 9)
+#define ADVERTISED_FIBRE		(1 << 10)
+#define ADVERTISED_BNC			(1 << 11)
+#define ADVERTISED_10000baseT_Full	(1 << 12)
+
+/* Taken from mii_if_info and sungem_phy.h */
+struct uec_mii_info {
+	/* Information about the PHY type */
+	/* And management functions */
+	struct phy_info *phyinfo;
+
+	struct eth_device *dev;
+
+	/* forced speed & duplex (no autoneg)
+	 * partner speed & duplex & pause (autoneg)
+	 */
+	int speed;
+	int duplex;
+	int pause;
+
+	/* The most recently read link state */
+	int link;
+
+	/* Enabled Interrupts */
+	u32 interrupts;
+
+	u32 advertising;
+	int autoneg;
+	int mii_id;
+
+	/* private data pointer */
+	/* For use by PHYs to maintain extra state */
+	void *priv;
+
+	/* Provided by ethernet driver */
+	int (*mdio_read) (struct eth_device * dev, int mii_id, int reg);
+	void (*mdio_write) (struct eth_device * dev, int mii_id, int reg,
+			    int val);
+};
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * id will contain a number which represents the PHY.  During
+ * startup, the driver will poll the PHY to find out what its
+ * UID, as defined by registers 2 and 3, is.  The 32-bit result
+ * read from the PHY is ANDed with phy_id_mask to discard any
+ * bits which may change with the revision number and are
+ * unimportant to functionality.
+ *
+ * The callback functions below each take a struct uec_mii_info.
+ * Each PHY must declare config_aneg and read_status.
+ */
+struct phy_info {
+	u32 phy_id;
+	char *name;
+	unsigned int phy_id_mask;
+	u32 features;
+
+	/* Called to initialize the PHY */
+	int (*init) (struct uec_mii_info * mii_info);
+
+	/* Called to suspend the PHY for power management */
+	int (*suspend) (struct uec_mii_info * mii_info);
+
+	/* Reconfigures autonegotiation (or disables it) */
+	int (*config_aneg) (struct uec_mii_info * mii_info);
+
+	/* Determines the negotiated speed and duplex */
+	int (*read_status) (struct uec_mii_info * mii_info);
+
+	/* Clears any pending interrupts */
+	int (*ack_interrupt) (struct uec_mii_info * mii_info);
+
+	/* Enables or disables interrupts */
+	int (*config_intr) (struct uec_mii_info * mii_info);
+
+	/* Frees any memory or resources if needed */
+	void (*close) (struct uec_mii_info * mii_info);
+};
+
+struct phy_info *uec_get_phy_info (struct uec_mii_info *mii_info);
+void uec_write_phy_reg (struct eth_device *dev, int mii_id, int regnum,
+		    int value);
+int uec_read_phy_reg (struct eth_device *dev, int mii_id, int regnum);
+void mii_clear_phy_interrupt (struct uec_mii_info *mii_info);
+void mii_configure_phy_interrupt (struct uec_mii_info *mii_info,
+				  u32 interrupts);
+#endif /* __UEC_PHY_H__ */