ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/usb/gadget/function/u_ether.c b/marvell/linux/drivers/usb/gadget/function/u_ether.c
new file mode 100644
index 0000000..91c51cd
--- /dev/null
+++ b/marvell/linux/drivers/usb/gadget/function/u_ether.c
@@ -0,0 +1,2130 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include "u_ether.h"
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code, such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used.  Each end of the link uses one address.  The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of this network link.  (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ */
+
+#define UETH__VERSION	"29-May-2008"
+
+#ifdef CONFIG_CPU_ASR1903
+#define CONFIG_USB_GADGET_DEBUG_FILES	1
+extern u32 nr_aggr_fail_padding;
+#endif
+
+struct eth_dev {
+	/* lock is held while accessing port_usb
+	 */
+	spinlock_t		lock;
+	struct gether		*port_usb;
+
+	struct net_device	*net;
+	struct usb_gadget	*gadget;
+
+	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
+	struct list_head	tx_reqs, rx_reqs;
+	unsigned		tx_qlen;
+	atomic_t		no_tx_req_used;
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	int			*tx_mult_histogram;
+#endif
+
+	struct sk_buff_head	rx_frames;
+
+	unsigned		header_len;
+	unsigned int		ul_max_pkts_per_xfer;
+	struct sk_buff		*(*wrap)(struct gether *,
+						struct sk_buff *skb,
+						struct aggr_ctx *aggr_ctx);
+	int			(*unwrap)(struct gether *,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+	struct sk_buff		*(*unwrap_fixup)(struct gether *,
+						struct sk_buff *skb);
+
+	struct work_struct	work;
+	struct work_struct	rx_work;
+	struct tasklet_struct	rx_tl;
+
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+
+	bool			zlp;
+	u8			host_mac[ETH_ALEN];
+
+#ifdef CONFIG_DDR_DEVFREQ
+/* for nezha the boost freq is 355 MHz, while nezha3 uses 398 MHz */
+#define DDR_BOOST_FREQ		(350000)
+#if defined(CONFIG_CPU_ASR18XX) && defined(CONFIG_USB_MV_UDC)
+#define INTERVALS_PER_SEC	(50)
+#else
+#define INTERVALS_PER_SEC	(10)
+#endif
+
+#if defined(CONFIG_CPU_ASR18XX)
+#ifdef CONFIG_USB_MV_UDC
+#define DDR_TX_BOOST_BYTES	((10000000 / INTERVALS_PER_SEC) >> 3)	/* 10 Mbps */
+#define DDR_RX_BOOST_BYTES	((1000000 / INTERVALS_PER_SEC) >> 3)	/* 1 Mbps */
+#else
+#define DDR_TX_BOOST_BYTES	((15000000 / INTERVALS_PER_SEC) >> 3)	/* 15 Mbps */
+#define DDR_RX_BOOST_BYTES	((15000000 / INTERVALS_PER_SEC) >> 3)	/* 15 Mbps */
+#endif
+#else
+#define DDR_TX_BOOST_BYTES	((150000000 / INTERVALS_PER_SEC) >> 3)	/* 150 Mbps */
+#define DDR_RX_BOOST_BYTES	((20000000 / INTERVALS_PER_SEC) >> 3)	/* 20 Mbps */
+#endif
+
+	atomic_t		no_rx_skb;
+	unsigned int		tx_boost_threshhold;
+	unsigned int		rx_boost_threshhold;
+	struct pm_qos_request	ddr_qos_min;
+	struct delayed_work	txrx_monitor_work;
+	bool			dwork_inited;
+#endif
+};
+
+/*-------------------------------------------------------------------------*/
+
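+/* AGGR_DONE(): as used in eth_start_xmit(), an aggregate must be queued
+ * now either when the wrap callback left an overflow skb pending (the
+ * buffer is full) or when nothing was aggregated at all (the plain
+ * single-packet case).
+ */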
+#define AGGR_DONE(req)	\
+		(AGGRCTX(req)->pending_skb || \
+		AGGRCTX(req)->total_size == 0)
+
+#define RX_EXTRA	20	/* bytes guarding against rx overflows */
+#define DEFAULT_QLEN	5	/* quintuple buffering by default */
+
+#ifdef CONFIG_CPU_ASR1903
+static unsigned qmult_rx = 20;
+#else
+static unsigned qmult_rx = 15;
+#endif
+static unsigned qmult_tx = 40;
+static struct net_device *g_usbnet_dev;
+static unsigned rx_notl;
+/*
+ * TX_REQ_THRESHOLD is used for two different (but related) optimizations:
+ * 1. IRQ_OPTIMIZATION -> get a completion interrupt only every
+ *    TX_REQ_THRESHOLD requests.
+ * 2. USB_AGGREGATION -> start aggregating only after USB has more than
+ *    TX_REQ_THRESHOLD requests in flight.
+ * --> To prevent packet delay the values for #1 and #2 MUST be the same.
+ * NOTE: The value of TX_REQ_THRESHOLD must be smaller than half of the
+ *       queue size, e.g. TX_REQ_THRESHOLD_MAX <= DEFAULT_QLEN * (qmult_tx / 2)
+ */
+#ifdef CONFIG_CPU_ASR1903
+#define TX_REQ_THRESHOLD	(1)
+#else
+#define TX_REQ_THRESHOLD	(DEFAULT_QLEN * 4)
+#endif
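+/* Sanity check of the NOTE above, using the non-1903 defaults:
+ * TX_REQ_THRESHOLD = 5 * 4 = 20 <= DEFAULT_QLEN * (qmult_tx / 2) = 100.
+ */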
+
+#define USB_ALLOC_MEM_LOW_THRESH	(2 * 1024 * 1024)
+
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+static unsigned histogram_size;
+#endif
+
+/* only the asr1803 (MV UDC) needs to defer rx processing to a workqueue */
+#ifdef CONFIG_USB_MV_UDC
+#define USB_RX_USE_WORK	1
+#endif
+
+module_param(qmult_rx, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(qmult_rx, "rx queue length multiplier at high/super speed");
+module_param(qmult_tx, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(qmult_tx, "tx queue length multiplier at high/super speed");
+module_param(rx_notl, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(rx_notl, "rx not use tasklet");
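+/* All of the above are writable at runtime; assuming the usual gadget
+ * build, the knobs show up under /sys/module/<gadget module>/parameters/,
+ * e.g. .../parameters/qmult_tx.
+ */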
+
+#ifdef CONFIG_DDR_DEVFREQ
+static unsigned rx_boost_thr = DDR_RX_BOOST_BYTES;
+module_param(rx_boost_thr, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(rx_boost_thr, "rx ddr boost threshold");
+#endif
+
+#ifdef CONFIG_USBNET_USE_SG
+static bool sg_is_enabled;
+static struct scatterlist *
+alloc_sglist(int nents)
+{
+	struct scatterlist	*sg;
+
+	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
+	if (!sg) {
+		pr_err("usbnet error: sg alloc failed\n");
+		return NULL;
+	}
+	sg_init_table(sg, nents);
+	return sg;
+}
+
+static void
+build_sglist(struct usb_request *req, struct aggr_ctx *aggrctx)
+{
+	struct scatterlist	*sg = aggrctx->sg;
+	struct sk_buff *cur, *next;
+	int nr_pkts = 0;
+
+	if (unlikely(!sg)) {
+		pr_err("sg of aggr is NULL\n");
+		BUG();
+	}
+
+	skb_queue_walk_safe(&AGGRCTX(req)->skb_list, cur, next) {
+		sg_set_buf(&sg[nr_pkts], (void *)cur->data, cur->len);
+		nr_pkts++;
+	}
+	if (nr_pkts == 0 || nr_pkts > USBNET_SG_NENTS) {
+		pr_err("error pkts: %d\n", nr_pkts);
+		BUG();
+	}
+	sg_mark_end(&sg[nr_pkts - 1]);
+
+	req->sg = sg;
+	req->num_sgs = nr_pkts;
+}
+#endif
+
+/* for dual-speed hardware, use deeper queues at high/super speed */
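+/* e.g. with the non-1903 defaults a high-speed link gets an rx queue of
+ * 5 * 15 = 75 requests and a tx queue of 5 * 40 = 200, while a full-speed
+ * link stays at the plain DEFAULT_QLEN of 5.
+ */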
+static inline int qlen(struct usb_gadget *gadget, bool rx)
+{
+	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+					    gadget->speed == USB_SPEED_SUPER))
+		return DEFAULT_QLEN * (rx ? qmult_rx : qmult_tx);
+	else
+		return DEFAULT_QLEN;
+}
+
+int u_ether_rx_qlen(void)
+{
+	return qmult_rx * DEFAULT_QLEN;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* REVISIT there must be a better way than having two sets
+ * of debug calls ...
+ */
+#if 0
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG	DBG
+#else
+#define VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+#endif
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+static void histogram_realloc(struct eth_dev *dev, int skb_qlen)
+{
+	int *tmp, *oldbuf;
+	int size = histogram_size;
+
+	while (size <= skb_qlen)
+		size = size * 2;
+
+	tmp = kzalloc(sizeof(int) * size, GFP_ATOMIC);
+	if (!tmp) {
+		/* allocation failed: drop the histogram entirely */
+		histogram_size = 0;
+		kfree(dev->tx_mult_histogram);
+		dev->tx_mult_histogram = NULL;
+		return;
+	}
+
+	memcpy(tmp, dev->tx_mult_histogram, sizeof(int) * histogram_size);
+	oldbuf = dev->tx_mult_histogram;
+	dev->tx_mult_histogram = tmp;
+	histogram_size = size;
+	kfree(oldbuf);
+}
+
+static ssize_t
+u_ether_histogram_clean(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	struct eth_dev *priv = netdev_priv(to_net_dev(dev));
+	if (priv->tx_mult_histogram)
+		memset(priv->tx_mult_histogram, 0, histogram_size * sizeof(int));
+	return size;
+}
+
+static ssize_t
+u_ether_histogram_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct eth_dev *priv = netdev_priv(to_net_dev(dev));
+	char *fmt = "mult[%d] = %d/100 (%d)\n";
+	int i, ret = 0, total = 0, max_aggr;
+
+	if (!priv->tx_mult_histogram)
+		return 0;
+
+	for (i = 0 ; i < histogram_size; i++)
+		total += priv->tx_mult_histogram[i];
+
+	for (i = histogram_size - 1; i >= 0; i--)
+		if (priv->tx_mult_histogram[i])
+			break;
+	max_aggr = i;
+
+#ifdef CONFIG_CPU_ASR1903
+	ret += sprintf(buf + ret, "padding_fail=%d\n", nr_aggr_fail_padding);
+#endif
+
+	ret += sprintf(buf + ret, "histogram_size = %d\n", histogram_size);
+	for (i = 0; i <= max_aggr; i++)
+		ret += sprintf(buf + ret, fmt, i, priv->tx_mult_histogram[i] * 100 / total, priv->tx_mult_histogram[i]);
+	return ret;
+}
+
+static
+DEVICE_ATTR(u_ether_tx_mult_histogram, S_IRUGO|S_IWUSR, u_ether_histogram_show, u_ether_histogram_clean);
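+/* The attribute hangs off the net device; assuming the default "usb0"
+ * interface name it can be read with:
+ *   cat /sys/class/net/usb0/u_ether_tx_mult_histogram
+ * and any write to it clears the counters.
+ */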
+
+#ifdef CONFIG_DDR_DEVFREQ
+static ssize_t
+tx_boost_thresh_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	struct eth_dev *priv = netdev_priv(to_net_dev(dev));
+
+	sscanf(buf, "%d", &priv->tx_boost_threshhold);
+
+	pr_info("u_ether tx boost threshold set as %d\n",
+				priv->tx_boost_threshhold);
+
+	return strnlen(buf, PAGE_SIZE);
+}
+
+static ssize_t
+tx_boost_thresh_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct eth_dev *priv = netdev_priv(to_net_dev(dev));
+	int ret = 0;
+
+	ret += sprintf(buf + ret, "tx_boost_threshhold = %d\n",
+				priv->tx_boost_threshhold);
+	return ret;
+}
+static
+DEVICE_ATTR(tx_boost_param, S_IRUGO|S_IWUSR, tx_boost_thresh_show,
+		tx_boost_thresh_store);
+
+static ssize_t
+rx_boost_thresh_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	struct eth_dev *priv = netdev_priv(to_net_dev(dev));
+
+	sscanf(buf, "%d", &priv->rx_boost_threshhold);
+
+	pr_info("u_ether tx boost threshold set as %d\n",
+				priv->rx_boost_threshhold);
+
+	return strnlen(buf, PAGE_SIZE);
+}
+
+static ssize_t
+rx_boost_thresh_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct eth_dev *priv = netdev_priv(to_net_dev(dev));
+	int ret = 0;
+
+	ret += sprintf(buf + ret, "rx_boost_threshhold = %d\n",
+				priv->rx_boost_threshhold);
+	return ret;
+}
+static
+DEVICE_ATTR(rx_boost_param, S_IRUGO|S_IWUSR, rx_boost_thresh_show,
+		rx_boost_thresh_store);
+#endif
+
+static struct attribute *u_ether_attrs[] = {
+	&dev_attr_u_ether_tx_mult_histogram.attr,
+#ifdef CONFIG_DDR_DEVFREQ
+	&dev_attr_tx_boost_param.attr,
+	&dev_attr_rx_boost_param.attr,
+#endif
+	NULL,
+};
+static struct attribute_group u_ether_attr_group = {
+	.attrs = u_ether_attrs,
+};
+#endif /*CONFIG_USB_GADGET_DEBUG_FILES*/
+
+static inline void req_aggr_clean(struct usb_request *req)
+{
+	struct sk_buff *skb;
+	BUG_ON(!AGGRCTX(req));
+	BUG_ON(AGGRCTX(req)->pending_skb);
+
+	while ((skb = skb_dequeue(&AGGRCTX(req)->skb_list) ))
+		dev_kfree_skb_any(skb);
+
+	AGGRCTX(req)->total_size = 0;
+	req->length = 0;
+	req->buf = NULL;
+#ifdef CONFIG_USBNET_USE_SG
+	AGGRCTX(req)->num_sgs = 0;
+	if (AGGRCTX(req)->sg)
+		sg_init_table(AGGRCTX(req)->sg, USBNET_SG_NENTS);
+	req->num_sgs = 0;
+	req->num_mapped_sgs = 0;
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+static int ueth_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+	int		status = 0;
+
+	/* don't change MTU on "live" link (peer won't know) */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		status = -EBUSY;
+	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		status = -ERANGE;
+	else
+		net->mtu = new_mtu;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return status;
+}
+
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	struct eth_dev *dev = netdev_priv(net);
+
+	strlcpy(p->driver, "g_ether", sizeof(p->driver));
+	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
+	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
+}
+
+/* REVISIT can also support:
+ *   - WOL (by tracking suspends and issuing remote wakeup)
+ *   - msglevel (implies updated messaging)
+ *   - ... probably more ethtool ops
+ */
+
+static const struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
+	if (test_and_set_bit(flag, &dev->todo))
+		return;
+	if (!schedule_work(&dev->work))
+		ERROR(dev, "kevent %d may have been dropped\n", flag);
+	else
+		DBG(dev, "kevent %d scheduled\n", flag);
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+static void rx_complete_notl(struct usb_ep *ep, struct usb_request *req);
+static void tx_complete(struct usb_ep *ep, struct usb_request *req);
+
+#define EXTRA_SKB_WIFI_HEADROOM 96
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff	*skb;
+	int		retval = -ENOMEM;
+	size_t		size = 0;
+	struct usb_ep	*out;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		out = dev->port_usb->out_ep;
+	else
+		out = NULL;
+
+	if (!out) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOTCONN;
+	}
+
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*maxpacket), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 *
+	 * RNDIS uses internal framing, and explicitly allows senders to
+	 * pad to end-of-packet.  That's potentially nice for speed, but
+	 * means receivers can't recover lost synch on their own (because
+	 * new packets don't only start after a short RX).
+	 */
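+	/* Worked example (no fixed framing): with a 1500-byte MTU, a
+	 * 512-byte high-speed bulk maxpacket and the 96-byte wifi headroom
+	 * below, the sum 14 + 1500 + 20 + header_len + 96 is rounded up to
+	 * a multiple of 512, then scaled by ul_max_pkts_per_xfer when UL
+	 * aggregation is enabled.
+	 */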
+	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
+	size += dev->port_usb->header_len;
+
+	size += EXTRA_SKB_WIFI_HEADROOM; /* for wifi header */
+	size += out->maxpacket - 1;
+	size -= size % out->maxpacket;
+
+	if (dev->ul_max_pkts_per_xfer)
+		size *= dev->ul_max_pkts_per_xfer;
+
+	if (dev->port_usb->is_fixed)
+		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	pr_debug("%s: size: %zu\n", __func__, size);
+
+	//if ((global_zone_page_state(NR_FREE_PAGES) << PAGE_SHIFT) < USB_ALLOC_MEM_LOW_THRESH) {
+	if ((global_zone_page_state(NR_FREE_PAGES) << PAGE_SHIFT) < ((min_free_kbytes + 64) << 10)) {
+		pr_err_ratelimited("usb mem low\n");
+		skb = NULL;
+		goto enomem;
+	}
+
+#ifdef CONFIG_USB_DWC2
+	/* add 3 bytes for the 4-byte alignment requirement;
+	 * wifi headroom is not counted in the fixed length
+	 */
+	if (dev->port_usb->is_fixed)
+		skb = alloc_skb(size + NET_IP_ALIGN + 3 + EXTRA_SKB_WIFI_HEADROOM, gfp_flags);
+	else
+		skb = alloc_skb(size + NET_IP_ALIGN + 3, gfp_flags);
+#else
+	if (dev->port_usb->is_fixed)
+		skb = alloc_skb(size + NET_IP_ALIGN + EXTRA_SKB_WIFI_HEADROOM, gfp_flags);
+	else
+		skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+#endif
+	if (skb == NULL) {
+		ERROR(dev, "no rx skb\n");
+		goto enomem;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		out = dev->port_usb->out_ep;
+	else
+		out = NULL;
+
+	if (!out) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		goto enomem;
+	}
+
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.  Note:
+	 * RNDIS headers involve variable numbers of LE32 values.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN + EXTRA_SKB_WIFI_HEADROOM);
+
+#ifdef CONFIG_USB_DWC2
+	if (((u32)skb->data) & 0x3)
+		skb_reserve(skb, (4 - (((u32)skb->data) & 3)));
+#endif
+
+	req->buf = skb->data;
+	req->length = size;
+	req->context = skb;
+
+#if !defined(CONFIG_USB_DWC3) && !defined(CONFIG_USB_DWC2)
+	/* Always active, handled in the low level driver*/
+	req->no_interrupt = 1;
+#endif
+
+	retval = usb_ep_queue(out, req, gfp_flags);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (retval == -ENOMEM)
+enomem:
+		defer_kevent(dev, WORK_RX_MEMORY);
+	if (retval) {
+		printk_ratelimited(KERN_DEBUG "rx submit --> %d\n", retval);
+		if (skb)
+			dev_kfree_skb_any(skb);
+	}
+	return retval;
+}
+
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags);
+static void rx_complete_notl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context, *skb2;
+	struct eth_dev	*dev = ep->driver_data;
+	int		status = req->status;
+	unsigned long	flags;
+	bool		queue = false;
+
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+
+		if (dev->unwrap) {
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->port_usb) {
+				status = dev->unwrap(dev->port_usb,
+							skb,
+							&dev->rx_frames);
+			} else {
+				dev_kfree_skb_any(skb);
+				skb = NULL;
+				status = -ENOTCONN;
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+		} else {
+			skb_queue_tail(&dev->rx_frames, skb);
+		}
+		skb = NULL;
+
+		if (!status)
+			queue = true;
+		else
+			pr_err_ratelimited("%s nq status: %d\n", __func__, status);
+
+		skb2 = skb_dequeue(&dev->rx_frames);
+		while (skb2) {
+			if (status < 0
+					|| ETH_HLEN > skb2->len
+					|| skb2->len > VLAN_ETH_FRAME_LEN) {
+				dev->net->stats.rx_errors++;
+				dev->net->stats.rx_length_errors++;
+				ERROR(dev, "rx length %d\n", skb2->len);
+				dev_kfree_skb_any(skb2);
+				goto next_frame;
+			}
+			skb2->protocol = eth_type_trans(skb2, dev->net);
+			dev->net->stats.rx_packets++;
+			dev->net->stats.rx_bytes += skb2->len;
+
+			/* no buffer copies needed, unless hardware can't
+			 * use skb buffers.
+			 */
+			status = netif_rx(skb2);
+next_frame:
+			skb2 = skb_dequeue(&dev->rx_frames);
+		}
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDBG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		ERROR(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->net->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		queue = true;
+		dev->net->stats.rx_errors++;
+		ERROR(dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+clean:
+	spin_lock_irqsave(&dev->req_lock, flags);
+	list_add(&req->list, &dev->rx_reqs);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	if (queue)
+		rx_fill(dev, GFP_ATOMIC);
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = ep->driver_data;
+	int		status = req->status;
+	bool		queue = false;
+
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+
+		if (dev->unwrap) {
+			unsigned long	flags;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->port_usb) {
+				status = dev->unwrap(dev->port_usb,
+							skb,
+							&dev->rx_frames);
+				if (status == -EINVAL)
+					dev->net->stats.rx_errors++;
+				else if (status == -EOVERFLOW)
+					dev->net->stats.rx_over_errors++;
+			} else {
+				dev_kfree_skb_any(skb);
+				status = -ENOTCONN;
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+		} else {
+			skb_queue_tail(&dev->rx_frames, skb);
+		}
+		if (!status)
+			queue = true;
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDBG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DBG(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->net->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		queue = true;
+		dev_kfree_skb_any(skb);
+		dev->net->stats.rx_errors++;
+		DBG(dev, "rx status %d\n", status);
+		break;
+	}
+
+clean:
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->rx_reqs);
+	spin_unlock(&dev->req_lock);
+#ifndef USB_RX_USE_WORK
+	if (queue)
+		tasklet_schedule(&dev->rx_tl);
+#else
+	if (queue)
+		schedule_work(&dev->rx_work);
+#endif
+}
+
+static inline void usb_ep_free_request_tx_mult(struct usb_ep *ep,
+					        struct usb_request *req)
+{
+	req_aggr_clean(req);
+	BUG_ON(!AGGRCTX(req));
+#ifdef CONFIG_USBNET_USE_SG
+	kfree(AGGRCTX(req)->sg);
+#endif
+	kfree(AGGRCTX(req));
+	req->context = NULL;
+	usb_ep_free_request(ep, req);
+}
+
+static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
+{
+	unsigned		i;
+	struct usb_request	*req;
+	bool			usb_in;
+
+	if (!n)
+		return -ENOMEM;
+
+	if (ep->desc->bEndpointAddress & USB_DIR_IN)
+		usb_in = true;
+	else
+		usb_in = false;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry(req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
+
+	while (i--) {
+		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+		if (!req)
+			return list_empty(list) ? -ENOMEM : 0;
+		/* update completion handler */
+		if (usb_in) {
+			req->complete = tx_complete;
+			req->context = kzalloc(sizeof(struct aggr_ctx), GFP_ATOMIC);
+			if (!req->context) {
+				usb_ep_free_request(ep, req);
+				pr_err("%s:%d: error: only %d reqs allocated\n",
+				           __func__, __LINE__, (n - i));
+				return list_empty(list) ? -ENOMEM : 0;
+			}
+			skb_queue_head_init(&AGGRCTX(req)->skb_list);
+			AGGRCTX(req)->total_size = 0;
+#ifdef CONFIG_USBNET_USE_SG
+			if (sg_is_enabled)
+				AGGRCTX(req)->sg = alloc_sglist(USBNET_SG_NENTS);
+#endif
+		} else {
+			if (rx_notl)
+				req->complete = rx_complete_notl;
+			else
+				req->complete = rx_complete;
+		}
+		list_add(&req->list, list);
+	}
+
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del(&req->list);
+		if (usb_in) {
+			usb_ep_free_request_tx_mult(ep, req);
+		} else
+			usb_ep_free_request(ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of(next, struct usb_request, list);
+	}
+	return 0;
+}
+
+static int alloc_requests(struct eth_dev *dev, struct gether *link,
+			   unsigned n_rx, unsigned n_tx)
+{
+	int	status;
+
+	spin_lock(&dev->req_lock);
+	status = prealloc(&dev->tx_reqs, link->in_ep, n_tx);
+	if (status < 0)
+		goto fail;
+	status = prealloc(&dev->rx_reqs, link->out_ep, n_rx);
+	if (status < 0)
+		goto fail;
+	goto done;
+fail:
+	DBG(dev, "can't alloc requests\n");
+done:
+	spin_unlock(&dev->req_lock);
+	return status;
+}
+
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+	int			req_cnt = 0;
+
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		/* break the nexus of continuous completion and re-submission*/
+		if (++req_cnt > qlen(dev->gadget, true))
+			break;
+
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		if (rx_submit(dev, req, gfp_flags) < 0) {
+			spin_lock_irqsave(&dev->req_lock, flags);
+			list_add(&req->list, &dev->rx_reqs);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			defer_kevent(dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+}
+
+static void process_rx_tl(unsigned long priv)
+{
+	struct eth_dev	*dev = (struct eth_dev *)priv;
+	struct sk_buff	*skb;
+	int		status = 0;
+
+	if (!dev->port_usb)
+		return;
+
+	while ((skb = skb_dequeue(&dev->rx_frames))) {
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > VLAN_ETH_FRAME_LEN) {
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_length_errors++;
+			DBG(dev, "rx length %d\n", skb->len);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		if (dev->unwrap_fixup) {
+			struct sk_buff *new =
+				dev->unwrap_fixup(dev->port_usb, skb);
+			if (!new) {
+				pr_info("unwrap_fixup failed\n");
+				dev_kfree_skb_any(skb);
+				continue;
+			}
+
+			dev_kfree_skb_any(skb);
+			skb = new;
+			WARN_ON(!skb_mac_header_was_set(skb));
+		}
+
+#ifdef CONFIG_DDR_DEVFREQ
+		atomic_inc(&dev->no_rx_skb);
+#endif
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->net->stats.rx_packets++;
+		dev->net->stats.rx_bytes += skb->len;
+
+		status = netif_rx(skb);
+	}
+
+	if (netif_running(dev->net))
+		rx_fill(dev, GFP_ATOMIC);
+}
+
+#ifdef USB_RX_USE_WORK
+static DEFINE_MUTEX(rx_work_lock);
+
+static void process_rx_work(struct work_struct *data)
+{
+	struct eth_dev	*dev = container_of(data, struct eth_dev, rx_work);
+
+	mutex_lock(&rx_work_lock);
+	local_bh_disable();
+	process_rx_tl((unsigned long)dev);
+	local_bh_enable();
+	mutex_unlock(&rx_work_lock);
+}
+#endif
+
+#if defined(USB_RX_USE_WORK) && defined(CONFIG_USB_MV_HSIC_UDC)
+static DEFINE_MUTEX(hsic_rx_work_lock);
+
+static void process_hsic_rx_work(struct work_struct *data)
+{
+	struct eth_dev	*dev = container_of(data, struct eth_dev, rx_work);
+
+	mutex_lock(&hsic_rx_work_lock);
+	local_bh_disable();
+	process_rx_tl((unsigned long)dev);
+	local_bh_enable();
+	mutex_unlock(&hsic_rx_work_lock);
+}
+#endif
+
+static void eth_work(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
+
+	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+		if (netif_running(dev->net))
+			rx_fill(dev, GFP_KERNEL);
+	}
+
+	if (dev->todo)
+		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
+static void tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct eth_dev	*dev;
+	struct net_device *net;
+	struct usb_request *new_req;
+	struct usb_ep *in;
+	int length;
+	int retval;
+	int skb_qlen = skb_queue_len(&AGGRCTX(req)->skb_list);
+
+	if (!ep->driver_data) {
+		usb_ep_free_request_tx_mult(ep, req);
+		return;
+	}
+
+	dev = ep->driver_data;
+	net = dev->net;
+
+	if (!dev->port_usb) {
+		usb_ep_free_request_tx_mult(ep, req);
+		return;
+	}
+
+	switch (req->status) {
+	default:
+		dev->net->stats.tx_errors += skb_qlen;
+		VDBG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		if (!req->zero)
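+			/* the request was likely padded by one byte to
+			 * avoid a zlp; don't count the pad byte */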
+			dev->net->stats.tx_bytes += req->length - 1;
+		else
+			dev->net->stats.tx_bytes += req->length;
+		dev->net->stats.tx_packets += skb_qlen;
+	}
+
+	spin_lock(&dev->req_lock);
+	req_aggr_clean(req);
+
+	list_add_tail(&req->list, &dev->tx_reqs);
+	atomic_dec(&dev->no_tx_req_used);
+	in = dev->port_usb->in_ep;
+
+	if (!list_empty(&dev->tx_reqs)) {
+		new_req = container_of(dev->tx_reqs.next,
+				struct usb_request, list);
+		list_del(&new_req->list);
+		spin_unlock(&dev->req_lock);
+		if (AGGRCTX(new_req)->total_size > 0) {
+			length = AGGRCTX(new_req)->total_size;
+
+			/* NCM requires no zlp if transfer is
+			 * dwNtbInMaxSize */
+			if (dev->port_usb->is_fixed &&
+				length == dev->port_usb->fixed_in_len &&
+				(length % in->maxpacket) == 0)
+				new_req->zero = 0;
+			else
+				new_req->zero = 1;
+
+			/* use zlp framing on tx for strict CDC-Ether
+			 * conformance, though any robust network rx
+			 * path ignores extra padding. and some hardware
+			 * doesn't like to write zlps.
+			 */
+			if (new_req->zero && !dev->zlp &&
+					(length % in->maxpacket) == 0) {
+				new_req->zero = 0;
+				length++;
+#ifdef CONFIG_USBNET_USE_SG
+				if (sg_is_enabled && new_req->num_sgs) {
+					new_req->sg[new_req->num_sgs - 1].length++;
+				}
+#endif
+			}
+			new_req->length = length;
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+			skb_qlen = skb_queue_len(&AGGRCTX(new_req)->skb_list);
+			if (skb_qlen > histogram_size && histogram_size) {
+				histogram_realloc(dev, skb_qlen);
+				pr_err("%s: %d histogram buffer too small, realloc to %d, "
+				      "hold_count=%d\n", __func__, __LINE__, histogram_size,
+				      skb_qlen);
+			}
+			}
+			if (dev->tx_mult_histogram && skb_qlen <= histogram_size)
+				dev->tx_mult_histogram[skb_qlen - 1]++;
+#endif
+#ifdef CONFIG_USBNET_USE_SG
+			if (sg_is_enabled)
+				build_sglist(new_req, AGGRCTX(new_req));
+#endif
+			retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+			switch (retval) {
+			default:
+				DBG(dev, "tx queue err %d\n", retval);
+				dev->net->stats.tx_dropped +=
+					              skb_queue_len(&AGGRCTX(new_req)->skb_list);
+				req_aggr_clean(new_req);
+				spin_lock(&dev->req_lock);
+				list_add_tail(&new_req->list,
+						&dev->tx_reqs);
+				spin_unlock(&dev->req_lock);
+				break;
+			case 0:
+				atomic_inc(&dev->no_tx_req_used);
+				//net->trans_start = jiffies;
+			}
+		} else {
+			spin_lock(&dev->req_lock);
+			/*
+			 * Put the idle request at the back of the
+			 * queue. The xmit function will put the
+			 * unfinished request at the beginning of the
+			 * queue.
+			 */
+			list_add_tail(&new_req->list, &dev->tx_reqs);
+			spin_unlock(&dev->req_lock);
+		}
+	} else {
+		spin_unlock(&dev->req_lock);
+	}
+
+	if (netif_carrier_ok(dev->net))
+		netif_wake_queue(dev->net);
+}
+
+static inline int is_promisc(u16 cdc_filter)
+{
+	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			skb_qlen = 0;
+	int			length = skb->len;
+	struct sk_buff		*pending_skb = NULL;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+	struct usb_ep		*in;
+	u16			cdc_filter;
+
+	if (unlikely(!skb))
+		return NETDEV_TX_OK;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb) {
+		in = dev->port_usb->in_ep;
+		cdc_filter = dev->port_usb->cdc_filter;
+	} else {
+		in = NULL;
+		cdc_filter = 0;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (!in) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* apply outgoing CDC or RNDIS filters */
+	if (!is_promisc(cdc_filter)) {
+		u8		*dest = skb->data;
+
+		if (is_multicast_ether_addr(dest)) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (is_broadcast_ether_addr(dest))
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(cdc_filter & type)) {
+				dev_kfree_skb_any(skb);
+				return NETDEV_TX_OK;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	/*
+	 * this freelist can be empty if an interrupt triggered disconnect()
+	 * and reconfigured the gadget (shutting down this queue) after the
+	 * network stack decided to xmit but before we got the spinlock.
+	 */
+	if (list_empty(&dev->tx_reqs)) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	req = container_of(dev->tx_reqs.next, struct usb_request, list);
+	list_del(&req->list);
+
+	/* temporarily stop TX queue when the freelist empties */
+	if (list_empty(&dev->tx_reqs))
+		netif_stop_queue(net);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	BUG_ON(!AGGRCTX(req));
+	//TODO
+	//BUG_ON(skb->next || skb->prev);
+	BUG_ON(skb->next);
+
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 * or there's not enough space for extra headers we need
+	 */
+	if (dev->wrap) {
+		unsigned long	flags;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->port_usb)
+			skb = dev->wrap(dev->port_usb, skb, AGGRCTX(req));
+		spin_unlock_irqrestore(&dev->lock, flags);
+		if (!skb)
+			goto drop;
+	}
+	req->buf = skb->data;
+
+	if (AGGRCTX(req)->total_size > 0) {
+		BUG_ON(skb_queue_empty(&AGGRCTX(req)->skb_list));
+		length = AGGRCTX(req)->total_size;
+	} else {
+		BUG_ON(!skb_queue_empty(&AGGRCTX(req)->skb_list));
+		skb_queue_tail(&AGGRCTX(req)->skb_list, skb);
+		length = skb->len;
+	}
+
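+	/* Aggregation is still in progress and the UDC already has more than
+	 * TX_REQ_THRESHOLD transfers in flight: park the request back at the
+	 * head of tx_reqs so the next xmit keeps appending to it.
+	 */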
+	if (!AGGR_DONE(req) &&
+		      atomic_read(&dev->no_tx_req_used) > TX_REQ_THRESHOLD) {
+		BUG_ON(!AGGRCTX(req)->total_size);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		goto success;
+	}
+	atomic_inc(&dev->no_tx_req_used);
+
+	if (unlikely(!dev->port_usb)) {
+		ERROR(dev, "port_usb = NULL\n");
+		goto drop;
+	}
+	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
+	if (dev->port_usb->is_fixed &&
+	length == dev->port_usb->fixed_in_len &&
+	(length % in->maxpacket) == 0)
+		req->zero = 0;
+	else
+		req->zero = 1;
+
+	/* use zlp framing on tx for strict CDC-Ether conformance,
+	 * though any robust network rx path ignores extra padding.
+	 * and some hardware doesn't like to write zlps.
+	 */
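+	/* e.g. a 512-byte aggregate on a 512-byte maxpacket IN endpoint gets
+	 * one pad byte appended here instead of a zlp, so a host that relies
+	 * on short-packet termination still sees the transfer end.
+	 */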
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+		req->zero = 0;
+		length++;
+#ifdef CONFIG_USBNET_USE_SG
+		if (sg_is_enabled && req->num_sgs) {
+			req->sg[req->num_sgs - 1].length++;
+		}
+#endif
+	}
+	req->length = length;
+
+#if defined(CONFIG_USB_DWC3) || defined(CONFIG_USB_DWC2)
+	req->no_interrupt = 0;
+#else
+	/* throttle highspeed IRQ rate back slightly */
+	if (gadget_is_dualspeed(dev->gadget) &&
+		(dev->gadget->speed >= USB_SPEED_HIGH)) {
+		dev->tx_qlen++;
+		if (dev->tx_qlen == TX_REQ_THRESHOLD) {
+			req->no_interrupt = 0;
+			dev->tx_qlen = 0;
+		} else {
+			req->no_interrupt = 1;
+		}
+	} else {
+		req->no_interrupt = 0;
+	}
+#endif
+
+	pending_skb = AGGRCTX(req)->pending_skb;
+	AGGRCTX(req)->pending_skb = NULL;
+
+#ifdef CONFIG_USBNET_USE_SG
+	if (sg_is_enabled)
+		build_sglist(req, AGGRCTX(req));
+#endif
+
+	retval = usb_ep_queue(in, req, GFP_ATOMIC);
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	if (likely((retval == 0) && (req->context != NULL))) {
+		skb_qlen = skb_queue_len(&AGGRCTX(req)->skb_list);
+	} else if (unlikely(req->context == NULL)) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		pr_err_ratelimited("req is already freed\n");
+		return NETDEV_TX_OK;
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	switch (retval) {
+	default:
+		DBG(dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+		if (skb_qlen > histogram_size && histogram_size) {
+			histogram_realloc(dev, skb_qlen);
+			pr_err("%s: %d histogram buffer to small, realloc to %d, "
+				  "hold_count=%d\n", __func__, __LINE__, histogram_size,
+				  skb_qlen);
+		}
+		if (dev->tx_mult_histogram && skb_qlen <= histogram_size)
+			dev->tx_mult_histogram[skb_qlen - 1]++;
+#endif
+		//net->trans_start = jiffies;
+		break;
+	}
+
+	if (retval) {
+		atomic_dec(&dev->no_tx_req_used);
+drop:
+		dev->net->stats.tx_dropped += (skb_qlen == 0) ? 1 : skb_qlen;
+		req_aggr_clean(req);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (list_empty(&dev->tx_reqs))
+			netif_start_queue(net);
+		list_add_tail(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+
+	if (pending_skb)
+		return eth_start_xmit(pending_skb, net);
+success:
+	return NETDEV_TX_OK;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	DBG(dev, "%s\n", __func__);
+
+	/* fill the rx queue */
+	rx_fill(dev, gfp_flags);
+
+	/* and open the tx floodgates */
+	dev->tx_qlen = 0;
+	netif_wake_queue(dev->net);
+}
+
+static int eth_open(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	struct gether	*link;
+
+	DBG(dev, "%s\n", __func__);
+	if (netif_carrier_ok(dev->net))
+		eth_start(dev, GFP_KERNEL);
+
+	spin_lock_irq(&dev->lock);
+	link = dev->port_usb;
+	if (link && link->open)
+		link->open(link);
+	spin_unlock_irq(&dev->lock);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	if (sysfs_create_group(&dev->net->dev.kobj, &u_ether_attr_group))
+		pr_err("%s:%d: fail to register sysfs attr group\n", __func__,
+		                __LINE__);
+#endif
+
+	return 0;
+}
+
+static int eth_stop(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+
+	VDBG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	INFO(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
+		dev->net->stats.rx_errors, dev->net->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb) {
+		struct gether	*link = dev->port_usb;
+		const struct usb_endpoint_descriptor *in;
+		const struct usb_endpoint_descriptor *out;
+
+		if (link->close)
+			link->close(link);
+
+		/* NOTE:  we have no abort-queue primitive we could use
+		 * to cancel all pending I/O.  Instead, we disable then
+		 * reenable the endpoints ... this idiom may leave toggle
+		 * wrong, but that's a self-correcting error.
+		 *
+		 * REVISIT:  we *COULD* just let the transfers complete at
+		 * their own pace; the network stack can handle old packets.
+		 * For the moment we leave this here, since it works.
+		 */
+		in = link->in_ep->desc;
+		out = link->out_ep->desc;
+		/* usb_ep_disable(link->in_ep);
+		usb_ep_disable(link->out_ep); */
+		if (netif_carrier_ok(net)) {
+			INFO(dev, "host still using in/out endpoints\n");
+			link->in_ep->desc = in;
+			link->out_ep->desc = out;
+			/* usb_ep_enable(link->in_ep);
+			usb_ep_enable(link->out_ep); */
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	sysfs_remove_group(&dev->net->dev.kobj, &u_ether_attr_group);
+#endif
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static u8 host_ethaddr[ETH_ALEN];
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+#ifdef CONFIG_USB_MV_HSIC_UDC
+static u8 host_ethaddr_hsic[ETH_ALEN];
+static char *dev_addr_hsic;
+module_param(dev_addr_hsic, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr_hsic, "HSIC Device Ethernet Addr");
+#endif
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
+void save_usbnet_host_ethaddr(u8 addr[])
+{
+	memcpy(host_ethaddr, addr, ETH_ALEN);
+}
+
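+/* Parse an "aa:bb:cc:dd:ee:ff" (or '.'-separated) string into a binary MAC;
+ * fall back to a random address, returning 1, when the string is missing
+ * or invalid.
+ */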
+static int get_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = hex_to_bin(*str++) << 4;
+			num |= hex_to_bin(*str++);
+			dev_addr [i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	eth_random_addr(dev_addr);
+	return 1;
+}
+
+static int get_host_ether_addr(u8 *str, u8 *dev_addr)
+{
+	memcpy(dev_addr, str, ETH_ALEN);
+	if (is_valid_ether_addr(dev_addr))
+		return 0;
+
+	random_ether_addr(dev_addr);
+	memcpy(str, dev_addr, ETH_ALEN);
+	return 1;
+}
+
+static const struct net_device_ops eth_netdev_ops = {
+	.ndo_open		= eth_open,
+	.ndo_stop		= eth_stop,
+	.ndo_start_xmit		= eth_start_xmit,
+	.ndo_change_mtu		= ueth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static struct device_type gadget_type = {
+	.name	= "gadget",
+};
+
+/**
+ * gether_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associate with this link
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns an eth_dev pointer on success, or an ERR_PTR-encoded errno
+ */
+struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	int			status;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net)
+		return ERR_PTR(-ENOMEM);
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+#ifdef USB_RX_USE_WORK
+	INIT_WORK(&dev->rx_work, process_rx_work);
+#else
+	tasklet_init(&dev->rx_tl, process_rx_tl, (unsigned long)dev);
+#endif
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+
+	/* by default we always have a random MAC address */
+	net->addr_assign_type = NET_ADDR_RANDOM;
+
+	skb_queue_head_init(&dev->rx_frames);
+
+	/* network device setup */
+	dev->net = net;
+	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+	if (get_ether_addr(dev_addr, net->dev_addr)) {
+		net->addr_assign_type = NET_ADDR_RANDOM;
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "self");
+	} else {
+		net->addr_assign_type = NET_ADDR_SET;
+	}
+	if (get_host_ether_addr(host_ethaddr, dev->host_mac))
+		dev_warn(&g->dev, "using random %s ethernet address\n", "host");
+	else
+		dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
+
+	if (ethaddr)
+		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+	net->netdev_ops = &eth_netdev_ops;
+
+	net->ethtool_ops = &ops;
+
+	dev->gadget = g;
+	SET_NETDEV_DEV(net, &g->dev);
+	SET_NETDEV_DEVTYPE(net, &gadget_type);
+
+	status = register_netdev(net);
+	if (status < 0) {
+		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+		free_netdev(net);
+		dev = ERR_PTR(status);
+		g_usbnet_dev = NULL;
+	} else {
+		INFO(dev, "MAC %pM\n", net->dev_addr);
+		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+		/* two kinds of host-initiated state changes:
+		 *  - iff DATA transfer is active, carrier is "on"
+		 *  - tx queueing enabled if open *and* carrier is "on"
+		 */
+		netif_carrier_off(net);
+		g_usbnet_dev = net;
+	}
+
+#ifdef CONFIG_DDR_DEVFREQ
+	dev->ddr_qos_min.name = net->name;
+	pm_qos_add_request(&dev->ddr_qos_min,
+		PM_QOS_DDR_DEVFREQ_MIN, PM_QOS_DEFAULT_VALUE);
+#endif
+	return dev;
+}
+
+#ifdef CONFIG_USB_MV_HSIC_UDC
+struct eth_dev *gether_setup_name_hsic(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	int			status;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net)
+		return ERR_PTR(-ENOMEM);
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+#ifdef USB_RX_USE_WORK
+	INIT_WORK(&dev->rx_work, process_hsic_rx_work);
+#else
+	tasklet_init(&dev->rx_tl, process_rx_tl, (unsigned long)dev);
+#endif
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+
+	/* by default we always have a random MAC address */
+	net->addr_assign_type = NET_ADDR_RANDOM;
+
+	skb_queue_head_init(&dev->rx_frames);
+
+	/* network device setup */
+	dev->net = net;
+	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+	if (get_ether_addr(dev_addr, net->dev_addr)) {
+		net->addr_assign_type = NET_ADDR_RANDOM;
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "self");
+	} else {
+		net->addr_assign_type = NET_ADDR_SET;
+	}
+
+	if (get_host_ether_addr(host_ethaddr_hsic, dev->host_mac))
+		dev_warn(&g->dev, "using random %s ethernet address\n", "host");
+	else
+		dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
+
+	if (ethaddr)
+		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+	net->netdev_ops = &eth_netdev_ops;
+
+	net->ethtool_ops = &ops;
+
+	dev->gadget = g;
+	SET_NETDEV_DEV(net, &g->dev);
+	SET_NETDEV_DEVTYPE(net, &gadget_type);
+
+	status = register_netdev(net);
+	if (status < 0) {
+		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+		free_netdev(net);
+		dev = ERR_PTR(status);
+	} else {
+		INFO(dev, "MAC %pM\n", net->dev_addr);
+		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+		/* two kinds of host-initiated state changes:
+		 *  - iff DATA transfer is active, carrier is "on"
+		 *  - tx queueing enabled if open *and* carrier is "on"
+		 */
+		netif_carrier_off(net);
+	}
+
+#ifdef CONFIG_DDR_DEVFREQ
+	dev->ddr_qos_min.name = net->name;
+	pm_qos_add_request(&dev->ddr_qos_min,
+		PM_QOS_DDR_DEVFREQ_MIN, PM_QOS_DEFAULT_VALUE);
+#endif
+	return dev;
+}
+
+#endif
+
+#if defined(CONFIG_CPU_ASR18XX) || defined(CONFIG_CPU_ASR1901)
+static bool is_netifd_online(void)
+{
+	struct task_struct *g, *p;
+
+	do_each_thread(g, p) {
+		if (!strcmp("netifd", p->comm))
+			return true;
+	} while_each_thread(g, p);
+
+	return false;
+}
+
+/* wait until the network is configured by netifd */
+void wait_usbnet_br_up_and_brigded(void)
+{
+	struct net_device *dev;
+	int timeout = 300;
+
+	might_sleep();
+	/* just return when netifd is not launched yet */
+	if (unlikely(!is_netifd_online())) {
+		return;
+	} else {
+		/* first wait for usbnet to be up */
+		if (g_usbnet_dev == NULL) {
+			pr_info("!!!!!!!!! usbnet regsiter failed\n");
+			WARN_ON(1);
+			return;
+		} else {
+			/* netifd may run into issues if the system has not fully resumed */
+			timeout = 200;
+			while (pm_suspend_target_state != PM_SUSPEND_ON) {
+				msleep(10);
+				timeout--;
+				if (timeout == 0) {
+					pr_info("!!!!! wait system resume timeout\n");
+					WARN_ON(1);
+					return;
+				}
+			}
+			timeout = 300;
+			while (!((g_usbnet_dev->flags & IFF_UP) &&
+				(g_usbnet_dev->priv_flags & IFF_BRIDGE_PORT))) {
+				timeout--;
+				msleep(30);
+				if (timeout == 0) {
+					pr_info("!!!!! wait netifd timeout\n");
+					WARN_ON(1);
+					return;
+				}
+			}
+		}
+
+		/* wait for br-lan to be up */
+		timeout = 150;
+		dev = dev_get_by_name(&init_net, "br-lan");
+		/* br-lan lookup failed; just warn and move on */
+		if (dev == NULL) {
+			pr_info("!!!!!!!!! br-lan get failed\n");
+			WARN_ON(1);
+			return;
+		} else {
+			while (!(dev->flags & IFF_UP)) {
+				timeout--;
+				msleep(30);
+				if (timeout == 0) {
+					pr_info("!!!!! wait netifd timeout\n");
+					WARN_ON(1);
+					dev_put(dev);
+					return;
+				}
+			}
+			dev_put(dev);
+			return;
+		}
+	}
+}
+
+void wait_usbnet_if_down(void)
+{
+	int timeout = 60;
+
+	might_sleep();
+	/* just return when netifd is not launched yet */
+	if (unlikely(!is_netifd_online())) {
+		return;
+	} else {
+		while (!get_usbnet_ifdown_flag()) {
+			timeout--;
+			msleep(30);
+			if (timeout == 0) {
+				pr_info("!!!!! wait usbnet if_down timeout\n");
+				WARN_ON(1);
+				return;
+			}
+		}
+	}
+}
+#endif
+
+#ifdef CONFIG_DDR_DEVFREQ
+#ifdef CONFIG_CPU_ASR1803
+extern void asr1803_set_32k_to_rtc32k(void);
+#endif
+
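+/* Runs every HZ / INTERVALS_PER_SEC jiffies; e.g. at 10 intervals/sec the
+ * 150 Mbps tx threshold works out to 150000000 / 10 / 8 = 1875000 bytes
+ * per 100 ms window before the DDR floor is raised.
+ */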
+static void txrx_monitor_work(struct work_struct *work)
+{
+	static unsigned long old_rx_bytes = 0;
+	static unsigned long old_tx_bytes = 0;
+	unsigned long rx_bytes, tx_bytes;
+
+	struct eth_dev	*dev = container_of(work, struct eth_dev,
+						txrx_monitor_work.work);
+
+	if (!dev || !dev->net || !dev->port_usb) {
+		pr_err("%s: dev or net is not ready\n", __func__);
+		return;
+	}
+
+#ifdef CONFIG_CPU_ASR1803
+	/* call it in this polling loop for non-suspend usb dongle mode */
+	asr1803_set_32k_to_rtc32k();
+#endif
+	if (likely(dev->net->stats.rx_bytes > old_rx_bytes))
+		rx_bytes = dev->net->stats.rx_bytes - old_rx_bytes;
+	else
+		rx_bytes = ULONG_MAX - old_rx_bytes + dev->net->stats.rx_bytes + 1;
+
+	if (likely(dev->net->stats.tx_bytes > old_tx_bytes))
+		tx_bytes = dev->net->stats.tx_bytes - old_tx_bytes;
+	else
+		tx_bytes = ULONG_MAX - old_tx_bytes + dev->net->stats.tx_bytes + 1;
+
+	old_rx_bytes = dev->net->stats.rx_bytes;
+	old_tx_bytes = dev->net->stats.tx_bytes;
+
+	/* first check the tx side and boost the DDR frequency if needed */
+	if (tx_bytes >= DDR_TX_BOOST_BYTES) {
+		pm_qos_update_request_timeout(&dev->ddr_qos_min,
+			DDR_BOOST_FREQ,
+			(2 * USEC_PER_SEC / INTERVALS_PER_SEC));
+		goto out;
+	}
+
+	if (rx_bytes >= rx_boost_thr) {
+		pm_qos_update_request_timeout(&dev->ddr_qos_min,
+			DDR_BOOST_FREQ,
+			(2 * USEC_PER_SEC / INTERVALS_PER_SEC));
+		goto out;
+	}
+
+	/* neither side is busy: let the DDR constraint drop */
+	pm_qos_update_request(&dev->ddr_qos_min, PM_QOS_DEFAULT_VALUE);
+out:
+	schedule_delayed_work(&dev->txrx_monitor_work, HZ / INTERVALS_PER_SEC);
+}
+#endif
+
+/**
+ * gether_cleanup - remove Ethernet-over-USB device
+ * @dev: device returned by gether_setup_name()
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by gether_setup_name().
+ */
+void gether_cleanup(struct eth_dev *dev)
+{
+	if (!dev)
+		return;
+
+#ifndef USB_RX_USE_WORK
+	tasklet_kill(&dev->rx_tl);
+#endif
+	unregister_netdev(dev->net);
+	flush_work(&dev->work);
+#ifdef USB_RX_USE_WORK
+	flush_work(&dev->rx_work);
+#endif
+
+#ifdef CONFIG_DDR_DEVFREQ
+	if (dev->dwork_inited)
+		flush_delayed_work(&dev->txrx_monitor_work);
+	pm_qos_remove_request(&dev->ddr_qos_min);
+#endif
+
+	free_netdev(dev->net);
+}
+
+/**
+ * gether_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ *	current device speed, and any framing wrapper(s) set up.
+ * Context: irqs blocked
+ *
+ * This is called to activate endpoints and let the network layer know
+ * the connection is active ("carrier detect").  It may cause the I/O
+ * queues to open and start letting network packets flow, but will in
+ * any case activate the endpoints so that they respond properly to the
+ * USB host.
+ *
+ * Verify net_device pointer returned using IS_ERR().  If it doesn't
+ * indicate some error code (negative errno), ep->driver_data values
+ * have been overwritten.
+ */
+struct net_device *gether_connect(struct gether *link)
+{
+	struct eth_dev		*dev = link->ioport;
+	int			result = 0;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+#ifdef CONFIG_USBNET_USE_SG
+	sg_is_enabled = link->is_sg_mode;
+	pr_info("sg enabled: %d\n", sg_is_enabled);
+#endif
+
+	link->in_ep->driver_data = dev;
+	result = usb_ep_enable(link->in_ep);
+	if (result != 0) {
+		DBG(dev, "enable %s --> %d\n",
+			link->in_ep->name, result);
+		goto fail0;
+	}
+
+	link->out_ep->driver_data = dev;
+	result = usb_ep_enable(link->out_ep);
+	if (result != 0) {
+		DBG(dev, "enable %s --> %d\n",
+			link->out_ep->name, result);
+		goto fail1;
+	}
+	if (result == 0)
+		result = alloc_requests(dev, link, qlen(dev->gadget, true),
+					qlen(dev->gadget, false));
+
+	if (result == 0) {
+		dev->zlp = link->is_zlp_ok;
+		DBG(dev, "qlen_rx %d qlen_tx %d\n", qlen(dev->gadget, true),
+		    qlen(dev->gadget, false));
+
+#ifdef CONFIG_DDR_DEVFREQ
+		dev->tx_boost_threshhold = qlen(dev->gadget, false) / 5;
+
+		/* boost when about 12 * 8 = 96Mbps */
+		dev->rx_boost_threshhold = 8;
+		atomic_set(&dev->no_rx_skb, 0);
+		if (!dev->dwork_inited) {
+			INIT_DELAYED_WORK(&dev->txrx_monitor_work, txrx_monitor_work);
+			dev->dwork_inited = true;
+		}
+#endif
+		dev->header_len = link->header_len;
+		dev->unwrap = link->unwrap;
+		dev->wrap = link->wrap;
+		dev->unwrap_fixup = link->unwrap_fixup;
+		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+		spin_lock(&dev->lock);
+		atomic_set(&dev->no_tx_req_used, 0);
+		dev->port_usb = link;
+		if (netif_running(dev->net)) {
+			if (link->open)
+				link->open(link);
+		} else {
+			if (link->close)
+				link->close(link);
+		}
+		spin_unlock(&dev->lock);
+
+		netif_carrier_on(dev->net);
+		if (netif_running(dev->net))
+			eth_start(dev, GFP_ATOMIC);
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+		/* allocate the tx multiple-packets histogram: */
+		BUG_ON(dev->tx_mult_histogram);
+		histogram_size = 16;
+		dev->tx_mult_histogram = kzalloc(sizeof(int) * histogram_size, GFP_ATOMIC);
+		if (!dev->tx_mult_histogram) {
+			histogram_size = 0;
+			pr_err("u_ether: failed to alloc tx_mult_histogram\n");
+		}
+#endif
+
+#ifdef CONFIG_DDR_DEVFREQ
+		/* the delay set as 10ms */
+		schedule_delayed_work(&dev->txrx_monitor_work, HZ / 100);
+#endif
+	/* on error, disable any endpoints  */
+	} else {
+		(void) usb_ep_disable(link->out_ep);
+fail1:
+		(void) usb_ep_disable(link->in_ep);
+	}
+fail0:
+	/* caller is responsible for cleanup on error */
+	if (result < 0)
+		return ERR_PTR(result);
+
+	return dev->net;
+}
+
+/**
+ * gether_disconnect - notify network layer that USB link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * Context: irqs blocked
+ *
+ * This is called to deactivate endpoints and let the network layer know
+ * the connection went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ * The endpoints are inactive, and accordingly without active USB I/O.
+ * Pointers to endpoint descriptors and endpoint private data are nulled.
+ */
+void gether_disconnect(struct gether *link)
+{
+	struct eth_dev		*dev = link->ioport;
+	struct usb_request	*req;
+	struct sk_buff		*skb;
+
+	WARN_ON(!dev);
+	if (!dev)
+		return;
+
+	DBG(dev, "%s\n", __func__);
+
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+#ifdef CONFIG_DDR_DEVFREQ
+	/* don't use cancel_delayed_work_sync as this function will be called
+	 * in irq context under the MAC-ECM resume process
+	 */
+	if (work_pending(&dev->txrx_monitor_work.work))
+		cancel_delayed_work(&dev->txrx_monitor_work);
+#endif
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	histogram_size = 0;
+	if (dev->tx_mult_histogram) {
+		kfree(dev->tx_mult_histogram);
+		dev->tx_mult_histogram = NULL;
+	}
+#endif
+	/* disable endpoints, forcing (synchronous) completion
+	 * of all pending i/o.  then free the request objects
+	 * and forget about the endpoints.
+	 */
+	usb_ep_disable(link->in_ep);
+	spin_lock(&dev->req_lock);
+	while (!list_empty(&dev->tx_reqs)) {
+		req = container_of(dev->tx_reqs.next,
+					struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock(&dev->req_lock);
+		usb_ep_free_request_tx_mult(link->in_ep, req);
+		spin_lock(&dev->req_lock);
+	}
+	spin_unlock(&dev->req_lock);
+	link->in_ep->driver_data = NULL;
+	link->in_ep->desc = NULL;
+
+	usb_ep_disable(link->out_ep);
+	spin_lock(&dev->req_lock);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+					struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock(&dev->req_lock);
+		usb_ep_free_request(link->out_ep, req);
+		spin_lock(&dev->req_lock);
+	}
+	spin_unlock(&dev->req_lock);
+
+	spin_lock(&dev->rx_frames.lock);
+	while ((skb = __skb_dequeue(&dev->rx_frames)))
+		dev_kfree_skb_any(skb);
+	spin_unlock(&dev->rx_frames.lock);
+
+	link->out_ep->driver_data = NULL;
+	link->out_ep->desc = NULL;
+
+	/* finish forgetting about this USB link episode */
+	dev->header_len = 0;
+	dev->unwrap = NULL;
+	dev->wrap = NULL;
+
+	spin_lock(&dev->lock);
+	dev->port_usb = NULL;
+	spin_unlock(&dev->lock);
+}
+
+u8 *eth_get_host_mac(struct net_device *net)
+{
+	struct eth_dev *eth = netdev_priv(net);
+	return eth->host_mac;
+}
+EXPORT_SYMBOL(eth_get_host_mac);
+
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");