1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
4 *
5 * Copyright (C) 2003-2005,2008 David Brownell
6 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
7 * Copyright (C) 2008 Nokia Corporation
8 */
9
10/* #define VERBOSE_DEBUG */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/gfp.h>
15#include <linux/device.h>
16#include <linux/ctype.h>
17#include <linux/etherdevice.h>
18#include <linux/ethtool.h>
19#include <linux/if_vlan.h>
20#include <linux/interrupt.h>
22#include <linux/pm_qos.h>
23#include <linux/mm.h>
24#include <linux/suspend.h>
25#include "u_ether.h"
26/*
27 * This component encapsulates the Ethernet link glue needed to provide
28 * one (!) network link through the USB gadget stack, normally "usb0".
29 *
30 * The control and data models are handled by the function driver which
31 * connects to this code; such as CDC Ethernet (ECM or EEM),
32 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
33 * management.
34 *
35 * Link level addressing is handled by this component using module
36 * parameters; if no such parameters are provided, random link level
37 * addresses are used. Each end of the link uses one address. The
38 * host end address is exported in various ways, and is often recorded
39 * in configuration databases.
40 *
41 * The driver which assembles each configuration using such a link is
42 * responsible for ensuring that each configuration includes at most one
43 * instance of this network link. (The network layer provides ways for
44 * this single "physical" link to be used by multiple virtual links.)
45 */
46
47#define UETH__VERSION "29-May-2008"
48
49#ifdef CONFIG_CPU_ASR1903
50#define CONFIG_USB_GADGET_DEBUG_FILES 1
51extern u32 nr_aggr_fail_padding;
52#endif
53
54struct eth_dev {
55 /* lock is held while accessing port_usb
56 */
57 spinlock_t lock;
58 struct gether *port_usb;
59
60 struct net_device *net;
61 struct usb_gadget *gadget;
62
63 spinlock_t req_lock; /* guard {rx,tx}_reqs */
64 struct list_head tx_reqs, rx_reqs;
65 unsigned tx_qlen;
66 atomic_t no_tx_req_used;
67#ifdef CONFIG_USB_GADGET_DEBUG_FILES
68 int *tx_mult_histogram;
69#endif
70
71 struct sk_buff_head rx_frames;
72
73 unsigned header_len;
74 unsigned int ul_max_pkts_per_xfer;
75 struct sk_buff *(*wrap)(struct gether *,
76 struct sk_buff *skb,
77 struct aggr_ctx *aggr_ctx);
78 int (*unwrap)(struct gether *,
79 struct sk_buff *skb,
80 struct sk_buff_head *list);
81 struct sk_buff *(*unwrap_fixup)(struct gether *,
82 struct sk_buff *skb);
83
84 struct work_struct work;
85 struct work_struct rx_work;
86 struct tasklet_struct rx_tl;
87
88 unsigned long todo;
89#define WORK_RX_MEMORY 0
90
91 bool zlp;
92 u8 host_mac[ETH_ALEN];
93
94#ifdef CONFIG_DDR_DEVFREQ
95/* for nezhas the boost freq is 355MHz, while it is 398MHz for nezha3 */
96#define DDR_BOOST_FREQ (350000)
97#if defined(CONFIG_CPU_ASR18XX) && defined(CONFIG_USB_MV_UDC)
98#define INTERVALS_PER_SEC (50)
99#else
100#define INTERVALS_PER_SEC (10)
101#endif
102
103#if defined(CONFIG_CPU_ASR18XX)
104#ifdef CONFIG_USB_MV_UDC
105#define DDR_TX_BOOST_BYTES (( 10000000 / INTERVALS_PER_SEC) >> 3) /* 10mbps*/
106#define DDR_RX_BOOST_BYTES (( 1000000 / INTERVALS_PER_SEC) >> 3) /* 1mbps*/
107#else
108#define DDR_TX_BOOST_BYTES (( 15000000 / INTERVALS_PER_SEC) >> 3) /* 15mbps*/
109#define DDR_RX_BOOST_BYTES (( 15000000 / INTERVALS_PER_SEC) >> 3) /* 15mbps*/
110#endif
111#else
112#define DDR_TX_BOOST_BYTES ((150000000 / INTERVALS_PER_SEC) >> 3) /* 150mbps*/
113#define DDR_RX_BOOST_BYTES (( 20000000 / INTERVALS_PER_SEC) >> 3) /* 20mbps*/
114#endif
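/*
 * Worked example of the thresholds above (illustrative only, derived from the
 * macros): with CONFIG_CPU_ASR18XX and CONFIG_USB_MV_UDC the monitor runs 50
 * times per second, so DDR_TX_BOOST_BYTES = (10,000,000 / 50) >> 3 = 25,000
 * bytes per 20 ms interval, i.e. 10 Mbps. Without ASR18XX it runs 10 times
 * per second and DDR_TX_BOOST_BYTES = (150,000,000 / 10) >> 3 = 1,875,000
 * bytes per 100 ms interval, i.e. 150 Mbps. txrx_monitor_work() compares the
 * bytes moved during one interval against these values.
 */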
115
116 atomic_t no_rx_skb;
117 unsigned int tx_boost_threshhold;
118 unsigned int rx_boost_threshhold;
119 struct pm_qos_request ddr_qos_min;
120 struct delayed_work txrx_monitor_work;
121 bool dwork_inited;
122#endif
123};
124
125/*-------------------------------------------------------------------------*/
126
127#define AGGR_DONE(req) \
128 (AGGRCTX(req)->pending_skb || \
129 AGGRCTX(req)->total_size == 0)
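/*
 * Descriptive note (based on how the macro is used in eth_start_xmit() and
 * tx_complete()): AGGR_DONE(req) means the request should be queued now,
 * either because wrap() left a packet pending (i.e. the request is full) or
 * because nothing was aggregated into it at all (total_size == 0).
 */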
130
131#define RX_EXTRA 20 /* bytes guarding against rx overflows */
132#define DEFAULT_QLEN 5 /* quintuple buffering by default */
133
134#ifdef CONFIG_CPU_ASR1903
135static unsigned qmult_rx = 20;
136#else
137static unsigned qmult_rx = 15;
138#endif
139static unsigned qmult_tx = 40;
140static struct net_device *g_usbnet_dev;
141static unsigned rx_notl = 0;
146/*
147 * TX_REQ_THRESHOLD is used for two different (but related) optimizations:
148 * 1. IRQ_OPTIMIZATION -> get a completion interrupt every TX_REQ_THRESHOLD requests.
149 * 2. USB_AGGREGATION -> start aggregating only after USB holds more than TX_REQ_THRESHOLD requests.
150 * --> To prevent packet delay, the values for #1 and #2 MUST be the same.
151 * NOTE: The value of TX_REQ_THRESHOLD must be smaller than half of the queue size,
152 * e.g. TX_REQ_THRESHOLD_MAX <= DEFAULT_QLEN * (qmult_tx / 2)
153 */
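/*
 * Illustrative check of the constraint above, assuming the default module
 * parameters: at high speed the tx queue depth is DEFAULT_QLEN * qmult_tx =
 * 5 * 40 = 200 requests, so TX_REQ_THRESHOLD must stay <= 100. The
 * non-ASR1903 value below, DEFAULT_QLEN * 4 = 20, satisfies this; the
 * ASR1903 value of 1 trivially does.
 */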
154#ifdef CONFIG_CPU_ASR1903
155#define TX_REQ_THRESHOLD (1)
156#else
157#define TX_REQ_THRESHOLD (DEFAULT_QLEN * 4)
158#endif
159
160#define USB_ALLOC_MEM_LOW_THRESH (2 * 1024 * 1024)
161
162
163#ifdef CONFIG_USB_GADGET_DEBUG_FILES
164static unsigned histogram_size;
165#endif
166
167//only asr1803 needs to use a workqueue for rx processing
168#ifdef CONFIG_USB_MV_UDC
169#define USB_RX_USE_WORK 1
170#endif
171
172module_param(qmult_rx, uint, S_IRUGO|S_IWUSR);
173MODULE_PARM_DESC(qmult_rx, "rx queue length multiplier at high/super speed");
174module_param(qmult_tx, uint, S_IRUGO|S_IWUSR);
175MODULE_PARM_DESC(qmult_tx, "tx queue length multiplier at high/super speed");
176module_param(rx_notl, uint, S_IRUGO|S_IWUSR);
177MODULE_PARM_DESC(rx_notl, "rx: do not use a tasklet for rx processing");
178
#ifdef CONFIG_DDR_DEVFREQ /* DDR_RX_BOOST_BYTES only exists with devfreq */
179static unsigned rx_boost_thr = DDR_RX_BOOST_BYTES;
180module_param(rx_boost_thr, uint, S_IRUGO|S_IWUSR);
181MODULE_PARM_DESC(rx_boost_thr, "rx ddr boost threshold");
#endif
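/*
 * Usage sketch for the module parameters above (hypothetical names: this
 * assumes the code is built into a gadget module called g_ether and the
 * link comes up as usb0; adjust for your build):
 *   modprobe g_ether qmult_rx=20 qmult_tx=40
 * or, since the parameters are S_IRUGO|S_IWUSR, at runtime:
 *   echo 20 > /sys/module/g_ether/parameters/qmult_rx
 */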
182
183#ifdef CONFIG_USBNET_USE_SG
184static bool sg_is_enabled;
185static struct scatterlist *
186alloc_sglist(int nents)
187{
188 struct scatterlist *sg;
189
190 sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
191 if (!sg) {
192 pr_err("usbnet error: sg alloc failed\n");
193 return NULL;
194 }
195 sg_init_table(sg, nents);
200 return sg;
201}
202
203static void
204build_sglist(struct usb_request *req, struct aggr_ctx* aggrctx)
205{
206 struct scatterlist *sg = aggrctx->sg;
207 struct sk_buff *cur, *next;
208 int nr_pkts = 0;
209
210 if (unlikely(!sg)) {
211 pr_err("sg of aggr is NULL\n");
212 BUG();
213 }
214
215 skb_queue_walk_safe(&AGGRCTX(req)->skb_list, cur, next) {
216 sg_set_buf(&sg[nr_pkts], (void *)cur->data, cur->len);
217 nr_pkts++;
218 }
219 if (nr_pkts == 0 || nr_pkts > USBNET_SG_NENTS) {
220 pr_err("error pkts: %d\n", nr_pkts);
221 BUG();
222 }
223 sg_mark_end(&sg[nr_pkts - 1]);
224
225 req->sg = sg;
226 req->num_sgs = nr_pkts;
227}
228#endif
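/*
 * Design note on the scatter-gather path above: when the UDC reports SG
 * support (sg_is_enabled, taken from link->is_sg_mode in gether_connect()),
 * every aggregated skb stays on the request's skb_list and is described by
 * one sg entry, so a single usb_ep_queue() moves several packets without
 * copying them into one linear buffer. USBNET_SG_NENTS bounds the number of
 * packets per request.
 */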
229
230/* for dual-speed hardware, use deeper queues at high/super speed */
231static inline int qlen(struct usb_gadget *gadget, bool rx)
232{
233 if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
234 gadget->speed == USB_SPEED_SUPER))
235 return DEFAULT_QLEN * (rx ? qmult_rx : qmult_tx);
236 else
237 return DEFAULT_QLEN;
238}
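/*
 * Resulting depths with the defaults above (illustrative): at high/super
 * speed the rx queue holds DEFAULT_QLEN * qmult_rx = 5 * 15 = 75 requests
 * (5 * 20 = 100 on ASR1903) and the tx queue holds 5 * 40 = 200; at full
 * speed both fall back to DEFAULT_QLEN = 5.
 */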
239
240int u_ether_rx_qlen(void)
241{
242 return qmult_rx * DEFAULT_QLEN;
243}
244
245/*-------------------------------------------------------------------------*/
246
247/* REVISIT there must be a better way than having two sets
248 * of debug calls ...
249 */
250#if 0
251#undef DBG
252#undef VDBG
253#undef ERROR
254#undef INFO
255
256#define xprintk(d, level, fmt, args...) \
257 printk(level "%s: " fmt , (d)->net->name , ## args)
258
259#ifdef DEBUG
260#undef DEBUG
261#define DBG(dev, fmt, args...) \
262 xprintk(dev , KERN_DEBUG , fmt , ## args)
263#else
264#define DBG(dev, fmt, args...) \
265 do { } while (0)
266#endif /* DEBUG */
267
268#ifdef VERBOSE_DEBUG
269#define VDBG DBG
270#else
271#define VDBG(dev, fmt, args...) \
272 do { } while (0)
273#endif /* DEBUG */
274
275#define ERROR(dev, fmt, args...) \
276 xprintk(dev , KERN_ERR , fmt , ## args)
277#define INFO(dev, fmt, args...) \
278 xprintk(dev , KERN_INFO , fmt , ## args)
279#endif
280
281#ifdef CONFIG_USB_GADGET_DEBUG_FILES
282static void histogram_realloc(struct eth_dev *dev, int skb_qlen)
283{
284 int *tmp, *oldbuf;
285 int size = histogram_size;
286
287 while (size <= skb_qlen)
288 size = size * 2;
289
290 tmp = kzalloc(sizeof(int) * size, GFP_ATOMIC);
291 if (!tmp) {
292 histogram_size = 0;
293 kfree(dev->tx_mult_histogram);
294 dev->tx_mult_histogram = NULL;
 return; /* bail out: the memcpy below must not run with tmp == NULL */
295 }
296
297 memcpy(tmp, dev->tx_mult_histogram, sizeof(int) * histogram_size);
298 oldbuf = dev->tx_mult_histogram;
299 dev->tx_mult_histogram = tmp;
300 histogram_size = size;
301 if (oldbuf)
302 kfree(oldbuf);
303 else
304 BUG();
305}
306
307static ssize_t
308u_ether_histogram_clean(struct device *dev, struct device_attribute *attr,
309 const char *buf, size_t size)
310{
311 struct eth_dev *priv = netdev_priv(to_net_dev(dev));
312 if (priv->tx_mult_histogram)
313 memset(priv->tx_mult_histogram, 0, histogram_size * sizeof(int));
314 return size;
315}
316
317static ssize_t
318u_ether_histogram_show(struct device *dev, struct device_attribute *attr, char *buf)
319{
320 struct eth_dev *priv = netdev_priv(to_net_dev(dev));
321 char *fmt = "mult[%d] = %d/100 (%d)\n";
322 int i, ret = 0, total = 0, max_aggr;
323
324 if (!priv->tx_mult_histogram)
325 return 0;
326
327 for (i = 0 ; i < histogram_size; i++)
328 total += priv->tx_mult_histogram[i];
 if (unlikely(!total))
 total = 1; /* avoid divide-by-zero before any tx request is counted */

330 for (i = histogram_size - 1; i >= 0; i--)
331 if (priv->tx_mult_histogram[i])
332 break;
333 max_aggr = i;
334
335#ifdef CONFIG_CPU_ASR1903
336 ret += sprintf(buf + ret, "padding_fail=%d\n", nr_aggr_fail_padding);
337#endif
338
339 ret += sprintf(buf + ret, "histogram_size = %d\n", histogram_size);
340 for (i = 0; i <= max_aggr; i++)
341 ret += sprintf(buf + ret, fmt, i, priv->tx_mult_histogram[i] * 100 / total, priv->tx_mult_histogram[i]);
342 return ret;
343}
344
345static
346DEVICE_ATTR(u_ether_tx_mult_histogram, S_IRUGO|S_IWUSR, u_ether_histogram_show, u_ether_histogram_clean);
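/*
 * Example of reading/clearing the histogram from userspace (assuming the
 * gadget interface registered as usb0):
 *   cat /sys/class/net/usb0/u_ether_tx_mult_histogram       # dump
 *   echo 1 > /sys/class/net/usb0/u_ether_tx_mult_histogram  # zero counters
 * Each "mult[N]" line reports how often a completed tx request carried N+1
 * aggregated packets, both as a share of all requests and as a raw count.
 */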
347
348#ifdef CONFIG_DDR_DEVFREQ
349static ssize_t
350tx_boost_thresh_store(struct device *dev, struct device_attribute *attr,
351 const char *buf, size_t size)
352{
353 struct eth_dev *priv = netdev_priv(to_net_dev(dev));
354
355 sscanf(buf, "%d", &priv->tx_boost_threshhold);
356
357 pr_info("u_ether tx boost threshold set as %d\n",
358 priv->tx_boost_threshhold);
359
360 return strnlen(buf, PAGE_SIZE);
361}
362
363static ssize_t
364tx_boost_thresh_show(struct device *dev,
365 struct device_attribute *attr, char *buf)
366{
367 struct eth_dev *priv = netdev_priv(to_net_dev(dev));
368 int ret = 0;
369
370 ret += sprintf(buf + ret, "tx_boost_threshhold = %d\n",
371 priv->tx_boost_threshhold);
372 return ret;
373}
374static
375DEVICE_ATTR(tx_boost_param, S_IRUGO|S_IWUSR, tx_boost_thresh_show,
376 tx_boost_thresh_store);
377
378static ssize_t
379rx_boost_thresh_store(struct device *dev, struct device_attribute *attr,
380 const char *buf, size_t size)
381{
382 struct eth_dev *priv = netdev_priv(to_net_dev(dev));
383
384 sscanf(buf, "%d", &priv->rx_boost_threshhold);
385
386 pr_info("u_ether rx boost threshold set as %d\n",
387 priv->rx_boost_threshhold);
388
389 return strnlen(buf, PAGE_SIZE);
390}
391
392static ssize_t
393rx_boost_thresh_show(struct device *dev,
394 struct device_attribute *attr, char *buf)
395{
396 struct eth_dev *priv = netdev_priv(to_net_dev(dev));
397 int ret = 0;
398
399 ret += sprintf(buf + ret, "rx_boost_threshhold = %d\n",
400 priv->rx_boost_threshhold);
401 return ret;
402}
403static
404DEVICE_ATTR(rx_boost_param, S_IRUGO|S_IWUSR, rx_boost_thresh_show,
405 rx_boost_thresh_store);
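/*
 * Example (assuming the interface registered as usb0): the boost thresholds
 * can be inspected and tuned at runtime via
 *   cat /sys/class/net/usb0/tx_boost_param
 *   echo 4000000 > /sys/class/net/usb0/rx_boost_param
 * Note that, as written in this file, txrx_monitor_work() compares traffic
 * against DDR_TX_BOOST_BYTES and the rx_boost_thr module parameter rather
 * than against these per-device fields.
 */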
406#endif
407
408static struct attribute *u_ether_attrs[] = {
409 &dev_attr_u_ether_tx_mult_histogram.attr,
410#ifdef CONFIG_DDR_DEVFREQ
411 &dev_attr_tx_boost_param.attr,
412 &dev_attr_rx_boost_param.attr,
413#endif
414 NULL,
415};
416static struct attribute_group u_ether_attr_group = {
417 .attrs = u_ether_attrs,
418};
419#endif /*CONFIG_USB_GADGET_DEBUG_FILES*/
420
421static inline void req_aggr_clean(struct usb_request *req)
422{
423 struct sk_buff *skb;
424 BUG_ON(!AGGRCTX(req));
425 BUG_ON(AGGRCTX(req)->pending_skb);
426
427 while ((skb = skb_dequeue(&AGGRCTX(req)->skb_list) ))
428 dev_kfree_skb_any(skb);
429
430 AGGRCTX(req)->total_size = 0;
431 req->length = 0;
432 req->buf = NULL;
433#ifdef CONFIG_USBNET_USE_SG
434 AGGRCTX(req)->num_sgs = 0;
435 if (AGGRCTX(req)->sg)
436 sg_init_table(AGGRCTX(req)->sg, USBNET_SG_NENTS);
437 req->num_sgs = 0;
438 req->num_mapped_sgs = 0;
439#endif
440}
441
442/*-------------------------------------------------------------------------*/
443
444/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
445
446static int ueth_change_mtu(struct net_device *net, int new_mtu)
447{
448 struct eth_dev *dev = netdev_priv(net);
449 unsigned long flags;
450 int status = 0;
451
452 /* don't change MTU on "live" link (peer won't know) */
453 spin_lock_irqsave(&dev->lock, flags);
454 if (dev->port_usb)
455 status = -EBUSY;
456 else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
457 status = -ERANGE;
458 else
459 net->mtu = new_mtu;
460 spin_unlock_irqrestore(&dev->lock, flags);
461
462 return status;
463}
464
465static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
466{
467 struct eth_dev *dev = netdev_priv(net);
468
469 strlcpy(p->driver, "g_ether", sizeof(p->driver));
470 strlcpy(p->version, UETH__VERSION, sizeof(p->version));
471 strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
472 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
473}
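/*
 * Illustrative output of the hook above (interface name assumed to be usb0;
 * the last two fields come from the UDC):
 *   $ ethtool -i usb0
 *   driver: g_ether
 *   version: 29-May-2008
 *   firmware-version: <gadget controller name>
 *   bus-info: <gadget controller device name>
 */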
474
475/* REVISIT can also support:
476 * - WOL (by tracking suspends and issuing remote wakeup)
477 * - msglevel (implies updated messaging)
478 * - ... probably more ethtool ops
479 */
480
481static const struct ethtool_ops ops = {
482 .get_drvinfo = eth_get_drvinfo,
483 .get_link = ethtool_op_get_link,
484};
485
486static void defer_kevent(struct eth_dev *dev, int flag)
487{
488 if (test_and_set_bit(flag, &dev->todo))
489 return;
490 if (!schedule_work(&dev->work))
491 ERROR(dev, "kevent %d may have been dropped\n", flag);
492 else
493 DBG(dev, "kevent %d scheduled\n", flag);
494}
495
496static void rx_complete(struct usb_ep *ep, struct usb_request *req);
497static void rx_complete_notl(struct usb_ep *ep, struct usb_request *req);
498static void tx_complete(struct usb_ep *ep, struct usb_request *req);
499
500#define EXTRA_SKB_WIFI_HEADROOM 96
501static int
502rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
503{
504 struct sk_buff *skb;
505 int retval = -ENOMEM;
506 size_t size = 0;
507 struct usb_ep *out;
508 unsigned long flags;
509
510 spin_lock_irqsave(&dev->lock, flags);
511 if (dev->port_usb)
512 out = dev->port_usb->out_ep;
513 else
514 out = NULL;
515
516 if (!out) {
517 spin_unlock_irqrestore(&dev->lock, flags);
518 return -ENOTCONN;
519 }
520
521 /* Padding up to RX_EXTRA handles minor disagreements with host.
522 * Normally we use the USB "terminate on short read" convention;
523 * so allow up to (N*maxpacket), since that memory is normally
524 * already allocated. Some hardware doesn't deal well with short
525 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
526 * byte off the end (to force hardware errors on overflow).
527 *
528 * RNDIS uses internal framing, and explicitly allows senders to
529 * pad to end-of-packet. That's potentially nice for speed, but
530 * means receivers can't recover lost synch on their own (because
531 * new packets don't only start after a short RX).
532 */
533 size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
534 size += dev->port_usb->header_len;
535
536 size += EXTRA_SKB_WIFI_HEADROOM; /* for wifi header */
537 size += out->maxpacket - 1;
538 size -= size % out->maxpacket;
539
540 if (dev->ul_max_pkts_per_xfer)
541 size *= dev->ul_max_pkts_per_xfer;
542
543 if (dev->port_usb->is_fixed)
544 size = max_t(size_t, size, dev->port_usb->fixed_out_len);
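 /* Worked example of the sizing above (illustrative, assuming a 1500-byte
 * MTU, header_len == 0 and a 512-byte high-speed bulk maxpacket):
 * 14 (ethhdr) + 1500 + 20 (RX_EXTRA) + 96 (wifi headroom) = 1630 bytes,
 * rounded up to a maxpacket multiple gives 2048, then multiplied by
 * ul_max_pkts_per_xfer when the function driver enables rx aggregation.
 */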
545
546 spin_unlock_irqrestore(&dev->lock, flags);
547
548 pr_debug("%s: size: %zu", __func__, size);
549
550 //if ((global_zone_page_state(NR_FREE_PAGES) << PAGE_SHIFT) < USB_ALLOC_MEM_LOW_THRESH) {
551 if ((global_zone_page_state(NR_FREE_PAGES) << PAGE_SHIFT) < ((min_free_kbytes + 64) << 10)) {
552 pr_err_ratelimited("usb mem low\n");
553 skb = NULL;
554 goto enomem;
555 }
556
557#ifdef CONFIG_USB_DWC2
558 /* add 3 bytes for 4-bytes alignment requirement
559 * wifi headroom is not calculated in the fixed length
560 */
561 if (dev->port_usb->is_fixed)
562 skb = alloc_skb(size + NET_IP_ALIGN + 3 + EXTRA_SKB_WIFI_HEADROOM, gfp_flags);
563 else
564 skb = alloc_skb(size + NET_IP_ALIGN + 3, gfp_flags);
565#else
566 if (dev->port_usb->is_fixed)
567 skb = alloc_skb(size + NET_IP_ALIGN + EXTRA_SKB_WIFI_HEADROOM, gfp_flags);
568 else
569 skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
570#endif
571 if (skb == NULL) {
572 ERROR(dev, "no rx skb\n");
573 goto enomem;
574 }
575
576 spin_lock_irqsave(&dev->lock, flags);
577 if (dev->port_usb)
578 out = dev->port_usb->out_ep;
579 else
580 out = NULL;
581
582 if (!out) {
583 spin_unlock_irqrestore(&dev->lock, flags);
584 goto enomem;
585 }
586
587 /* Some platforms perform better when IP packets are aligned,
588 * but on at least one, checksumming fails otherwise. Note:
589 * RNDIS headers involve variable numbers of LE32 values.
590 */
591 skb_reserve(skb, NET_IP_ALIGN + EXTRA_SKB_WIFI_HEADROOM);
592
593#ifdef CONFIG_USB_DWC2
594 if (((unsigned long)skb->data) & 0x3)
595 skb_reserve(skb, (4 - (((unsigned long)skb->data) & 3)));
596#endif
597
598 req->buf = skb->data;
599 req->length = size;
600 req->context = skb;
601
602#if !defined(CONFIG_USB_DWC3) && !defined(CONFIG_USB_DWC2)
603 /* Always active, handled in the low level driver*/
604 req->no_interrupt = 1;
605#endif
606
607 retval = usb_ep_queue(out, req, gfp_flags);
608
609 spin_unlock_irqrestore(&dev->lock, flags);
610
611 if (retval == -ENOMEM)
612enomem:
613 defer_kevent(dev, WORK_RX_MEMORY);
614 if (retval) {
615 printk_ratelimited(KERN_DEBUG "rx submit --> %d\n", retval);
616 if (skb)
617 dev_kfree_skb_any(skb);
618 }
619 return retval;
620}
621
622static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags);
623static void rx_complete_notl(struct usb_ep *ep, struct usb_request *req)
624{
625 struct sk_buff *skb = req->context, *skb2;
626 struct eth_dev *dev = ep->driver_data;
627 int status = req->status;
628 unsigned long flags;
629 bool queue = false;
630
631 switch (status) {
632
633 /* normal completion */
634 case 0:
635 skb_put(skb, req->actual);
636
637 if (dev->unwrap) {
638 spin_lock_irqsave(&dev->lock, flags);
639 if (dev->port_usb) {
640 status = dev->unwrap(dev->port_usb,
641 skb,
642 &dev->rx_frames);
643 } else {
644 dev_kfree_skb_any(skb);
645 skb = NULL;
646 status = -ENOTCONN;
647 }
648 spin_unlock_irqrestore(&dev->lock, flags);
649 } else {
650 skb_queue_tail(&dev->rx_frames, skb);
651 }
652 skb = NULL;
653
654 if (!status)
655 queue = true;
656 else
657 pr_err_ratelimited("%s nq status: %d\n", __func__, status);
658
659 skb2 = skb_dequeue(&dev->rx_frames);
660 while (skb2) {
661 if (status < 0
662 || ETH_HLEN > skb2->len
663 || skb2->len > VLAN_ETH_FRAME_LEN) {
664 dev->net->stats.rx_errors++;
665 dev->net->stats.rx_length_errors++;
666 ERROR(dev, "rx length %d\n", skb2->len);
667 dev_kfree_skb_any(skb2);
668 goto next_frame;
669 }
670 skb2->protocol = eth_type_trans(skb2, dev->net);
671 dev->net->stats.rx_packets++;
672 dev->net->stats.rx_bytes += skb2->len;
673
674 /* no buffer copies needed, unless hardware can't
675 * use skb buffers.
676 */
677 status = netif_rx(skb2);
678next_frame:
679 skb2 = skb_dequeue(&dev->rx_frames);
680 }
681 break;
682
683 /* software-driven interface shutdown */
684 case -ECONNRESET: /* unlink */
685 case -ESHUTDOWN: /* disconnect etc */
686 VDBG(dev, "rx shutdown, code %d\n", status);
687 goto quiesce;
688
689 /* for hardware automagic (such as pxa) */
690 case -ECONNABORTED: /* endpoint reset */
691 ERROR(dev, "rx %s reset\n", ep->name);
692 defer_kevent(dev, WORK_RX_MEMORY);
693quiesce:
694 dev_kfree_skb_any(skb);
695 skb = NULL;
696 goto clean;
697
698 /* data overrun */
699 case -EOVERFLOW:
700 dev->net->stats.rx_over_errors++;
701 /* FALLTHROUGH */
702
703 default:
704 queue = true;
705 dev->net->stats.rx_errors++;
706 ERROR(dev, "rx status %d\n", status);
707 break;
708 }
709
710 if (skb)
711 dev_kfree_skb_any(skb);
712clean:
713 spin_lock_irqsave(&dev->req_lock, flags);
714 list_add(&req->list, &dev->rx_reqs);
715 spin_unlock_irqrestore(&dev->req_lock, flags);
716
717 if (queue)
718 rx_fill(dev, GFP_ATOMIC);
719}
720
721static void rx_complete(struct usb_ep *ep, struct usb_request *req)
722{
723 struct sk_buff *skb = req->context;
724 struct eth_dev *dev = ep->driver_data;
725 int status = req->status;
726 bool queue = false;
727
728 switch (status) {
729
730 /* normal completion */
731 case 0:
732 skb_put(skb, req->actual);
733
734 if (dev->unwrap) {
735 unsigned long flags;
736
737 spin_lock_irqsave(&dev->lock, flags);
738 if (dev->port_usb) {
739 status = dev->unwrap(dev->port_usb,
740 skb,
741 &dev->rx_frames);
742 if (status == -EINVAL)
743 dev->net->stats.rx_errors++;
744 else if (status == -EOVERFLOW)
745 dev->net->stats.rx_over_errors++;
746 } else {
747 dev_kfree_skb_any(skb);
748 status = -ENOTCONN;
749 }
750 spin_unlock_irqrestore(&dev->lock, flags);
751 } else {
752 skb_queue_tail(&dev->rx_frames, skb);
753 }
754 if (!status)
755 queue = true;
756 break;
757
758 /* software-driven interface shutdown */
759 case -ECONNRESET: /* unlink */
760 case -ESHUTDOWN: /* disconnect etc */
761 VDBG(dev, "rx shutdown, code %d\n", status);
762 goto quiesce;
763
764 /* for hardware automagic (such as pxa) */
765 case -ECONNABORTED: /* endpoint reset */
766 DBG(dev, "rx %s reset\n", ep->name);
767 defer_kevent(dev, WORK_RX_MEMORY);
768quiesce:
769 dev_kfree_skb_any(skb);
770 goto clean;
771
772 /* data overrun */
773 case -EOVERFLOW:
774 dev->net->stats.rx_over_errors++;
775 /* FALLTHROUGH */
776
777 default:
778 queue = true;
779 dev_kfree_skb_any(skb);
780 dev->net->stats.rx_errors++;
781 DBG(dev, "rx status %d\n", status);
782 break;
783 }
784
785clean:
786 spin_lock(&dev->req_lock);
787 list_add(&req->list, &dev->rx_reqs);
788 spin_unlock(&dev->req_lock);
789#ifndef USB_RX_USE_WORK
790 if (queue)
791 tasklet_schedule(&dev->rx_tl);
792#else
793 if (queue)
794 schedule_work(&dev->rx_work);
795#endif
796}
797
798static inline void usb_ep_free_request_tx_mult(struct usb_ep *ep,
799 struct usb_request *req)
800{
801 req_aggr_clean(req);
802 BUG_ON(!AGGRCTX(req));
803#ifdef CONFIG_USBNET_USE_SG
804 if(AGGRCTX(req)->sg)
805 kfree(AGGRCTX(req)->sg);
806#endif
807 kfree(AGGRCTX(req));
808 req->context = NULL;
809 usb_ep_free_request(ep, req);
810}
811
812static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
813{
814 unsigned i;
815 struct usb_request *req;
816 bool usb_in;
817
818 if (!n)
819 return -ENOMEM;
820
821 if (ep->desc->bEndpointAddress & USB_DIR_IN)
822 usb_in = true;
823 else
824 usb_in = false;
825
826 /* queue/recycle up to N requests */
827 i = n;
828 list_for_each_entry(req, list, list) {
829 if (i-- == 0)
830 goto extra;
831 }
832
833
834 while (i--) {
835 req = usb_ep_alloc_request(ep, GFP_ATOMIC);
836 if (!req)
837 return list_empty(list) ? -ENOMEM : 0;
838 /* update completion handler */
839 if (usb_in){
840 req->complete = tx_complete;
841 req->context = (void*)kzalloc(sizeof(struct aggr_ctx), GFP_ATOMIC);
842 if (!req->context){
843 usb_ep_free_request(ep, req);
844 pr_err("%s:%d: error: only %d reqs allocated\n",
845 __func__, __LINE__, (n - i));
846 return list_empty(list) ? -ENOMEM : 0;
847 }
848 skb_queue_head_init(&AGGRCTX(req)->skb_list);
849 AGGRCTX(req)->total_size = 0;
850#ifdef CONFIG_USBNET_USE_SG
851 if (sg_is_enabled)
852 AGGRCTX(req)->sg = alloc_sglist(USBNET_SG_NENTS);
853#endif
854 } else {
855 if (rx_notl)
856 req->complete = rx_complete_notl;
857 else
858 req->complete = rx_complete;
859 }
860 list_add(&req->list, list);
861 }
862
863 return 0;
864
865extra:
866 /* free extras */
867 for (;;) {
868 struct list_head *next;
869
870 next = req->list.next;
871 list_del(&req->list);
872 if (usb_in) {
873 usb_ep_free_request_tx_mult(ep, req);
874 } else
875 usb_ep_free_request(ep, req);
876
877 if (next == list)
878 break;
879
880 req = container_of(next, struct usb_request, list);
881 }
882 return 0;
883}
884
885static int alloc_requests(struct eth_dev *dev, struct gether *link,
886 unsigned n_rx, unsigned n_tx)
887{
888 int status;
889
890 spin_lock(&dev->req_lock);
891 status = prealloc(&dev->tx_reqs, link->in_ep, n_tx);
892 if (status < 0)
893 goto fail;
894 status = prealloc(&dev->rx_reqs, link->out_ep, n_rx);
895 if (status < 0)
896 goto fail;
897 goto done;
898fail:
899 DBG(dev, "can't alloc requests\n");
900done:
901 spin_unlock(&dev->req_lock);
902 return status;
903}
904
905static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
906{
907 struct usb_request *req;
908 unsigned long flags;
909 int req_cnt = 0;
910
911 /* fill unused rxq slots with some skb */
912 spin_lock_irqsave(&dev->req_lock, flags);
913 while (!list_empty(&dev->rx_reqs)) {
914 /* break the nexus of continuous completion and re-submission*/
915 if (++req_cnt > qlen(dev->gadget, true))
916 break;
917
918 req = container_of(dev->rx_reqs.next,
919 struct usb_request, list);
920 list_del_init(&req->list);
921 spin_unlock_irqrestore(&dev->req_lock, flags);
922
923 if (rx_submit(dev, req, gfp_flags) < 0) {
924 spin_lock_irqsave(&dev->req_lock, flags);
925 list_add(&req->list, &dev->rx_reqs);
926 spin_unlock_irqrestore(&dev->req_lock, flags);
927 defer_kevent(dev, WORK_RX_MEMORY);
928 return;
929 }
930
931 spin_lock_irqsave(&dev->req_lock, flags);
932 }
933 spin_unlock_irqrestore(&dev->req_lock, flags);
934}
935
936static void process_rx_tl(unsigned long priv)
937{
938 struct eth_dev *dev = (struct eth_dev *)priv;
939 struct sk_buff *skb;
940 int status = 0;
941
942 if (!dev->port_usb)
943 return;
944
945 while ((skb = skb_dequeue(&dev->rx_frames))) {
946 if (status < 0
947 || ETH_HLEN > skb->len
948 || skb->len > VLAN_ETH_FRAME_LEN) {
949 dev->net->stats.rx_errors++;
950 dev->net->stats.rx_length_errors++;
951 DBG(dev, "rx length %d\n", skb->len);
952 dev_kfree_skb_any(skb);
953 continue;
954 }
955
956 if (dev->unwrap_fixup) {
957 struct sk_buff *new =
958 dev->unwrap_fixup(dev->port_usb, skb);
959 if (!new) {
960 pr_info("unwrap_fixup failed\n");
961 dev_kfree_skb_any(skb);
962 continue;
963 }
964
965 dev_kfree_skb_any(skb);
966 skb = new;
967 WARN_ON(!skb_mac_header_was_set(skb));
968 }
969
970#ifdef CONFIG_DDR_DEVFREQ
971 atomic_inc(&dev->no_rx_skb);
972#endif
973 skb->protocol = eth_type_trans(skb, dev->net);
974 dev->net->stats.rx_packets++;
975 dev->net->stats.rx_bytes += skb->len;
976
977 status = netif_rx(skb);
978 }
979
980 if (netif_running(dev->net))
981 rx_fill(dev, GFP_ATOMIC);
982}
983
984#ifdef USB_RX_USE_WORK
985static DEFINE_MUTEX(rx_work_lock);
986
987static void process_rx_work(struct work_struct *data)
988{
989 struct eth_dev *dev = container_of(data, struct eth_dev, rx_work);
990
991 mutex_lock(&rx_work_lock);
992 local_bh_disable();
993 process_rx_tl((unsigned long)dev);
994 local_bh_enable();
995 mutex_unlock(&rx_work_lock);
996}
997#endif
998
999#if defined(USB_RX_USE_WORK) && defined(CONFIG_USB_MV_HSIC_UDC)
1000static DEFINE_MUTEX(hsic_rx_work_lock);
1001
1002static void process_hsic_rx_work(struct work_struct *data)
1003{
1004 struct eth_dev *dev = container_of(data, struct eth_dev, rx_work);
1005
1006 mutex_lock(&hsic_rx_work_lock);
1007 local_bh_disable();
1008 process_rx_tl((unsigned long)dev);
1009 local_bh_enable();
1010 mutex_unlock(&hsic_rx_work_lock);
1011}
1012#endif
1013
1014static void eth_work(struct work_struct *work)
1015{
1016 struct eth_dev *dev = container_of(work, struct eth_dev, work);
1017
1018 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
1019 if (netif_running(dev->net))
1020 rx_fill(dev, GFP_KERNEL);
1021 }
1022
1023 if (dev->todo)
1024 DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
1025}
1026
1027static void tx_complete(struct usb_ep *ep, struct usb_request *req)
1028{
1029 struct eth_dev *dev;
1030 struct net_device *net;
1031 struct usb_request *new_req;
1032 struct usb_ep *in;
1033 int length;
1034 int retval;
1035 int skb_qlen = skb_queue_len(&AGGRCTX(req)->skb_list);
1036
1037 if (!ep->driver_data) {
1038 usb_ep_free_request_tx_mult(ep, req);
1039 return;
1040 }
1041
1042 dev = ep->driver_data;
1043 net = dev->net;
1044
1045 if (!dev->port_usb) {
1046 usb_ep_free_request_tx_mult(ep, req);
1047 return;
1048 }
1049
1050 switch (req->status) {
1051 default:
1052 dev->net->stats.tx_errors += skb_qlen;
1053 VDBG(dev, "tx err %d\n", req->status);
1054 /* FALLTHROUGH */
1055 case -ECONNRESET: /* unlink */
1056 case -ESHUTDOWN: /* disconnect etc */
1057 break;
1058 case 0:
1059 if (!req->zero)
1060 dev->net->stats.tx_bytes += req->length-1;
1061 else
1062 dev->net->stats.tx_bytes += req->length;
1063 dev->net->stats.tx_packets += skb_qlen;
1064 }
1065
1066 spin_lock(&dev->req_lock);
1067 req_aggr_clean(req);
1068
1069 list_add_tail(&req->list, &dev->tx_reqs);
1070 atomic_dec(&dev->no_tx_req_used);
1071 in = dev->port_usb->in_ep;
1072
1073 if (!list_empty(&dev->tx_reqs)) {
1074 new_req = container_of(dev->tx_reqs.next,
1075 struct usb_request, list);
1076 list_del(&new_req->list);
1077 spin_unlock(&dev->req_lock);
1078 if (AGGRCTX(new_req)->total_size > 0) {
1079 length = AGGRCTX(new_req)->total_size;
1080
1081 /* NCM requires no zlp if transfer is
1082 * dwNtbInMaxSize */
1083 if (dev->port_usb->is_fixed &&
1084 length == dev->port_usb->fixed_in_len &&
1085 (length % in->maxpacket) == 0)
1086 new_req->zero = 0;
1087 else
1088 new_req->zero = 1;
1089
1090 /* use zlp framing on tx for strict CDC-Ether
1091 * conformance, though any robust network rx
1092 * path ignores extra padding. and some hardware
1093 * doesn't like to write zlps.
1094 */
1095 if (new_req->zero && !dev->zlp &&
1096 (length % in->maxpacket) == 0) {
1097 new_req->zero = 0;
1098 length++;
1099#ifdef CONFIG_USBNET_USE_SG
1100 if (sg_is_enabled && new_req->num_sgs) {
1101 new_req->sg[new_req->num_sgs - 1].length++;
1102 }
1103#endif
1104 }
1105 new_req->length = length;
1106#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1107 skb_qlen = skb_queue_len(&AGGRCTX(new_req)->skb_list);
1108 if (skb_qlen > histogram_size && histogram_size) {
1109 histogram_realloc(dev, skb_qlen);
1110 pr_err("%s: %d histogram buffer too small, realloc to %d, "
1111 "hold_count=%d\n", __func__, __LINE__, histogram_size,
1112 skb_qlen);
1113 }
1114 if (dev->tx_mult_histogram && skb_qlen <= histogram_size)
1115 dev->tx_mult_histogram[skb_qlen - 1]++;
1116#endif
1117#ifdef CONFIG_USBNET_USE_SG
1118 if (sg_is_enabled)
1119 build_sglist(new_req, AGGRCTX(new_req));
1120#endif
1121 retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
1122 switch (retval) {
1123 default:
1124 DBG(dev, "tx queue err %d\n", retval);
1125 dev->net->stats.tx_dropped +=
1126 skb_queue_len(&AGGRCTX(new_req)->skb_list);
1127 req_aggr_clean(new_req);
1128 spin_lock(&dev->req_lock);
1129 list_add_tail(&new_req->list,
1130 &dev->tx_reqs);
1131 spin_unlock(&dev->req_lock);
1132 break;
1133 case 0:
1134 atomic_inc(&dev->no_tx_req_used);
1135 //net->trans_start = jiffies;
1136 }
1137 } else {
1138 spin_lock(&dev->req_lock);
1139 /*
1140 * Put the idle request at the back of the
1141 * queue. The xmit function will put the
1142 * unfinished request at the beginning of the
1143 * queue.
1144 */
1145 list_add_tail(&new_req->list, &dev->tx_reqs);
1146 spin_unlock(&dev->req_lock);
1147 }
1148 } else {
1149 spin_unlock(&dev->req_lock);
1150 }
1151
1152 if (netif_carrier_ok(dev->net))
1153 netif_wake_queue(dev->net);
1154}
1155
1156static inline int is_promisc(u16 cdc_filter)
1157{
1158 return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
1159}
1160
1161static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
1162 struct net_device *net)
1163{
1164 struct eth_dev *dev = netdev_priv(net);
1165 int skb_qlen = 0;
1166 int length = skb->len;
1167 struct sk_buff *pending_skb = NULL;
1168 int retval;
1169 struct usb_request *req = NULL;
1170 unsigned long flags;
1171 struct usb_ep *in;
1172 u16 cdc_filter;
1173
1174 if (unlikely(!skb))
1175 return NETDEV_TX_OK;
1176
1177 spin_lock_irqsave(&dev->lock, flags);
1178 if (dev->port_usb) {
1179 in = dev->port_usb->in_ep;
1180 cdc_filter = dev->port_usb->cdc_filter;
1181 } else {
1182 in = NULL;
1183 cdc_filter = 0;
1184 }
1185 spin_unlock_irqrestore(&dev->lock, flags);
1186
1187 if (!in) {
1188 dev_kfree_skb_any(skb);
1189 return NETDEV_TX_OK;
1190 }
1191
1192 /* apply outgoing CDC or RNDIS filters */
1193 if (!is_promisc(cdc_filter)) {
1194 u8 *dest = skb->data;
1195
1196 if (is_multicast_ether_addr(dest)) {
1197 u16 type;
1198
1199 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
1200 * SET_ETHERNET_MULTICAST_FILTERS requests
1201 */
1202 if (is_broadcast_ether_addr(dest))
1203 type = USB_CDC_PACKET_TYPE_BROADCAST;
1204 else
1205 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
1206 if (!(cdc_filter & type)) {
1207 dev_kfree_skb_any(skb);
1208 return NETDEV_TX_OK;
1209 }
1210 }
1211 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
1212 }
1213
1214 spin_lock_irqsave(&dev->req_lock, flags);
1215 /*
1216 * this freelist can be empty if an interrupt triggered disconnect()
1217 * and reconfigured the gadget (shutting down this queue) after the
1218 * network stack decided to xmit but before we got the spinlock.
1219 */
1220 if (list_empty(&dev->tx_reqs)) {
1221 spin_unlock_irqrestore(&dev->req_lock, flags);
1222 return NETDEV_TX_BUSY;
1223 }
1224
1225 req = container_of(dev->tx_reqs.next, struct usb_request, list);
1226 list_del(&req->list);
1227
1228 /* temporarily stop TX queue when the freelist empties */
1229 if (list_empty(&dev->tx_reqs))
1230 netif_stop_queue(net);
1231 spin_unlock_irqrestore(&dev->req_lock, flags);
1232
1233 BUG_ON(!AGGRCTX(req));
1234 //TODO
1235 //BUG_ON(skb->next || skb->prev);
1236 BUG_ON(skb->next);
1237
1238 /* no buffer copies needed, unless the network stack did it
1239 * or the hardware can't use skb buffers.
1240 * or there's not enough space for extra headers we need
1241 */
1242 if (dev->wrap) {
1243 unsigned long flags;
1244
1245 spin_lock_irqsave(&dev->lock, flags);
1246 if (dev->port_usb)
1247 skb = dev->wrap(dev->port_usb, skb, AGGRCTX(req));
1248 spin_unlock_irqrestore(&dev->lock, flags);
1249 if (!skb)
1250 goto drop;
1251 }
1252 req->buf = skb->data;
1253
1254 if (AGGRCTX(req)->total_size > 0) {
1255 BUG_ON(skb_queue_empty(&AGGRCTX(req)->skb_list));
1256 length = AGGRCTX(req)->total_size;
1257 } else {
1258 BUG_ON(!skb_queue_empty(&AGGRCTX(req)->skb_list));
1259 skb_queue_tail(&AGGRCTX(req)->skb_list, skb);
1260 length = skb->len;
1261 }
1262
1263 if (!AGGR_DONE(req) &&
1264 atomic_read(&dev->no_tx_req_used) > TX_REQ_THRESHOLD) {
1265 BUG_ON(!AGGRCTX(req)->total_size);
1266 spin_lock_irqsave(&dev->req_lock, flags);
1267 list_add(&req->list, &dev->tx_reqs);
1268 spin_unlock_irqrestore(&dev->req_lock, flags);
1269 goto success;
1270 }
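 /* Descriptive note: when wrap() is still accumulating packets into this
 * request (AGGR_DONE is false) and more than TX_REQ_THRESHOLD requests are
 * already queued on the endpoint, the half-filled request is pushed back to
 * the head of tx_reqs so the next xmit keeps aggregating into it instead of
 * queueing a small transfer.
 */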
1271 atomic_inc(&dev->no_tx_req_used);
1272
1273 if (unlikely(!dev->port_usb)) {
1274 ERROR(dev, "port_usb = NULL\n");
1275 goto drop;
1276 }
1277 /* NCM requires no zlp if transfer is dwNtbInMaxSize */
1278 if (dev->port_usb->is_fixed &&
1279 length == dev->port_usb->fixed_in_len &&
1280 (length % in->maxpacket) == 0)
1281 req->zero = 0;
1282 else
1283 req->zero = 1;
1284
1285 /* use zlp framing on tx for strict CDC-Ether conformance,
1286 * though any robust network rx path ignores extra padding.
1287 * and some hardware doesn't like to write zlps.
1288 */
1289 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
1290 req->zero = 0;
1291 length++;
1292#ifdef CONFIG_USBNET_USE_SG
1293 if (sg_is_enabled && req->num_sgs) {
1294 req->sg[req->num_sgs - 1].length++;
1295 }
1296#endif
1297 }
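 /* Worked example of the ZLP workaround above (illustrative): with a
 * 512-byte bulk maxpacket and an aggregated length of exactly 2048 bytes on
 * hardware that cannot send a ZLP, one extra pad byte is sent (2049 bytes)
 * so the transfer ends with a short packet; a robust host rx path ignores
 * the trailing padding, as noted in the comment above.
 */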
1298 req->length = length;
1299
1300#if defined(CONFIG_USB_DWC3) || defined(CONFIG_USB_DWC2)
1301 req->no_interrupt = 0;
1302#else
1303 /* throttle highspeed IRQ rate back slightly */
1304 if (gadget_is_dualspeed(dev->gadget) &&
1305 (dev->gadget->speed >= USB_SPEED_HIGH)) {
1306 dev->tx_qlen++;
1307 if (dev->tx_qlen == TX_REQ_THRESHOLD) {
1308 req->no_interrupt = 0;
1309 dev->tx_qlen = 0;
1310 } else {
1311 req->no_interrupt = 1;
1312 }
1313 } else {
1314 req->no_interrupt = 0;
1315 }
1316#endif
1317
1318 pending_skb = AGGRCTX(req)->pending_skb;
1319 AGGRCTX(req)->pending_skb = NULL;
1320
1321#ifdef CONFIG_USBNET_USE_SG
1322 if (sg_is_enabled)
1323 build_sglist(req, AGGRCTX(req));
1324#endif
1325
1326 retval = usb_ep_queue(in, req, GFP_ATOMIC);
1327
1328 spin_lock_irqsave(&dev->req_lock, flags);
1329 if (likely((retval == 0) && (req->context != NULL))) {
1330 skb_qlen = skb_queue_len(&AGGRCTX(req)->skb_list);
1331 } else if (unlikely(req->context == NULL)) {
1332 spin_unlock_irqrestore(&dev->req_lock, flags);
1333 pr_err_ratelimited("req is already freed\n");
1334 return NETDEV_TX_OK;
1335 }
1336 spin_unlock_irqrestore(&dev->req_lock, flags);
1337
1338 switch (retval) {
1339 default:
1340 DBG(dev, "tx queue err %d\n", retval);
1341 break;
1342 case 0:
1343
1344#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1345 if (skb_qlen > histogram_size && histogram_size) {
1346 histogram_realloc(dev, skb_qlen);
1347 pr_err("%s: %d histogram buffer too small, realloc to %d, "
1348 "hold_count=%d\n", __func__, __LINE__, histogram_size,
1349 skb_qlen);
1350 }
1351 if (dev->tx_mult_histogram && skb_qlen <= histogram_size)
1352 dev->tx_mult_histogram[skb_qlen - 1]++;
1353#endif
1354 //net->trans_start = jiffies;
1355 break;
1356 }
1357
1358 if (retval) {
1359 atomic_dec(&dev->no_tx_req_used);
1360drop:
1361 dev->net->stats.tx_dropped += (skb_qlen == 0) ? 1 : skb_qlen;
1362 req_aggr_clean(req);
1363 spin_lock_irqsave(&dev->req_lock, flags);
1364 if (list_empty(&dev->tx_reqs))
1365 netif_start_queue(net);
1366 list_add_tail(&req->list, &dev->tx_reqs);
1367 spin_unlock_irqrestore(&dev->req_lock, flags);
1368 }
1369
1370 if (pending_skb)
1371 return eth_start_xmit(pending_skb, net);
1372success:
1373 return NETDEV_TX_OK;
1374}
1375
1376/*-------------------------------------------------------------------------*/
1377
1378static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
1379{
1380 DBG(dev, "%s\n", __func__);
1381
1382 /* fill the rx queue */
1383 rx_fill(dev, gfp_flags);
1384
1385 /* and open the tx floodgates */
1386 dev->tx_qlen = 0;
1387 netif_wake_queue(dev->net);
1388}
1389
1390static int eth_open(struct net_device *net)
1391{
1392 struct eth_dev *dev = netdev_priv(net);
1393 struct gether *link;
1394
1395 DBG(dev, "%s\n", __func__);
1396 if (netif_carrier_ok(dev->net))
1397 eth_start(dev, GFP_KERNEL);
1398
1399 spin_lock_irq(&dev->lock);
1400 link = dev->port_usb;
1401 if (link && link->open)
1402 link->open(link);
1403 spin_unlock_irq(&dev->lock);
1404
1405#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1406 if (sysfs_create_group(&dev->net->dev.kobj, &u_ether_attr_group))
1407 pr_err("%s:%d: fail to register sysfs attr group\n", __func__,
1408 __LINE__);
1409#endif
1410
1411 return 0;
1412}
1413
1414static int eth_stop(struct net_device *net)
1415{
1416 struct eth_dev *dev = netdev_priv(net);
1417 unsigned long flags;
1418
1419 VDBG(dev, "%s\n", __func__);
1420 netif_stop_queue(net);
1421
1422 INFO(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
1423 dev->net->stats.rx_packets, dev->net->stats.tx_packets,
1424 dev->net->stats.rx_errors, dev->net->stats.tx_errors
1425 );
1426
1427 /* ensure there are no more active requests */
1428 spin_lock_irqsave(&dev->lock, flags);
1429 if (dev->port_usb) {
1430 struct gether *link = dev->port_usb;
1431 const struct usb_endpoint_descriptor *in;
1432 const struct usb_endpoint_descriptor *out;
1433
1434 if (link->close)
1435 link->close(link);
1436
1437 /* NOTE: we have no abort-queue primitive we could use
1438 * to cancel all pending I/O. Instead, we disable then
1439 * reenable the endpoints ... this idiom may leave toggle
1440 * wrong, but that's a self-correcting error.
1441 *
1442 * REVISIT: we *COULD* just let the transfers complete at
1443 * their own pace; the network stack can handle old packets.
1444 * For the moment we leave this here, since it works.
1445 */
1446 in = link->in_ep->desc;
1447 out = link->out_ep->desc;
1448 /* usb_ep_disable(link->in_ep);
1449 usb_ep_disable(link->out_ep); */
1450 if (netif_carrier_ok(net)) {
1451 INFO(dev, "host still using in/out endpoints\n");
1452 link->in_ep->desc = in;
1453 link->out_ep->desc = out;
1454 /* usb_ep_enable(link->in_ep);
1455 usb_ep_enable(link->out_ep); */
1456 }
1457 }
1458 spin_unlock_irqrestore(&dev->lock, flags);
1459
1460 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1461 sysfs_remove_group(&dev->net->dev.kobj, &u_ether_attr_group);
1462 #endif
1463
1464 return 0;
1465}
1466
1467/*-------------------------------------------------------------------------*/
1468
1469static u8 host_ethaddr[ETH_ALEN];
1470
1471/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
1472static char *dev_addr;
1473module_param(dev_addr, charp, S_IRUGO);
1474MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
1475
1476#ifdef CONFIG_USB_MV_HSIC_UDC
1477static u8 host_ethaddr_hsic[ETH_ALEN];
1478static char *dev_addr_hsic;
1479module_param(dev_addr_hsic, charp, S_IRUGO);
1480MODULE_PARM_DESC(dev_addr_hsic, "HSIC Device Ethernet Addr");
1481#endif
1482
1483/* this address is invisible to ifconfig */
1484static char *host_addr;
1485module_param(host_addr, charp, S_IRUGO);
1486MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
1487
1488void save_usbnet_host_ethaddr(u8 addr[])
1489{
1490 memcpy(host_ethaddr, addr, ETH_ALEN);
1491}
1492
1493static int get_ether_addr(const char *str, u8 *dev_addr)
1494{
1495 if (str) {
1496 unsigned i;
1497
1498 for (i = 0; i < 6; i++) {
1499 unsigned char num;
1500
1501 if ((*str == '.') || (*str == ':'))
1502 str++;
1503 num = hex_to_bin(*str++) << 4;
1504 num |= hex_to_bin(*str++);
1505 dev_addr [i] = num;
1506 }
1507 if (is_valid_ether_addr(dev_addr))
1508 return 0;
1509 }
1510 eth_random_addr(dev_addr);
1511 return 1;
1512}
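/*
 * Accepted formats for the dev_addr/host_addr strings parsed above
 * (hypothetical example values): "aa:bb:cc:dd:ee:01" or "aa.bb.cc.dd.ee.01";
 * each byte is two hex digits and a leading ':' or '.' before a byte is
 * skipped. If the result is not a valid unicast address, a random address is
 * used instead and the function returns 1.
 */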
1513
1514static int get_host_ether_addr(u8 *str, u8 *dev_addr)
1515{
1516 memcpy(dev_addr, str, ETH_ALEN);
1517 if (is_valid_ether_addr(dev_addr))
1518 return 0;
1519
1520 random_ether_addr(dev_addr);
1521 memcpy(str, dev_addr, ETH_ALEN);
1522 return 1;
1523}
1524
1525static const struct net_device_ops eth_netdev_ops = {
1526 .ndo_open = eth_open,
1527 .ndo_stop = eth_stop,
1528 .ndo_start_xmit = eth_start_xmit,
1529 .ndo_change_mtu = ueth_change_mtu,
1530 .ndo_set_mac_address = eth_mac_addr,
1531 .ndo_validate_addr = eth_validate_addr,
1532};
1533
1534static struct device_type gadget_type = {
1535 .name = "gadget",
1536};
1537
1538/**
1539 * gether_setup_name - initialize one ethernet-over-usb link
1540 * @g: gadget to associated with these links
1541 * @ethaddr: NULL, or a buffer in which the ethernet address of the
1542 * host side of the link is recorded
1543 * @netname: name for network device (for example, "usb")
1544 * Context: may sleep
1545 *
1546 * This sets up the single network link that may be exported by a
1547 * gadget driver using this framework. The link layer addresses are
1548 * set up using module parameters.
1549 *
1550 * Returns negative errno, or zero on success
1551 */
1552struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
1553 const char *netname)
1554{
1555 struct eth_dev *dev;
1556 struct net_device *net;
1557 int status;
1558
1559 net = alloc_etherdev(sizeof *dev);
1560 if (!net)
1561 return ERR_PTR(-ENOMEM);
1562#if 0
1563 net->element_used = 0;
1564 for (i = 0; i < DEBUG_ELEMENT_CNT; i++) {
1565 net->caller_elments[i].func_address = 0;
1566 net->caller_elments[i].cnt = 0;
1567 }
1568#endif
1569 dev = netdev_priv(net);
1570 spin_lock_init(&dev->lock);
1571 spin_lock_init(&dev->req_lock);
1572 INIT_WORK(&dev->work, eth_work);
1573#ifdef USB_RX_USE_WORK
1574 INIT_WORK(&dev->rx_work, process_rx_work);
1575#else
1576 tasklet_init(&dev->rx_tl, process_rx_tl,(unsigned long)dev);
1577#endif
1578 INIT_LIST_HEAD(&dev->tx_reqs);
1579 INIT_LIST_HEAD(&dev->rx_reqs);
1580
1581 /* by default we always have a random MAC address */
1582 net->addr_assign_type = NET_ADDR_RANDOM;
1583
1584 skb_queue_head_init(&dev->rx_frames);
1585
1586 /* network device setup */
1587 dev->net = net;
1588 snprintf(net->name, sizeof(net->name), "%s%%d", netname);
1589
1590 if (get_ether_addr(dev_addr, net->dev_addr)) {
1591 net->addr_assign_type = NET_ADDR_RANDOM;
1592 dev_warn(&g->dev,
1593 "using random %s ethernet address\n", "self");
1594 } else {
1595 net->addr_assign_type = NET_ADDR_SET;
1596 }
1597 if (get_host_ether_addr(host_ethaddr, dev->host_mac))
1598 dev_warn(&g->dev, "using random %s ethernet address\n", "host");
1599 else
1600 dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
1601
1602 if (ethaddr)
1603 memcpy(ethaddr, dev->host_mac, ETH_ALEN);
1604
1605 net->netdev_ops = &eth_netdev_ops;
1606
1607 //SET_ETHTOOL_OPS(net, &ops);
1608 net->ethtool_ops = &ops;
1609
1610 dev->gadget = g;
1611 SET_NETDEV_DEV(net, &g->dev);
1612 SET_NETDEV_DEVTYPE(net, &gadget_type);
1613
1614 status = register_netdev(net);
1615 if (status < 0) {
1616 dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1617 free_netdev(net);
1618 dev = ERR_PTR(status);
1619 g_usbnet_dev = NULL;
1620 } else {
1621 INFO(dev, "MAC %pM\n", net->dev_addr);
1622 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1623
1624 /* two kinds of host-initiated state changes:
1625 * - iff DATA transfer is active, carrier is "on"
1626 * - tx queueing enabled if open *and* carrier is "on"
1627 */
1628 netif_carrier_off(net);
1629 g_usbnet_dev = net;
1630 }
1631
1632#ifdef CONFIG_DDR_DEVFREQ
1633 dev->ddr_qos_min.name = net->name;
1634 pm_qos_add_request(&dev->ddr_qos_min,
1635 PM_QOS_DDR_DEVFREQ_MIN, PM_QOS_DEFAULT_VALUE);
1636#endif
1637 return dev;
1638}
1639
1640#ifdef CONFIG_USB_MV_HSIC_UDC
1641struct eth_dev *gether_setup_name_hsic(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
1642 const char *netname)
1643{
1644 struct eth_dev *dev;
1645 struct net_device *net;
1646 int status;
1647
1648 net = alloc_etherdev(sizeof *dev);
1649 if (!net)
1650 return ERR_PTR(-ENOMEM);
1651#if 0
1652 net->element_used = 0;
1653 for (i = 0; i < DEBUG_ELEMENT_CNT; i++) {
1654 net->caller_elments[i].func_address = 0;
1655 net->caller_elments[i].cnt = 0;
1656 }
1657#endif
1658 dev = netdev_priv(net);
1659 spin_lock_init(&dev->lock);
1660 spin_lock_init(&dev->req_lock);
1661 INIT_WORK(&dev->work, eth_work);
1662#ifdef USB_RX_USE_WORK
1663 INIT_WORK(&dev->rx_work, process_hsic_rx_work);
1664#else
1665 tasklet_init(&dev->rx_tl, process_rx_tl,(unsigned long)dev);
1666#endif
1667 INIT_LIST_HEAD(&dev->tx_reqs);
1668 INIT_LIST_HEAD(&dev->rx_reqs);
1669
1670 /* by default we always have a random MAC address */
1671 net->addr_assign_type = NET_ADDR_RANDOM;
1672
1673 skb_queue_head_init(&dev->rx_frames);
1674
1675 /* network device setup */
1676 dev->net = net;
1677 snprintf(net->name, sizeof(net->name), "%s%%d", netname);
1678
1679 if (get_ether_addr(dev_addr, net->dev_addr)) {
1680 net->addr_assign_type = NET_ADDR_RANDOM;
1681 dev_warn(&g->dev,
1682 "using random %s ethernet address\n", "self");
1683 } else {
1684 net->addr_assign_type = NET_ADDR_SET;
1685 }
1686
1687 if (get_host_ether_addr(host_ethaddr_hsic, dev->host_mac))
1688 dev_warn(&g->dev, "using random %s ethernet address\n", "host");
1689 else
1690 dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
1691
1692 if (ethaddr)
1693 memcpy(ethaddr, dev->host_mac, ETH_ALEN);
1694
1695 net->netdev_ops = &eth_netdev_ops;
1696
1697 net->ethtool_ops = &ops;
1698
1699 dev->gadget = g;
1700 SET_NETDEV_DEV(net, &g->dev);
1701 SET_NETDEV_DEVTYPE(net, &gadget_type);
1702
1703 status = register_netdev(net);
1704 if (status < 0) {
1705 dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1706 free_netdev(net);
1707 dev = ERR_PTR(status);
1708 } else {
1709 INFO(dev, "MAC %pM\n", net->dev_addr);
1710 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1711
1712 /* two kinds of host-initiated state changes:
1713 * - iff DATA transfer is active, carrier is "on"
1714 * - tx queueing enabled if open *and* carrier is "on"
1715 */
1716 netif_carrier_off(net);
1717 }
1718
1719#ifdef CONFIG_DDR_DEVFREQ
1720 dev->ddr_qos_min.name = net->name;
1721 pm_qos_add_request(&dev->ddr_qos_min,
1722 PM_QOS_DDR_DEVFREQ_MIN, PM_QOS_DEFAULT_VALUE);
1723#endif
1724 return dev;
1725}
1726
1727#endif
1728
1729#if defined(CONFIG_CPU_ASR18XX) || defined(CONFIG_CPU_ASR1901)
1730static bool is_netifd_online(void)
1731{
1732 struct task_struct *g, *p;
1733
1734 do_each_thread(g, p) {
1735 if (!strcmp("netifd", p->comm))
1736 return true;
1737 } while_each_thread(g, p);
1738
1739 return false;
1740}
1741
1742/* wait until the network is configured by netifd */
1743void wait_usbnet_br_up_and_brigded(void)
1744{
1745 struct net_device *dev;
1746 int timeout = 300;
1747
1748 might_sleep();
1749 /* just return when netifd is not launched */
1750 if (unlikely(!is_netifd_online())) {
1751 return;
1752 } else {
1753 /* first wait for usbnet to be up */
1754 if (g_usbnet_dev == NULL) {
1755 pr_info("!!!!!!!!! usbnet register failed\n");
1756 WARN_ON(1);
1757 return;
1758 } else {
1759 /* there may be issue for netifd if system is not wakeup */
1760 timeout = 200;
1761 while (pm_suspend_target_state != PM_SUSPEND_ON) {
1762 msleep(10);
1763 timeout--;
1764 if (timeout == 0) {
1765 pr_info("!!!!! wait system resume timeout\n");
1766 WARN_ON(1);
1767 return;
1768 }
1769 }
1770 timeout = 300;
1771 while (!((g_usbnet_dev->flags & IFF_UP) &&
1772 (g_usbnet_dev->priv_flags & IFF_BRIDGE_PORT))) {
1773 timeout--;
1774 msleep(30);
1775 if (timeout == 0) {
1776 pr_info("!!!!! wait netifd timeout\n");
1777 WARN_ON(1);
1778 return;
1779 }
1780 }
1781 }
1782
1783 /* wait for br-lan to be up */
1784 timeout = 150;
1785 dev = dev_get_by_name(&init_net, "br-lan");
1786 /* br-lan lookup failed; just warn and move on */
1787 if (dev == NULL) {
1788 pr_info("!!!!!!!!! br-lan get failed\n");
1789 WARN_ON(1);
1790 return;
1791 } else {
1792 while (!(dev->flags & IFF_UP)) {
1793 timeout--;
1794 msleep(30);
1795 if (timeout == 0) {
1796 pr_info("!!!!! wait netifd timeout\n");
1797 WARN_ON(1);
1798 dev_put(dev);
1799 return;
1800 }
1801 }
1802 dev_put(dev);
1803 return;
1804 }
1805 }
1806}
1807
1808void wait_usbnet_if_down(void)
1809{
1810 int timeout = 60;
1811
1812 might_sleep();
1813 /* just return when netifd is not launched */
1814 if (unlikely(!is_netifd_online())) {
1815 return;
1816 } else {
1817 while (!get_usbnet_ifdown_flag()) {
1818 timeout--;
1819 msleep(30);
1820 if (timeout == 0) {
1821 pr_info("!!!!! wait usbnet if_down timeout\n");
1822 WARN_ON(1);
1823 return;
1824 }
1825 }
1826 }
1827}
1828#endif
1829
1830#ifdef CONFIG_DDR_DEVFREQ
1831#ifdef CONFIG_CPU_ASR1803
1832extern void asr1803_set_32k_to_rtc32k(void);
1833#endif
1834
1835static void txrx_monitor_work(struct work_struct *work)
1836{
1837 static unsigned long old_rx_bytes = 0;
1838 static unsigned long old_tx_bytes = 0;
1839 unsigned long rx_bytes, tx_bytes;
1840
1841 struct eth_dev *dev = container_of(work, struct eth_dev,
1842 txrx_monitor_work.work);
1843
1844 if (!dev || !dev->net || !dev->port_usb) {
1845 pr_err("%s: dev or net is not ready\n", __func__);
1846 return;
1847 }
1848
1849#ifdef CONFIG_CPU_ASR1803
1850 /* call it in this polling loop for none suspend usb dongle mode */
1851 asr1803_set_32k_to_rtc32k();
1852#endif
1853 if (likely(dev->net->stats.rx_bytes > old_rx_bytes))
1854 rx_bytes = dev->net->stats.rx_bytes - old_rx_bytes;
1855 else
1856 rx_bytes = ULONG_MAX - old_rx_bytes + dev->net->stats.rx_bytes + 1;
1857
1858 if (likely(dev->net->stats.tx_bytes > old_tx_bytes))
1859 tx_bytes = dev->net->stats.tx_bytes - old_tx_bytes;
1860 else
1861 tx_bytes = ULONG_MAX - old_tx_bytes + dev->net->stats.tx_bytes + 1;
1862
1863 old_rx_bytes = dev->net->stats.rx_bytes;
1864 old_tx_bytes = dev->net->stats.tx_bytes;
1865
1866 /* first check the tx side and boost the DDR freq to DDR_BOOST_FREQ */
1867 if (tx_bytes >= DDR_TX_BOOST_BYTES) {
1868 pm_qos_update_request_timeout(&dev->ddr_qos_min,
1869 DDR_BOOST_FREQ,
1870 (2 * USEC_PER_SEC / INTERVALS_PER_SEC));
1871 goto out;
1872 }
1873
1874 if (rx_bytes >= rx_boost_thr) {
1875 pm_qos_update_request_timeout(&dev->ddr_qos_min,
1876 DDR_BOOST_FREQ,
1877 (2 * USEC_PER_SEC / INTERVALS_PER_SEC));
1878 goto out;
1879 }
1880
1881 if (tx_bytes < DDR_TX_BOOST_BYTES && rx_bytes < rx_boost_thr)
1882 pm_qos_update_request(&dev->ddr_qos_min, PM_QOS_DEFAULT_VALUE);
1883out:
1884 schedule_delayed_work(&dev->txrx_monitor_work, HZ / INTERVALS_PER_SEC);
1885}
1886#endif
1887
1888/**
1889 * gether_cleanup - remove Ethernet-over-USB device
1890 * Context: may sleep
1891 *
1892 * This is called to free all resources allocated by @gether_setup().
1893 */
1894void gether_cleanup(struct eth_dev *dev)
1895{
1896 if (!dev)
1897 return;
1898
1899#ifndef USB_RX_USE_WORK
1900 tasklet_kill(&dev->rx_tl);
1901#endif
1902 unregister_netdev(dev->net);
1903 flush_work(&dev->work);
1904#ifdef USB_RX_USE_WORK
1905 flush_work(&dev->rx_work);
1906#endif
1907
1908#ifdef CONFIG_DDR_DEVFREQ
1909 if (dev->dwork_inited)
1910 flush_delayed_work(&dev->txrx_monitor_work);
1911 pm_qos_remove_request(&dev->ddr_qos_min);
1912#endif
1913
1914 free_netdev(dev->net);
1915}
1916
1917/**
1918 * gether_connect - notify network layer that USB link is active
1919 * @link: the USB link, set up with endpoints, descriptors matching
1920 * current device speed, and any framing wrapper(s) set up.
1921 * Context: irqs blocked
1922 *
1923 * This is called to activate endpoints and let the network layer know
1924 * the connection is active ("carrier detect"). It may cause the I/O
1925 * queues to open and start letting network packets flow, but will in
1926 * any case activate the endpoints so that they respond properly to the
1927 * USB host.
1928 *
1929 * Verify net_device pointer returned using IS_ERR(). If it doesn't
1930 * indicate some error code (negative errno), ep->driver_data values
1931 * have been overwritten.
1932 */
1933struct net_device *gether_connect(struct gether *link)
1934{
1935 struct eth_dev *dev = link->ioport;
1936 int result = 0;
1937
1938 if (!dev)
1939 return ERR_PTR(-EINVAL);
1940
1941#ifdef CONFIG_USBNET_USE_SG
1942 sg_is_enabled = link->is_sg_mode;
1943 pr_info("sg enabled: %d\n", sg_is_enabled);
1944#endif
1945
1946 link->in_ep->driver_data = dev;
1947 result = usb_ep_enable(link->in_ep);
1948 if (result != 0) {
1949 DBG(dev, "enable %s --> %d\n",
1950 link->in_ep->name, result);
1951 goto fail0;
1952 }
1953
1954 link->out_ep->driver_data = dev;
1955 result = usb_ep_enable(link->out_ep);
1956 if (result != 0) {
1957 DBG(dev, "enable %s --> %d\n",
1958 link->out_ep->name, result);
1959 goto fail1;
1960 }
1961 if (result == 0)
1962 result = alloc_requests(dev, link, qlen(dev->gadget, true),
1963 qlen(dev->gadget, false));
1964
1965 if (result == 0) {
1966 dev->zlp = link->is_zlp_ok;
1967 DBG(dev, "qlen_rx %d qlen_tx %d\n", qlen(dev->gadget, true),
1968 qlen(dev->gadget, false));
1969
1970#ifdef CONFIG_DDR_DEVFREQ
1971 dev->tx_boost_threshhold = qlen(dev->gadget, false) / 5;
1972
1973 /* boost when about 12 * 8 = 96Mbps */
1974 dev->rx_boost_threshhold = 8;
1975 atomic_set(&dev->no_rx_skb, 0);
1976 if (false == dev->dwork_inited) {
1977 INIT_DELAYED_WORK(&dev->txrx_monitor_work, txrx_monitor_work);
1978 dev->dwork_inited = true;
1979 }
1980#endif
1981 dev->header_len = link->header_len;
1982 dev->unwrap = link->unwrap;
1983 dev->wrap = link->wrap;
1984 dev->unwrap_fixup = link->unwrap_fixup;
1985 dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
1986 spin_lock(&dev->lock);
1987 atomic_set(&dev->no_tx_req_used, 0);
1988 dev->port_usb = link;
1989 if (netif_running(dev->net)) {
1990 if (link->open)
1991 link->open(link);
1992 } else {
1993 if (link->close)
1994 link->close(link);
1995 }
1996 spin_unlock(&dev->lock);
1997
1998 netif_carrier_on(dev->net);
1999 if (netif_running(dev->net))
2000 eth_start(dev, GFP_ATOMIC);
2001
2002#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2003 /* allocate the tx multi-packet histogram buffer */
2004 BUG_ON(dev->tx_mult_histogram);
2005 histogram_size = 16;
2006 dev->tx_mult_histogram = kzalloc(sizeof(int) * histogram_size, GFP_ATOMIC);
2007 if (!dev->tx_mult_histogram) {
2008 histogram_size = 0;
2009 pr_err("u_ether: failed to alloc tx_mult_histogram\n");
2010 }
2011#endif
2012
2013#ifdef CONFIG_DDR_DEVFREQ
2014 /* the delay set as 10ms */
2015 schedule_delayed_work(&dev->txrx_monitor_work, HZ / 100);
2016#endif
2017 /* on error, disable any endpoints */
2018 } else {
2019 (void) usb_ep_disable(link->out_ep);
2020fail1:
2021 (void) usb_ep_disable(link->in_ep);
2022 }
2023fail0:
2024 /* caller is responsible for cleanup on error */
2025 if (result < 0)
2026 return ERR_PTR(result);
2027
2028 return dev->net;
2029}
2030
2031/**
2032 * gether_disconnect - notify network layer that USB link is inactive
2033 * @link: the USB link, on which gether_connect() was called
2034 * Context: irqs blocked
2035 *
2036 * This is called to deactivate endpoints and let the network layer know
2037 * the connection went inactive ("no carrier").
2038 *
2039 * On return, the state is as if gether_connect() had never been called.
2040 * The endpoints are inactive, and accordingly without active USB I/O.
2041 * Pointers to endpoint descriptors and endpoint private data are nulled.
2042 */
2043void gether_disconnect(struct gether *link)
2044{
2045 struct eth_dev *dev = link->ioport;
2046 struct usb_request *req;
2047 struct sk_buff *skb;
2048
2049 WARN_ON(!dev);
2050 if (!dev)
2051 return;
2052
2053 DBG(dev, "%s\n", __func__);
2054
2055 netif_stop_queue(dev->net);
2056 netif_carrier_off(dev->net);
2057
2058#ifdef CONFIG_DDR_DEVFREQ
2059 /* don't use cancel_delayed_work_sync as this function will be called
2060 * in irq context under MAC-ECM resume process
2061 */
2062 if (work_pending(&dev->txrx_monitor_work.work))
2063 cancel_delayed_work(&dev->txrx_monitor_work);
2064#endif
2065
2066#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2067 histogram_size = 0;
2068 if (dev->tx_mult_histogram) {
2069 kfree(dev->tx_mult_histogram);
2070 dev->tx_mult_histogram = NULL;
2071 }
2072#endif
2073 /* disable endpoints, forcing (synchronous) completion
2074 * of all pending i/o. then free the request objects
2075 * and forget about the endpoints.
2076 */
2077 usb_ep_disable(link->in_ep);
2078 spin_lock(&dev->req_lock);
2079 while (!list_empty(&dev->tx_reqs)) {
2080 req = container_of(dev->tx_reqs.next,
2081 struct usb_request, list);
2082 list_del(&req->list);
2083 spin_unlock(&dev->req_lock);
2084 usb_ep_free_request_tx_mult(link->in_ep, req);
2085 spin_lock(&dev->req_lock);
2086 }
2087 spin_unlock(&dev->req_lock);
2088 link->in_ep->driver_data = NULL;
2089 link->in_ep->desc = NULL;
2090
2091 usb_ep_disable(link->out_ep);
2092 spin_lock(&dev->req_lock);
2093 while (!list_empty(&dev->rx_reqs)) {
2094 req = container_of(dev->rx_reqs.next,
2095 struct usb_request, list);
2096 list_del(&req->list);
2097
2098 spin_unlock(&dev->req_lock);
2099 usb_ep_free_request(link->out_ep, req);
2100 spin_lock(&dev->req_lock);
2101 }
2102 spin_unlock(&dev->req_lock);
2103
2104 spin_lock(&dev->rx_frames.lock);
2105 while ((skb = __skb_dequeue(&dev->rx_frames)))
2106 dev_kfree_skb_any(skb);
2107 spin_unlock(&dev->rx_frames.lock);
2108
2109 link->out_ep->driver_data = NULL;
2110 link->out_ep->desc = NULL;
2111
2112 /* finish forgetting about this USB link episode */
2113 dev->header_len = 0;
2114 dev->unwrap = NULL;
2115 dev->wrap = NULL;
2116
2117 spin_lock(&dev->lock);
2118 dev->port_usb = NULL;
2119 spin_unlock(&dev->lock);
2120}
2121
2122u8 *eth_get_host_mac(struct net_device *net)
2123{
2124 struct eth_dev *eth = netdev_priv(net);
2125 return eth->host_mac;
2126}
2127EXPORT_SYMBOL(eth_get_host_mac);
2128
2129MODULE_DESCRIPTION("ethernet over USB driver");
2130MODULE_LICENSE("GPL v2");