blob: 219a11e8f8d5d4208163d347e9767b8812e65483 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2015 Microchip Technology
4 */
5#include <linux/version.h>
6#include <linux/module.h>
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/ethtool.h>
10#include <linux/usb.h>
11#include <linux/crc32.h>
12#include <linux/signal.h>
13#include <linux/slab.h>
14#include <linux/if_vlan.h>
15#include <linux/uaccess.h>
16#include <linux/linkmode.h>
17#include <linux/list.h>
18#include <linux/ip.h>
19#include <linux/ipv6.h>
20#include <linux/mdio.h>
21#include <linux/phy.h>
22#include <net/ip6_checksum.h>
23#include <net/vxlan.h>
24#include <linux/interrupt.h>
25#include <linux/irqdomain.h>
26#include <linux/irq.h>
27#include <linux/irqchip/chained_irq.h>
28#include <linux/microchipphy.h>
29#include <linux/phy_fixed.h>
30#include <linux/of_mdio.h>
31#include <linux/of_net.h>
32#include "lan78xx.h"
33
34#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
35#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36#define DRIVER_NAME "lan78xx"
37
38#define TX_TIMEOUT_JIFFIES (5 * HZ)
39#define THROTTLE_JIFFIES (HZ / 8)
40#define UNLINK_TIMEOUT_MS 3
41
42#define RX_MAX_QUEUE_MEMORY (60 * 1518)
43
44#define SS_USB_PKT_SIZE (1024)
45#define HS_USB_PKT_SIZE (512)
46#define FS_USB_PKT_SIZE (64)
47
48#define MAX_RX_FIFO_SIZE (12 * 1024)
49#define MAX_TX_FIFO_SIZE (12 * 1024)
50#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
51#define DEFAULT_BULK_IN_DELAY (0x0800)
52#define MAX_SINGLE_PACKET_SIZE (9000)
53#define DEFAULT_TX_CSUM_ENABLE (true)
54#define DEFAULT_RX_CSUM_ENABLE (true)
55#define DEFAULT_TSO_CSUM_ENABLE (true)
56#define DEFAULT_VLAN_FILTER_ENABLE (true)
57#define DEFAULT_VLAN_RX_OFFLOAD (true)
58#define TX_OVERHEAD (8)
59#define RXW_PADDING 2
60
61#define LAN78XX_USB_VENDOR_ID (0x0424)
62#define LAN7800_USB_PRODUCT_ID (0x7800)
63#define LAN7850_USB_PRODUCT_ID (0x7850)
64#define LAN7801_USB_PRODUCT_ID (0x7801)
65#define LAN78XX_EEPROM_MAGIC (0x78A5)
66#define LAN78XX_OTP_MAGIC (0x78F3)
67#define AT29M2AF_USB_VENDOR_ID (0x07C9)
68#define AT29M2AF_USB_PRODUCT_ID (0x0012)
69
70#define MII_READ 1
71#define MII_WRITE 0
72
73#define EEPROM_INDICATOR (0xA5)
74#define EEPROM_MAC_OFFSET (0x01)
75#define MAX_EEPROM_SIZE 512
76#define OTP_INDICATOR_1 (0xF3)
77#define OTP_INDICATOR_2 (0xF7)
78
79#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
80 WAKE_MCAST | WAKE_BCAST | \
81 WAKE_ARP | WAKE_MAGIC)
82
83/* USB related defines */
84#define BULK_IN_PIPE 1
85#define BULK_OUT_PIPE 2
86
87/* default autosuspend delay (mSec)*/
88#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
89
90/* statistic update interval (mSec) */
91#define STAT_UPDATE_TIMER (1 * 1000)
92
93/* time to wait for MAC or FCT to stop (jiffies) */
94#define HW_DISABLE_TIMEOUT (HZ / 10)
95
96/* time to wait between polling MAC or FCT state (ms) */
97#define HW_DISABLE_DELAY_MS 1
98
99/* defines interrupts from interrupt EP */
100#define MAX_INT_EP (32)
101#define INT_EP_INTEP (31)
102#define INT_EP_OTP_WR_DONE (28)
103#define INT_EP_EEE_TX_LPI_START (26)
104#define INT_EP_EEE_TX_LPI_STOP (25)
105#define INT_EP_EEE_RX_LPI (24)
106#define INT_EP_MAC_RESET_TIMEOUT (23)
107#define INT_EP_RDFO (22)
108#define INT_EP_TXE (21)
109#define INT_EP_USB_STATUS (20)
110#define INT_EP_TX_DIS (19)
111#define INT_EP_RX_DIS (18)
112#define INT_EP_PHY (17)
113#define INT_EP_DP (16)
114#define INT_EP_MAC_ERR (15)
115#define INT_EP_TDFU (14)
116#define INT_EP_TDFO (13)
117#define INT_EP_UTX (12)
118#define INT_EP_GPIO_11 (11)
119#define INT_EP_GPIO_10 (10)
120#define INT_EP_GPIO_9 (9)
121#define INT_EP_GPIO_8 (8)
122#define INT_EP_GPIO_7 (7)
123#define INT_EP_GPIO_6 (6)
124#define INT_EP_GPIO_5 (5)
125#define INT_EP_GPIO_4 (4)
126#define INT_EP_GPIO_3 (3)
127#define INT_EP_GPIO_2 (2)
128#define INT_EP_GPIO_1 (1)
129#define INT_EP_GPIO_0 (0)
130
/* ethtool statistics names, listed in the exact order in which the
 * counters appear in struct lan78xx_statstage (and its 64-bit mirror
 * struct lan78xx_statstage64): all RX counters first, then TX, each
 * group ending with its EEE LPI counters.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
180
/* Raw snapshot of the device's 32-bit statistics counters.
 *
 * lan78xx_read_stats() fills this structure word-for-word from the
 * USB_VENDOR_REQUEST_GET_STATS response, so the field order must match
 * the hardware dump layout, lan78xx_gstrings[] and
 * struct lan78xx_statstage64 exactly.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
230
/* 64-bit accumulated view of the counters in struct lan78xx_statstage.
 *
 * Field order must stay identical to the 32-bit structure:
 * lan78xx_update_stats() walks both as flat u32/u64 arrays in parallel
 * when folding rollovers into these totals.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
280
/* Register addresses captured for a diagnostic register dump —
 * presumably exposed through the ethtool get_regs interface; the
 * consumer is not visible in this chunk (TODO confirm).
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
302
303#define PHY_REG_SIZE (32 * sizeof(u32))
304
305struct lan78xx_net;
306
/* Driver-private receive-filter state; a pointer to this structure is
 * stored in dev->data[0] (see the casts in lan78xx_dataport_write() and
 * lan78xx_set_multicast()).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* cached RFE_CTL value, guarded by rfe_ctl_lock */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table; [0]=MAF_HI, [1]=MAF_LO */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast; /* deferred multicast filter write */
	struct work_struct set_vlan;	/* deferred VLAN table write */
	u32 wol;
};
319
/* Lifecycle state of an skb on the USB TX/RX queues; kept in
 * struct skb_data, which lives in skb->cb.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
329
struct skb_data { /* skb->cb is one of these */
	struct urb *urb;		/* URB this skb is queued on */
	struct lan78xx_net *dev;	/* owning device */
	enum skb_state state;		/* see enum skb_state */
	size_t length;			/* payload length for accounting */
	int num_of_packet;		/* packets aggregated in this buffer */
};
337
/* Context attached to an asynchronous USB control request. */
struct usb_context {
	struct usb_ctrlrequest req;	/* the control setup packet */
	struct lan78xx_net *dev;	/* owning device */
};
342
343#define EVENT_TX_HALT 0
344#define EVENT_RX_HALT 1
345#define EVENT_RX_MEMORY 2
346#define EVENT_STS_SPLIT 3
347#define EVENT_LINK_RESET 4
348#define EVENT_RX_PAUSED 5
349#define EVENT_DEV_WAKING 6
350#define EVENT_DEV_ASLEEP 7
351#define EVENT_DEV_OPEN 8
352#define EVENT_STAT_UPDATE 9
353
/* Statistics bookkeeping: the last raw hardware snapshot, per-counter
 * rollover counts and limits, and the accumulated 64-bit totals.  All
 * fields are guarded by access_lock (see lan78xx_update_stats()).
 */
struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;		/* last raw snapshot */
	struct lan78xx_statstage rollover_count; /* times each counter wrapped */
	struct lan78xx_statstage rollover_max;	/* per-counter wrap modulus - 1 */
	struct lan78xx_statstage64 curr_stat;	/* accumulated 64-bit totals */
};
361
/* State for demultiplexing the chip's interrupt-endpoint bits
 * (INT_EP_*) into a Linux IRQ domain, e.g. for the PHY interrupt.
 */
struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;		/* cached interrupt-enable bitmap */
	struct mutex irq_lock; /* for irq bus access */
};
370
/* Per-adapter driver state; allocated as netdev_priv() of @net. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;			/* RX URB queue depth */
	int tx_qlen;			/* TX URB queue depth */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;	/* bottom half for URB completion work */
	struct delayed_work wq;

	int msg_enable;			/* netif message level bitmap */

	struct urb *urb_intr;		/* interrupt endpoint URB */
	struct usb_anchor deferred;

	struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex phy_mutex; /* for phy access */
	unsigned int pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu; /* count any extra framing */
	size_t rx_urb_size; /* size for rx urbs */

	unsigned long flags;		/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned int maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;	/* periodic statistics refresh */

	/* data[0] holds a struct lan78xx_priv pointer (see the casts in
	 * lan78xx_dataport_write() / lan78xx_set_multicast()).
	 */
	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;			/* ID_REV chip id, e.g. ID_REV_CHIP_ID_7800_ */
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;			/* flow control resolved by autoneg? */
	u8 fc_request_control;		/* manual FLOW_CTRL_TX/RX request */

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};
427
428/* define external phy id */
429#define PHY_LAN8835 (0x0007C130)
430#define PHY_KSZ9031RNX (0x00221620)
431
432/* use ethtool to change the level for any given device */
433static int msg_level = -1;
434module_param(msg_level, int, 0);
435MODULE_PARM_DESC(msg_level, "Override default message level");
436
437static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
438{
439 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
440 int ret;
441
442 if (!buf)
443 return -ENOMEM;
444
445 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
446 USB_VENDOR_REQUEST_READ_REGISTER,
447 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
448 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
449 if (likely(ret >= 0)) {
450 le32_to_cpus(buf);
451 *data = *buf;
452 } else {
453 netdev_warn(dev->net,
454 "Failed to read register index 0x%08x. ret = %d",
455 index, ret);
456 }
457
458 kfree(buf);
459
460 return ret;
461}
462
463static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
464{
465 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
466 int ret;
467
468 if (!buf)
469 return -ENOMEM;
470
471 *buf = data;
472 cpu_to_le32s(buf);
473
474 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
475 USB_VENDOR_REQUEST_WRITE_REGISTER,
476 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
477 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
478 if (unlikely(ret < 0)) {
479 netdev_warn(dev->net,
480 "Failed to write register index 0x%08x. ret = %d",
481 index, ret);
482 }
483
484 kfree(buf);
485
486 return ret;
487}
488
489static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
490 u32 data)
491{
492 int ret;
493 u32 buf;
494
495 ret = lan78xx_read_reg(dev, reg, &buf);
496 if (ret < 0)
497 return ret;
498
499 buf &= ~mask;
500 buf |= (mask & data);
501
502 ret = lan78xx_write_reg(dev, reg, buf);
503 if (ret < 0)
504 return ret;
505
506 return 0;
507}
508
/* Fetch the complete hardware statistics block with a single vendor
 * control read.
 *
 * On success each 32-bit counter is byte-swapped from little-endian and
 * copied into @data.  Returns the raw usb_control_msg() result (bytes
 * transferred on success, negative errno on failure) or -ENOMEM if the
 * DMA-able bounce buffer cannot be allocated.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      /* NOTE(review): a read using the "set" timeout
			       * constant; USB_CTRL_GET_TIMEOUT would match
			       * lan78xx_read_reg() — confirm before changing.
			       */
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* convert each counter in place, then copy to the caller */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}
547
/* If the fresh 32-bit reading in @struct1 is smaller than the value
 * saved from the previous snapshot in @dev_stats.saved, the hardware
 * counter wrapped around: bump its rollover count so the 64-bit totals
 * stay monotonic (see lan78xx_update_stats()).
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
553
/* Compare a fresh hardware snapshot against the previously saved one,
 * bump the rollover count for every counter that wrapped, then save
 * @stats as the new reference snapshot.  One check_counter_rollover()
 * invocation per field of struct lan78xx_statstage.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* the fresh snapshot becomes the reference for the next check */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
607
/* Refresh dev->stats.curr_stat: take a fresh hardware snapshot, record
 * any counter wraps, and rebuild each 64-bit total as
 *
 *	current + rollovers * (rollover_max + 1)
 *
 * The three 32-bit statstage structs and the 64-bit curr_stat are
 * walked as parallel flat arrays, which relies on all four having
 * identical field order.  Silently returns if the interface cannot be
 * runtime-resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* > 0: usb_control_msg() reports bytes transferred on success */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
635
636/* Loop until the read is completed with timeout called with phy_mutex held */
637static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
638{
639 unsigned long start_time = jiffies;
640 u32 val;
641 int ret;
642
643 do {
644 ret = lan78xx_read_reg(dev, MII_ACC, &val);
645 if (unlikely(ret < 0))
646 return -EIO;
647
648 if (!(val & MII_ACC_MII_BUSY_))
649 return 0;
650 } while (!time_after(jiffies, start_time + HZ));
651
652 return -EIO;
653}
654
655static inline u32 mii_access(int id, int index, int read)
656{
657 u32 ret;
658
659 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
660 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
661 if (read)
662 ret |= MII_ACC_MII_READ_;
663 else
664 ret |= MII_ACC_MII_WRITE_;
665 ret |= MII_ACC_MII_BUSY_;
666
667 return ret;
668}
669
670static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
671{
672 unsigned long start_time = jiffies;
673 u32 val;
674 int ret;
675
676 do {
677 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
678 if (unlikely(ret < 0))
679 return -EIO;
680
681 if (!(val & E2P_CMD_EPC_BUSY_) ||
682 (val & E2P_CMD_EPC_TIMEOUT_))
683 break;
684 usleep_range(40, 100);
685 } while (!time_after(jiffies, start_time + HZ));
686
687 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
688 netdev_warn(dev->net, "EEPROM read operation timeout");
689 return -EIO;
690 }
691
692 return 0;
693}
694
695static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
696{
697 unsigned long start_time = jiffies;
698 u32 val;
699 int ret;
700
701 do {
702 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
703 if (unlikely(ret < 0))
704 return -EIO;
705
706 if (!(val & E2P_CMD_EPC_BUSY_))
707 return 0;
708
709 usleep_range(40, 100);
710 } while (!time_after(jiffies, start_time + HZ));
711
712 netdev_warn(dev->net, "EEPROM is busy");
713 return -EIO;
714}
715
716static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
717 u32 length, u8 *data)
718{
719 u32 val;
720 u32 saved;
721 int i, ret;
722 int retval;
723
724 /* depends on chip, some EEPROM pins are muxed with LED function.
725 * disable & restore LED function to access EEPROM.
726 */
727 ret = lan78xx_read_reg(dev, HW_CFG, &val);
728 saved = val;
729 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
730 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
731 ret = lan78xx_write_reg(dev, HW_CFG, val);
732 }
733
734 retval = lan78xx_eeprom_confirm_not_busy(dev);
735 if (retval)
736 return retval;
737
738 for (i = 0; i < length; i++) {
739 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
740 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
741 ret = lan78xx_write_reg(dev, E2P_CMD, val);
742 if (unlikely(ret < 0)) {
743 retval = -EIO;
744 goto exit;
745 }
746
747 retval = lan78xx_wait_eeprom(dev);
748 if (retval < 0)
749 goto exit;
750
751 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
752 if (unlikely(ret < 0)) {
753 retval = -EIO;
754 goto exit;
755 }
756
757 data[i] = val & 0xFF;
758 offset++;
759 }
760
761 retval = 0;
762exit:
763 if (dev->chipid == ID_REV_CHIP_ID_7800_)
764 ret = lan78xx_write_reg(dev, HW_CFG, saved);
765
766 return retval;
767}
768
769static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
770 u32 length, u8 *data)
771{
772 u8 sig;
773 int ret;
774
775 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
776 if ((ret == 0) && (sig == EEPROM_INDICATOR))
777 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
778 else
779 ret = -EINVAL;
780
781 return ret;
782}
783
/* Write @length bytes of raw EEPROM content starting at @offset.
 *
 * Sequence per the E2P command interface: issue a write/erase-enable
 * (EWEN) command once, then for each byte load E2P_DATA and issue a
 * WRITE command, waiting for completion after every command.  On
 * LAN7800 the LED function muxed onto the EEPROM pins is disabled on
 * entry and the saved HW_CFG value is restored on all exit paths.
 *
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved on entry */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
850
/* Read @length bytes from raw OTP memory starting at @offset.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set), it is
 * powered up first and the bit is polled until it clears.  Each byte is
 * then fetched via the OTP_ADDR1/OTP_ADDR2 + OTP_FUNC_CMD(READ) +
 * OTP_CMD_GO register sequence, polling OTP_STATUS for completion.
 * All polls time out after ~1 s.
 *
 * NOTE(review): the lan78xx_read_reg()/lan78xx_write_reg() return
 * values are not checked here, so a failed USB transfer (and a stale
 * "buf") would go unnoticed.
 *
 * Returns 0 on success or -EIO on a poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* high address bits in ADDR1, low bits in ADDR2 */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
903
/* Program @length bytes into raw OTP memory starting at @offset.
 *
 * Powers up the OTP block if needed (same PWRDN_N handshake as the read
 * path), selects BYTE program mode, then for each byte loads the
 * address and data registers and issues the program/verify test
 * command, polling OTP_STATUS for completion.  All polls time out
 * after ~1 s.
 *
 * NOTE(review): as in lan78xx_read_raw_otp(), register-access return
 * values are not checked.  OTP is one-time programmable — failed or
 * partial writes are not recoverable.
 *
 * Returns 0 on success or -EIO on a poll timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* high address bits in ADDR1, low bits in ADDR2 */
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
955
956static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
957 u32 length, u8 *data)
958{
959 u8 sig;
960 int ret;
961
962 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
963
964 if (ret == 0) {
965 if (sig == OTP_INDICATOR_2)
966 offset += 0x100;
967 else if (sig != OTP_INDICATOR_1)
968 ret = -EINVAL;
969 if (!ret)
970 ret = lan78xx_read_raw_otp(dev, offset, length, data);
971 }
972
973 return ret;
974}
975
976static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
977{
978 int i, ret;
979
980 for (i = 0; i < 100; i++) {
981 u32 dp_sel;
982
983 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
984 if (unlikely(ret < 0))
985 return -EIO;
986
987 if (dp_sel & DP_SEL_DPRDY_)
988 return 0;
989
990 usleep_range(40, 100);
991 }
992
993 netdev_warn(dev->net, "%s timed out", __func__);
994
995 return -EIO;
996}
997
/* Write @length words from @buf into the internal RAM bank selected by
 * @ram_select, starting at @addr, through the DP_SEL/DP_ADDR/DP_DATA/
 * DP_CMD dataport interface.  Serialised by pdata->dataport_mutex and
 * bracketed by runtime-PM get/put.
 *
 * NOTE(review): returns 0 (not an error) when the interface cannot be
 * runtime-resumed, and the intermediate lan78xx_read_reg()/
 * lan78xx_write_reg() results inside the loop are not checked —
 * confirm whether callers rely on this best-effort behaviour.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM bank, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* each word write must complete before the next is issued */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1038
1039static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1040 int index, u8 addr[ETH_ALEN])
1041{
1042 u32 temp;
1043
1044 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1045 temp = addr[3];
1046 temp = addr[2] | (temp << 8);
1047 temp = addr[1] | (temp << 8);
1048 temp = addr[0] | (temp << 8);
1049 pdata->pfilter_table[index][1] = temp;
1050 temp = addr[5];
1051 temp = addr[4] | (temp << 8);
1052 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1053 pdata->pfilter_table[index][0] = temp;
1054 }
1055}
1056
1057/* returns hash bit number for given MAC address */
1058static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1059{
1060 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1061}
1062
/* Work handler (pdata->set_multicast): push the shadow filter state
 * built by lan78xx_set_multicast() to the hardware — the multicast
 * hash table via the dataport, the perfect-filter MAF registers, and
 * finally the cached RFE_CTL value.  Runs in process context because
 * the register writes sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
		container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 holds the device's own address and is left untouched;
	 * each entry is invalidated (MAF_HI = 0) before being rewritten
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1086
/* ndo_set_rx_mode handler: rebuild the shadow receive-filter state
 * (RFE_CTL flags, multicast hash table, perfect-filter table) under
 * rfe_ctl_lock, then defer the actual register writes to
 * lan78xx_deferred_multicast_write() — this hook may be called in
 * atomic context and the USB register writes sleep.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean filter configuration */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;	/* NOTE(review): shadows the outer loop index */

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		/* first 32 addresses go into perfect-filter slots 1..32;
		 * any overflow falls back to the 512-bit hash table
		 */
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
					(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1150
/* Program MAC and FIFO flow control after link resolution.
 *
 * The pause capability comes either from autonegotiation (@lcladv /
 * @rmtadv resolved via mii_resolve_flowctrl_fdx()) or from the user's
 * fixed request in dev->fc_request_control.  Always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* 0xFFFF = maximum pause time in transmitted pause frames */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* per-USB-speed FIFO flow thresholds; presumably on/off
	 * watermarks for FCT_FLOW — TODO confirm against the datasheet
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1184
/* React to a PHY interrupt: bring MAC/USB state in line with the new
 * link state.  Runs from deferred-work (sleepable) context via
 * EVENT_LINK_RESET, so blocking register access is allowed.
 *
 * Returns 0 on success or a negative register/MDIO access error.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	/* snapshot the link state under the PHY lock so it cannot change
	 * between the status read and the copy
	 */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		/* link just dropped */
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return ret;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return ret;

		/* no link: statistics polling is pointless */
		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		/* link just came up */
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			/* tune USB3 U1/U2 low-power link states to the
			 * negotiated ethernet speed
			 */
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		/* re-resolve pause from the fresh advertisement words */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		/* restart periodic statistics collection */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* kick the rx/tx bottom half now that the link is usable */
		tasklet_schedule(&dev->bh);
	}

	return 0;
}
1279
1280/* some work can't be done in tasklets, so we use keventd
1281 *
1282 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1283 * but tasklet_schedule() doesn't. hope the failure is rare.
1284 */
1285static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1286{
1287 set_bit(work, &dev->flags);
1288 if (!schedule_delayed_work(&dev->wq, 0))
1289 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1290}
1291
1292static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1293{
1294 u32 intdata;
1295
1296 if (urb->actual_length != 4) {
1297 netdev_warn(dev->net,
1298 "unexpected urb length %d", urb->actual_length);
1299 return;
1300 }
1301
1302 intdata = get_unaligned_le32(urb->transfer_buffer);
1303
1304 if (intdata & INT_ENP_PHY_INT) {
1305 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1306 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1307
1308 if (dev->domain_data.phyirq > 0) {
1309 local_irq_disable();
1310 generic_handle_irq(dev->domain_data.phyirq);
1311 local_irq_enable();
1312 }
1313 } else {
1314 netdev_warn(dev->net,
1315 "unexpected interrupt: 0x%08x\n", intdata);
1316 }
1317}
1318
/* ethtool: size of the EEPROM exposed through get/set_eeprom */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1323
1324static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1325 struct ethtool_eeprom *ee, u8 *data)
1326{
1327 struct lan78xx_net *dev = netdev_priv(netdev);
1328 int ret;
1329
1330 ret = usb_autopm_get_interface(dev->intf);
1331 if (ret)
1332 return ret;
1333
1334 ee->magic = LAN78XX_EEPROM_MAGIC;
1335
1336 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1337
1338 usb_autopm_put_interface(dev->intf);
1339
1340 return ret;
1341}
1342
1343static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1344 struct ethtool_eeprom *ee, u8 *data)
1345{
1346 struct lan78xx_net *dev = netdev_priv(netdev);
1347 int ret;
1348
1349 ret = usb_autopm_get_interface(dev->intf);
1350 if (ret)
1351 return ret;
1352
1353 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1354 * to load data from EEPROM
1355 */
1356 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1357 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1358 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1359 (ee->offset == 0) &&
1360 (ee->len == 512) &&
1361 (data[0] == OTP_INDICATOR_1))
1362 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1363
1364 usb_autopm_put_interface(dev->intf);
1365
1366 return ret;
1367}
1368
1369static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1370 u8 *data)
1371{
1372 if (stringset == ETH_SS_STATS)
1373 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1374}
1375
1376static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1377{
1378 if (sset == ETH_SS_STATS)
1379 return ARRAY_SIZE(lan78xx_gstrings);
1380 else
1381 return -EOPNOTSUPP;
1382}
1383
/* ethtool get_ethtool_stats: refresh hardware counters, then copy the
 * cached snapshot out under the stats lock so the copy is consistent.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1395
1396static void lan78xx_get_wol(struct net_device *netdev,
1397 struct ethtool_wolinfo *wol)
1398{
1399 struct lan78xx_net *dev = netdev_priv(netdev);
1400 int ret;
1401 u32 buf;
1402 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1403
1404 if (usb_autopm_get_interface(dev->intf) < 0)
1405 return;
1406
1407 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1408 if (unlikely(ret < 0)) {
1409 wol->supported = 0;
1410 wol->wolopts = 0;
1411 } else {
1412 if (buf & USB_CFG_RMT_WKP_) {
1413 wol->supported = WAKE_ALL;
1414 wol->wolopts = pdata->wol;
1415 } else {
1416 wol->supported = 0;
1417 wol->wolopts = 0;
1418 }
1419 }
1420
1421 usb_autopm_put_interface(dev->intf);
1422}
1423
1424static int lan78xx_set_wol(struct net_device *netdev,
1425 struct ethtool_wolinfo *wol)
1426{
1427 struct lan78xx_net *dev = netdev_priv(netdev);
1428 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1429 int ret;
1430
1431 if (wol->wolopts & ~WAKE_ALL)
1432 return -EINVAL;
1433
1434 ret = usb_autopm_get_interface(dev->intf);
1435 if (ret < 0)
1436 return ret;
1437
1438 pdata->wol = wol->wolopts;
1439
1440 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1441
1442 phy_ethtool_set_wol(netdev->phydev, wol);
1443
1444 usb_autopm_put_interface(dev->intf);
1445
1446 return ret;
1447}
1448
1449static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1450{
1451 struct lan78xx_net *dev = netdev_priv(net);
1452 struct phy_device *phydev = net->phydev;
1453 int ret;
1454 u32 buf;
1455
1456 ret = usb_autopm_get_interface(dev->intf);
1457 if (ret < 0)
1458 return ret;
1459
1460 ret = phy_ethtool_get_eee(phydev, edata);
1461 if (ret < 0)
1462 goto exit;
1463
1464 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1465 if (buf & MAC_CR_EEE_EN_) {
1466 edata->eee_enabled = true;
1467 edata->eee_active = !!(edata->advertised &
1468 edata->lp_advertised);
1469 edata->tx_lpi_enabled = true;
1470 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1471 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1472 edata->tx_lpi_timer = buf;
1473 } else {
1474 edata->eee_enabled = false;
1475 edata->eee_active = false;
1476 edata->tx_lpi_enabled = false;
1477 edata->tx_lpi_timer = 0;
1478 }
1479
1480 ret = 0;
1481exit:
1482 usb_autopm_put_interface(dev->intf);
1483
1484 return ret;
1485}
1486
1487static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1488{
1489 struct lan78xx_net *dev = netdev_priv(net);
1490 int ret;
1491 u32 buf;
1492
1493 ret = usb_autopm_get_interface(dev->intf);
1494 if (ret < 0)
1495 return ret;
1496
1497 if (edata->eee_enabled) {
1498 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1499 buf |= MAC_CR_EEE_EN_;
1500 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1501
1502 phy_ethtool_set_eee(net->phydev, edata);
1503
1504 buf = (u32)edata->tx_lpi_timer;
1505 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1506 } else {
1507 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1508 buf &= ~MAC_CR_EEE_EN_;
1509 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1510 }
1511
1512 usb_autopm_put_interface(dev->intf);
1513
1514 return 0;
1515}
1516
1517static u32 lan78xx_get_link(struct net_device *net)
1518{
1519 u32 link;
1520
1521 mutex_lock(&net->phydev->lock);
1522 phy_read_status(net->phydev);
1523 link = net->phydev->link;
1524 mutex_unlock(&net->phydev->lock);
1525
1526 return link;
1527}
1528
1529static void lan78xx_get_drvinfo(struct net_device *net,
1530 struct ethtool_drvinfo *info)
1531{
1532 struct lan78xx_net *dev = netdev_priv(net);
1533
1534 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1535 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1536}
1537
/* ethtool get_msglevel: current netif message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1544
/* ethtool set_msglevel: update the netif message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1551
1552static int lan78xx_get_link_ksettings(struct net_device *net,
1553 struct ethtool_link_ksettings *cmd)
1554{
1555 struct lan78xx_net *dev = netdev_priv(net);
1556 struct phy_device *phydev = net->phydev;
1557 int ret;
1558
1559 ret = usb_autopm_get_interface(dev->intf);
1560 if (ret < 0)
1561 return ret;
1562
1563 phy_ethtool_ksettings_get(phydev, cmd);
1564
1565 usb_autopm_put_interface(dev->intf);
1566
1567 return ret;
1568}
1569
1570static int lan78xx_set_link_ksettings(struct net_device *net,
1571 const struct ethtool_link_ksettings *cmd)
1572{
1573 struct lan78xx_net *dev = netdev_priv(net);
1574 struct phy_device *phydev = net->phydev;
1575 int ret = 0;
1576 int temp;
1577
1578 ret = usb_autopm_get_interface(dev->intf);
1579 if (ret < 0)
1580 return ret;
1581
1582 /* change speed & duplex */
1583 ret = phy_ethtool_ksettings_set(phydev, cmd);
1584
1585 if (!cmd->base.autoneg) {
1586 /* force link down */
1587 temp = phy_read(phydev, MII_BMCR);
1588 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1589 mdelay(1);
1590 phy_write(phydev, MII_BMCR, temp);
1591 }
1592
1593 usb_autopm_put_interface(dev->intf);
1594
1595 return ret;
1596}
1597
1598static void lan78xx_get_pause(struct net_device *net,
1599 struct ethtool_pauseparam *pause)
1600{
1601 struct lan78xx_net *dev = netdev_priv(net);
1602 struct phy_device *phydev = net->phydev;
1603 struct ethtool_link_ksettings ecmd;
1604
1605 phy_ethtool_ksettings_get(phydev, &ecmd);
1606
1607 pause->autoneg = dev->fc_autoneg;
1608
1609 if (dev->fc_request_control & FLOW_CTRL_TX)
1610 pause->tx_pause = 1;
1611
1612 if (dev->fc_request_control & FLOW_CTRL_RX)
1613 pause->rx_pause = 1;
1614}
1615
/* ethtool set_pauseparam: record the requested flow-control settings.
 *
 * The MAC is not reprogrammed here; dev->fc_request_control and
 * dev->fc_autoneg are consumed by lan78xx_update_flowcontrol() on the
 * next link change.  With autoneg enabled, the Pause/Asym_Pause
 * advertisement bits are rebuilt and pushed to the PHY so the new
 * preference is renegotiated.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg makes no sense with link autoneg disabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* replace the pause bits in the advertisement with ones
		 * derived from the requested flow control
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1660
1661static int lan78xx_get_regs_len(struct net_device *netdev)
1662{
1663 if (!netdev->phydev)
1664 return (sizeof(lan78xx_regs));
1665 else
1666 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1667}
1668
1669static void
1670lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1671 void *buf)
1672{
1673 u32 *data = buf;
1674 int i, j;
1675 struct lan78xx_net *dev = netdev_priv(netdev);
1676
1677 /* Read Device/MAC registers */
1678 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1679 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1680
1681 if (!netdev->phydev)
1682 return;
1683
1684 /* Read PHY registers */
1685 for (j = 0; j < 32; i++, j++)
1686 data[i] = phy_read(netdev->phydev, j);
1687}
1688
/* ethtool operations table; link settings, WoL and EEE are largely
 * delegated to phylib through the wrappers above.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1712
1713static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1714{
1715 if (!netif_running(netdev))
1716 return -EINVAL;
1717
1718 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1719}
1720
/* Determine and program the MAC address at init time.
 *
 * Priority: an address already present in RX_ADDRL/H (e.g. programmed by
 * firmware), then platform/device-tree data, then EEPROM/OTP, finally a
 * random address.  The result is mirrored into perfect-filter slot 0 and
 * copied into the netdev.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the little-endian register pair into a byte array */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* repack and program the freshly chosen address */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* keep perfect-filter slot 0 in sync with the MAC address */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1769
1770/* MDIO read and write wrappers for phylib */
1771static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1772{
1773 struct lan78xx_net *dev = bus->priv;
1774 u32 val, addr;
1775 int ret;
1776
1777 ret = usb_autopm_get_interface(dev->intf);
1778 if (ret < 0)
1779 return ret;
1780
1781 mutex_lock(&dev->phy_mutex);
1782
1783 /* confirm MII not busy */
1784 ret = lan78xx_phy_wait_not_busy(dev);
1785 if (ret < 0)
1786 goto done;
1787
1788 /* set the address, index & direction (read from PHY) */
1789 addr = mii_access(phy_id, idx, MII_READ);
1790 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1791
1792 ret = lan78xx_phy_wait_not_busy(dev);
1793 if (ret < 0)
1794 goto done;
1795
1796 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1797
1798 ret = (int)(val & 0xFFFF);
1799
1800done:
1801 mutex_unlock(&dev->phy_mutex);
1802 usb_autopm_put_interface(dev->intf);
1803
1804 return ret;
1805}
1806
1807static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1808 u16 regval)
1809{
1810 struct lan78xx_net *dev = bus->priv;
1811 u32 val, addr;
1812 int ret;
1813
1814 ret = usb_autopm_get_interface(dev->intf);
1815 if (ret < 0)
1816 return ret;
1817
1818 mutex_lock(&dev->phy_mutex);
1819
1820 /* confirm MII not busy */
1821 ret = lan78xx_phy_wait_not_busy(dev);
1822 if (ret < 0)
1823 goto done;
1824
1825 val = (u32)regval;
1826 ret = lan78xx_write_reg(dev, MII_DATA, val);
1827
1828 /* set the address, index & direction (write to PHY) */
1829 addr = mii_access(phy_id, idx, MII_WRITE);
1830 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1831
1832 ret = lan78xx_phy_wait_not_busy(dev);
1833 if (ret < 0)
1834 goto done;
1835
1836done:
1837 mutex_unlock(&dev->phy_mutex);
1838 usb_autopm_put_interface(dev->intf);
1839 return 0;
1840}
1841
1842static int lan78xx_mdio_init(struct lan78xx_net *dev)
1843{
1844 struct device_node *node;
1845 int ret;
1846
1847 dev->mdiobus = mdiobus_alloc();
1848 if (!dev->mdiobus) {
1849 netdev_err(dev->net, "can't allocate MDIO bus\n");
1850 return -ENOMEM;
1851 }
1852
1853 dev->mdiobus->priv = (void *)dev;
1854 dev->mdiobus->read = lan78xx_mdiobus_read;
1855 dev->mdiobus->write = lan78xx_mdiobus_write;
1856 dev->mdiobus->name = "lan78xx-mdiobus";
1857 dev->mdiobus->parent = &dev->udev->dev;
1858
1859 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1860 dev->udev->bus->busnum, dev->udev->devnum);
1861
1862 switch (dev->chipid) {
1863 case ID_REV_CHIP_ID_7800_:
1864 case ID_REV_CHIP_ID_7850_:
1865 /* set to internal PHY id */
1866 dev->mdiobus->phy_mask = ~(1 << 1);
1867 break;
1868 case ID_REV_CHIP_ID_7801_:
1869 /* scan thru PHYAD[2..0] */
1870 dev->mdiobus->phy_mask = ~(0xFF);
1871 break;
1872 }
1873
1874 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1875 ret = of_mdiobus_register(dev->mdiobus, node);
1876 of_node_put(node);
1877 if (ret) {
1878 netdev_err(dev->net, "can't register MDIO bus\n");
1879 goto exit1;
1880 }
1881
1882 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1883 return 0;
1884exit1:
1885 mdiobus_free(dev->mdiobus);
1886 return ret;
1887}
1888
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1894
/* phylib link-change callback: just log the transition.  MAC-side
 * handling is done in lan78xx_link_reset() via EVENT_LINK_RESET.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;

	phy_print_status(phydev);
}
1901
1902static int irq_map(struct irq_domain *d, unsigned int irq,
1903 irq_hw_number_t hwirq)
1904{
1905 struct irq_domain_data *data = d->host_data;
1906
1907 irq_set_chip_data(irq, data);
1908 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1909 irq_set_noprobe(irq);
1910
1911 return 0;
1912}
1913
/* irq_domain .unmap: detach chip and chip data from the virq */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1919
/* domain ops for the chip's internal interrupt sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1924
/* irqchip .irq_mask: clear the enable bit in the cached mask; the
 * INT_EP_CTL register itself is written in irq_bus_sync_unlock().
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1931
/* irqchip .irq_unmask: set the enable bit in the cached mask; the
 * INT_EP_CTL register itself is written in irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1938
/* irqchip .irq_bus_lock: begin a mask/unmask transaction; released with
 * the register write-back in lan78xx_irq_bus_sync_unlock().
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1945
/* irqchip .irq_bus_sync_unlock: flush the cached enable mask to the
 * INT_EP_CTL register (only if it changed), then drop the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic context.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1962
/* irqchip driving the LAN78xx interrupt-endpoint enable register; uses
 * the slow-bus (mask-cache + sync_unlock) model because register access
 * goes over USB and may sleep.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1970
1971static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1972{
1973 struct device_node *of_node;
1974 struct irq_domain *irqdomain;
1975 unsigned int irqmap = 0;
1976 u32 buf;
1977 int ret = 0;
1978
1979 of_node = dev->udev->dev.parent->of_node;
1980
1981 mutex_init(&dev->domain_data.irq_lock);
1982
1983 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1984 dev->domain_data.irqenable = buf;
1985
1986 dev->domain_data.irqchip = &lan78xx_irqchip;
1987 dev->domain_data.irq_handler = handle_simple_irq;
1988
1989 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1990 &chip_domain_ops, &dev->domain_data);
1991 if (irqdomain) {
1992 /* create mapping for PHY interrupt */
1993 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1994 if (!irqmap) {
1995 irq_domain_remove(irqdomain);
1996
1997 irqdomain = NULL;
1998 ret = -EINVAL;
1999 }
2000 } else {
2001 ret = -EINVAL;
2002 }
2003
2004 dev->domain_data.irqdomain = irqdomain;
2005 dev->domain_data.phyirq = irqmap;
2006
2007 return ret;
2008}
2009
2010static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2011{
2012 if (dev->domain_data.phyirq > 0) {
2013 irq_dispose_mapping(dev->domain_data.phyirq);
2014
2015 if (dev->domain_data.irqdomain)
2016 irq_domain_remove(dev->domain_data.irqdomain);
2017 }
2018 dev->domain_data.phyirq = 0;
2019 dev->domain_data.irqdomain = NULL;
2020}
2021
2022static int lan8835_fixup(struct phy_device *phydev)
2023{
2024 int buf;
2025 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2026
2027 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2028 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2029 buf &= ~0x1800;
2030 buf |= 0x0800;
2031 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2032
2033 /* RGMII MAC TXC Delay Enable */
2034 lan78xx_write_reg(dev, MAC_RGMII_ID,
2035 MAC_RGMII_ID_TXC_DELAY_EN_);
2036
2037 /* RGMII TX DLL Tune Adjust */
2038 lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2039
2040 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2041
2042 return 1;
2043}
2044
/* PHY fixup for the Micrel KSZ9031RNX: program RGMII pad skews so the
 * RX clock delay is provided by the PHY (hence RGMII_RXID below).
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2061
/* LAN7801-specific PHY discovery.
 *
 * LAN7801 has no internal PHY.  If no PHY is found on the MDIO bus, a
 * fixed 1G/full PHY is registered (RGMII straight to a switch/MAC) and
 * the MAC-side RGMII clocks/delays are configured.  Otherwise fixups for
 * the known external PHYs (KSZ9031RNX, LAN8835) are registered.
 *
 * Returns the phy_device to attach, or NULL on failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC provides the TX clock delay; enable 125MHz/25MHz
		 * reference clock outputs
		 */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2116
/* Find, configure and attach the PHY for the detected chip variant.
 *
 * LAN7800/7850 use the internal GMII PHY; LAN7801 goes through
 * lan7801_phy_init() (external or fixed PHY).  After attaching, the
 * advertisement is trimmed (no 1000T half, pause bits per
 * fc_request_control), optional DT LED configuration is applied, and
 * autonegotiation is kicked off.
 *
 * Returns 0 on success or -EIO on any failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo whatever lan7801_phy_init() registered */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		/* "microchip,led-modes" length selects how many of the
		 * four LED outputs get enabled
		 */
		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2220
2221static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2222{
2223 u32 buf;
2224 bool rxenabled;
2225
2226 lan78xx_read_reg(dev, MAC_RX, &buf);
2227
2228 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2229
2230 if (rxenabled) {
2231 buf &= ~MAC_RX_RXEN_;
2232 lan78xx_write_reg(dev, MAC_RX, buf);
2233 }
2234
2235 /* add 4 to size for FCS */
2236 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2237 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2238
2239 lan78xx_write_reg(dev, MAC_RX, buf);
2240
2241 if (rxenabled) {
2242 buf |= MAC_RX_RXEN_;
2243 lan78xx_write_reg(dev, MAC_RX, buf);
2244 }
2245
2246 return 0;
2247}
2248
/* Asynchronously unlink every URB queued on @q that is not already being
 * unlinked.  Returns the number of URBs whose unlink was initiated.
 *
 * The queue lock must be dropped around usb_unlink_urb() (its completion
 * handler takes the same lock), so each pass re-walks the list from the
 * start and uses entry->state == unlink_start to skip URBs already
 * handled.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next URB not yet marked for unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2293
/* ndo_change_mtu: update the hardware max frame size and grow the RX URB
 * buffers when needed.
 *
 * Returns 0 on success, -EDOM for MTUs whose link-layer size is an exact
 * multiple of the USB max packet size (which would require an extra
 * zero-length packet per transfer), or a negative autopm error.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	/* rx_urb_size tracks hard_mtu only while they were equal (i.e. no
	 * larger burst size is in use); if the URBs grew, in-flight RX
	 * URBs are unlinked so they get resubmitted at the new size
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2329
2330static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2331{
2332 struct lan78xx_net *dev = netdev_priv(netdev);
2333 struct sockaddr *addr = p;
2334 u32 addr_lo, addr_hi;
2335
2336 if (netif_running(netdev))
2337 return -EBUSY;
2338
2339 if (!is_valid_ether_addr(addr->sa_data))
2340 return -EADDRNOTAVAIL;
2341
2342 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2343
2344 addr_lo = netdev->dev_addr[0] |
2345 netdev->dev_addr[1] << 8 |
2346 netdev->dev_addr[2] << 16 |
2347 netdev->dev_addr[3] << 24;
2348 addr_hi = netdev->dev_addr[4] |
2349 netdev->dev_addr[5] << 8;
2350
2351 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2352 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2353
2354 /* Added to support MAC address changes */
2355 lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2356 lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2357
2358 return 0;
2359}
2360
2361/* Enable or disable Rx checksum offload engine */
2362static int lan78xx_set_features(struct net_device *netdev,
2363 netdev_features_t features)
2364{
2365 struct lan78xx_net *dev = netdev_priv(netdev);
2366 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2367 unsigned long flags;
2368
2369 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2370
2371 if (features & NETIF_F_RXCSUM) {
2372 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2373 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2374 } else {
2375 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2376 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2377 }
2378
2379 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2380 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2381 else
2382 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2383
2384 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2385 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2386 else
2387 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2388
2389 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2390
2391 lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2392
2393 return 0;
2394}
2395
2396static void lan78xx_deferred_vlan_write(struct work_struct *param)
2397{
2398 struct lan78xx_priv *pdata =
2399 container_of(param, struct lan78xx_priv, set_vlan);
2400 struct lan78xx_net *dev = pdata->dev;
2401
2402 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2403 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2404}
2405
2406static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2407 __be16 proto, u16 vid)
2408{
2409 struct lan78xx_net *dev = netdev_priv(netdev);
2410 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2411 u16 vid_bit_index;
2412 u16 vid_dword_index;
2413
2414 vid_dword_index = (vid >> 5) & 0x7F;
2415 vid_bit_index = vid & 0x1F;
2416
2417 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2418
2419 /* defer register writes to a sleepable context */
2420 schedule_work(&pdata->set_vlan);
2421
2422 return 0;
2423}
2424
2425static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2426 __be16 proto, u16 vid)
2427{
2428 struct lan78xx_net *dev = netdev_priv(netdev);
2429 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2430 u16 vid_bit_index;
2431 u16 vid_dword_index;
2432
2433 vid_dword_index = (vid >> 5) & 0x7F;
2434 vid_bit_index = vid & 0x1F;
2435
2436 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2437
2438 /* defer register writes to a sleepable context */
2439 schedule_work(&pdata->set_vlan);
2440
2441 return 0;
2442}
2443
2444static void lan78xx_init_ltm(struct lan78xx_net *dev)
2445{
2446 int ret;
2447 u32 buf;
2448 u32 regs[6] = { 0 };
2449
2450 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2451 if (buf & USB_CFG1_LTM_ENABLE_) {
2452 u8 temp[2];
2453 /* Get values from EEPROM first */
2454 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2455 if (temp[0] == 24) {
2456 ret = lan78xx_read_raw_eeprom(dev,
2457 temp[1] * 2,
2458 24,
2459 (u8 *)regs);
2460 if (ret < 0)
2461 return;
2462 }
2463 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2464 if (temp[0] == 24) {
2465 ret = lan78xx_read_raw_otp(dev,
2466 temp[1] * 2,
2467 24,
2468 (u8 *)regs);
2469 if (ret < 0)
2470 return;
2471 }
2472 }
2473 }
2474
2475 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2476 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2477 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2478 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2479 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2480 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2481}
2482
/* Start a hardware block by setting its enable bit(s) in @reg. */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
2487
/* Stop a hardware block: clear @hw_enabled in @reg (if set) and poll
 * until the block reports @hw_disabled or HW_DISABLE_TIMEOUT expires.
 * Returns 0 once stopped, -ETIME on timeout, or a negative error from
 * the register access.  Sleeps between polls, so must be called from
 * process context.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* wait for the hardware to acknowledge the disable */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}
2527
/* Flush a FIFO by setting its flush/reset bit(s) in @reg. */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
2532
2533static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2534{
2535 int ret;
2536
2537 netif_dbg(dev, drv, dev->net, "start tx path");
2538
2539 /* Start the MAC transmitter */
2540
2541 ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2542 if (ret < 0)
2543 return ret;
2544
2545 /* Start the Tx FIFO */
2546
2547 ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2548 if (ret < 0)
2549 return ret;
2550
2551 return 0;
2552}
2553
2554static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2555{
2556 int ret;
2557
2558 netif_dbg(dev, drv, dev->net, "stop tx path");
2559
2560 /* Stop the Tx FIFO */
2561
2562 ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2563 if (ret < 0)
2564 return ret;
2565
2566 /* Stop the MAC transmitter */
2567
2568 ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2569 if (ret < 0)
2570 return ret;
2571
2572 return 0;
2573}
2574
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
2582
2583static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2584{
2585 int ret;
2586
2587 netif_dbg(dev, drv, dev->net, "start rx path");
2588
2589 /* Start the Rx FIFO */
2590
2591 ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2592 if (ret < 0)
2593 return ret;
2594
2595 /* Start the MAC receiver*/
2596
2597 ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2598 if (ret < 0)
2599 return ret;
2600
2601 return 0;
2602}
2603
2604static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2605{
2606 int ret;
2607
2608 netif_dbg(dev, drv, dev->net, "stop rx path");
2609
2610 /* Stop the MAC receiver */
2611
2612 ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2613 if (ret < 0)
2614 return ret;
2615
2616 /* Stop the Rx FIFO */
2617
2618 ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2619 if (ret < 0)
2620 return ret;
2621
2622 return 0;
2623}
2624
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
2632
/* Full chip initialisation: lite reset, MAC address setup, USB burst
 * configuration, FIFO sizing, receive-filter defaults and a PHY
 * reset.  Returns 0 on success or a negative error code.  The
 * register sequence is order-sensitive; do not reorder.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	/* issue a lite reset and poll for the bit to clear (1s max) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size the bulk-in burst cap and URB queue depths for the
	 * negotiated USB speed
	 */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
	if (ret < 0)
		return ret;

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	/* clear any stale interrupts and disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* wait for the reset bit to clear AND the ready bit to set */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
	    dev->chipid == ID_REV_CHIP_ID_7850_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	return ret;
}
2829
2830static void lan78xx_init_stats(struct lan78xx_net *dev)
2831{
2832 u32 *p;
2833 int i;
2834
2835 /* initialize for stats update
2836 * some counters are 20bits and some are 32bits
2837 */
2838 p = (u32 *)&dev->stats.rollover_max;
2839 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2840 p[i] = 0xFFFFF;
2841
2842 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2843 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2844 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2845 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2846 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2847 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2848 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2849 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2850 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2851 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2852
2853 set_bit(EVENT_STAT_UPDATE, &dev->flags);
2854}
2855
2856static int lan78xx_open(struct net_device *net)
2857{
2858 struct lan78xx_net *dev = netdev_priv(net);
2859 int ret;
2860
2861 netif_dbg(dev, ifup, dev->net, "open device");
2862
2863 ret = usb_autopm_get_interface(dev->intf);
2864 if (ret < 0)
2865 return ret;
2866
2867 mutex_lock(&dev->dev_mutex);
2868
2869 phy_start(net->phydev);
2870
2871 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2872
2873 /* for Link Check */
2874 if (dev->urb_intr) {
2875 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2876 if (ret < 0) {
2877 netif_err(dev, ifup, dev->net,
2878 "intr submit %d\n", ret);
2879 goto done;
2880 }
2881 }
2882
2883 ret = lan78xx_flush_rx_fifo(dev);
2884 if (ret < 0)
2885 goto done;
2886 ret = lan78xx_flush_tx_fifo(dev);
2887 if (ret < 0)
2888 goto done;
2889
2890 ret = lan78xx_start_tx_path(dev);
2891 if (ret < 0)
2892 goto done;
2893 ret = lan78xx_start_rx_path(dev);
2894 if (ret < 0)
2895 goto done;
2896
2897 lan78xx_init_stats(dev);
2898
2899 set_bit(EVENT_DEV_OPEN, &dev->flags);
2900
2901 netif_start_queue(net);
2902
2903 dev->link_on = false;
2904
2905 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2906done:
2907 mutex_unlock(&dev->dev_mutex);
2908
2909 if (ret < 0)
2910 usb_autopm_put_interface(dev->intf);
2911
2912 return ret;
2913}
2914
/* Unlink every in-flight Tx/Rx URB and wait for the rxq/txq to
 * drain, then release all skbs/URBs parked on the done queue.
 * Completion handlers wake us through dev->wait.  Must be called
 * from process context (sleeps).
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* free everything the completions parked on the done queue */
	while (!skb_queue_empty(&dev->done)) {
		struct skb_data *entry;
		struct sk_buff *skb;

		skb = skb_dequeue(&dev->done);
		entry = (struct skb_data *)(skb->cb);
		usb_free_urb(entry->urb);
		dev_kfree_skb(skb);
	}
}
2949
/* ndo_stop handler: quiesce the queues and bottom half, unlink all
 * URBs, stop the Tx/Rx data paths and the PHY, and neutralize all
 * deferred work before releasing the autopm reference.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	tasklet_kill(&dev->bh);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3000
/* Prepend the 8-byte Tx command header (TX_CMD_A / TX_CMD_B) to an
 * outgoing skb.  On failure the skb is consumed (freed) and NULL is
 * returned; otherwise the same skb is returned with the header pushed
 * on.  NOTE(review): the "flags" argument is currently unused.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;
	void *ptr;

	/* make room in the headroom for the 8-byte command header */
	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* flatten any fragments into one contiguous buffer */
	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	/* request hardware IP/TCP-UDP checksum insertion */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp the MSS to the device's minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	/* ask the hardware to insert the VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* command words are little-endian and possibly unaligned */
	ptr = skb_push(skb, 8);
	put_unaligned_le32(tx_cmd_a, ptr);
	put_unaligned_le32(tx_cmd_b, ptr + 4);

	return skb;
}
3042
/* Move @skb from its active queue (rxq/txq) to dev->done and kick the
 * bottom-half tasklet when the done queue transitions from empty.
 * Called from URB completion context.  Returns the skb's previous
 * state so the caller can detect a racing unlink.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	/* hand-over-hand: swap list->lock for done.lock while keeping
	 * interrupts disabled for the whole sequence
	 */
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the first entry needs to schedule the tasklet */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
3065
/* Bulk-out URB completion handler: account the transmitted packets
 * or classify the error, release the async autopm reference taken at
 * submit time, and hand the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* transient link/protocol faults: stop feeding the
		 * device until the link is sorted out
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
3104
3105static void lan78xx_queue_skb(struct sk_buff_head *list,
3106 struct sk_buff *newsk, enum skb_state state)
3107{
3108 struct skb_data *entry = (struct skb_data *)newsk->cb;
3109
3110 __skb_queue_tail(list, newsk);
3111 entry->state = state;
3112}
3113
/* ndo_start_xmit handler: timestamp and prepend the Tx command header
 * to the skb, park it on txq_pend for the bottom half to batch, and
 * throttle the queue on sub-SuperSpeed links.  Always returns
 * NETDEV_TX_OK; a failed prep is counted as a drop.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	/* device is asleep: kick the deferred-work path to resume it */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	if (skb) {
		skb_tx_timestamp(skb);
		/* consumes skb on failure and returns NULL */
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
3146
3147static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3148{
3149 struct lan78xx_priv *pdata = NULL;
3150 int ret;
3151 int i;
3152
3153 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3154
3155 pdata = (struct lan78xx_priv *)(dev->data[0]);
3156 if (!pdata) {
3157 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3158 return -ENOMEM;
3159 }
3160
3161 pdata->dev = dev;
3162
3163 spin_lock_init(&pdata->rfe_ctl_lock);
3164 mutex_init(&pdata->dataport_mutex);
3165
3166 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3167
3168 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3169 pdata->vlan_table[i] = 0;
3170
3171 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3172
3173 dev->net->features = 0;
3174
3175 if (DEFAULT_TX_CSUM_ENABLE)
3176 dev->net->features |= NETIF_F_HW_CSUM;
3177
3178 if (DEFAULT_RX_CSUM_ENABLE)
3179 dev->net->features |= NETIF_F_RXCSUM;
3180
3181 if (DEFAULT_TSO_CSUM_ENABLE)
3182 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3183
3184 if (DEFAULT_VLAN_RX_OFFLOAD)
3185 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3186
3187 if (DEFAULT_VLAN_FILTER_ENABLE)
3188 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3189
3190 dev->net->hw_features = dev->net->features;
3191
3192 ret = lan78xx_setup_irq_domain(dev);
3193 if (ret < 0) {
3194 netdev_warn(dev->net,
3195 "lan78xx_setup_irq_domain() failed : %d", ret);
3196 goto out1;
3197 }
3198
3199 dev->net->hard_header_len += TX_OVERHEAD;
3200 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3201
3202 /* Init all registers */
3203 ret = lan78xx_reset(dev);
3204 if (ret) {
3205 netdev_warn(dev->net, "Registers INIT FAILED....");
3206 goto out2;
3207 }
3208
3209 ret = lan78xx_mdio_init(dev);
3210 if (ret) {
3211 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3212 goto out2;
3213 }
3214
3215 dev->net->flags |= IFF_MULTICAST;
3216
3217 pdata->wol = WAKE_MAGIC;
3218
3219 return ret;
3220
3221out2:
3222 lan78xx_remove_irq_domain(dev);
3223
3224out1:
3225 netdev_warn(dev->net, "Bind routine FAILED");
3226 cancel_work_sync(&pdata->set_multicast);
3227 cancel_work_sync(&pdata->set_vlan);
3228 kfree(pdata);
3229 return ret;
3230}
3231
3232static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3233{
3234 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3235
3236 lan78xx_remove_irq_domain(dev);
3237
3238 lan78xx_remove_mdio(dev);
3239
3240 if (pdata) {
3241 cancel_work_sync(&pdata->set_multicast);
3242 cancel_work_sync(&pdata->set_vlan);
3243 netif_dbg(dev, ifdown, dev->net, "free pdata");
3244 kfree(pdata);
3245 pdata = NULL;
3246 dev->data[0] = 0;
3247 }
3248}
3249
3250static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3251 struct sk_buff *skb,
3252 u32 rx_cmd_a, u32 rx_cmd_b)
3253{
3254 /* HW Checksum offload appears to be flawed if used when not stripping
3255 * VLAN headers. Drop back to S/W checksums under these conditions.
3256 */
3257 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3258 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3259 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3260 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3261 skb->ip_summed = CHECKSUM_NONE;
3262 } else {
3263 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3264 skb->ip_summed = CHECKSUM_COMPLETE;
3265 }
3266}
3267
3268static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3269 struct sk_buff *skb,
3270 u32 rx_cmd_a, u32 rx_cmd_b)
3271{
3272 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3273 (rx_cmd_a & RX_CMD_A_FVTG_))
3274 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3275 (rx_cmd_b & 0xffff));
3276}
3277
/* Deliver a fully-parsed receive skb to the network stack, or park it
 * on rxq_pause while reception is paused.  Updates the Rx counters
 * and clears the driver's cb state before handing the skb off.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* the skb leaves the driver; wipe our skb_data bookkeeping */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* Rx timestamping may consume the skb; deliver happens later */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3304
/* Parse one bulk-in URB buffer, which may carry several ethernet
 * frames, each preceded by a 10-byte command header (rx_cmd_a/b/c)
 * and padded to a 4-byte boundary.  All but the last frame are
 * delivered via clones; the last frame is trimmed in place and left
 * in @skb for the caller to deliver.  Returns 0 on a malformed
 * buffer or clone failure, 1 otherwise.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the 10-byte little-endian command header */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error flagged: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data; point the clone at
			 * just this frame's bytes
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3376
3377static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3378{
3379 if (!lan78xx_rx(dev, skb)) {
3380 dev->net->stats.rx_errors++;
3381 goto done;
3382 }
3383
3384 if (skb->len) {
3385 lan78xx_skb_return(dev, skb);
3386 return;
3387 }
3388
3389 netif_dbg(dev, rx_err, dev->net, "drop\n");
3390 dev->net->stats.rx_errors++;
3391done:
3392 skb_queue_tail(&dev->done, skb);
3393}
3394
3395static void rx_complete(struct urb *urb);
3396
/* Allocate a receive skb and submit @urb for a bulk-in transfer into
 * it.  On any failure the skb and URB are freed and a negative errno
 * is returned; -ENOLINK means the device is stopped/suspended rather
 * than broken.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, running and not
	 * halted or asleep; the rxq lock keeps the state checks and
	 * the queueing atomic
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bottom half retry later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3456
/* Bulk-in URB completion handler: classify the transfer status, hand
 * the skb to the bottom half via defer_bh(), and resubmit the URB
 * when the device is still usable.  On fatal statuses the URB is kept
 * attached to the skb (state rx_cleanup) so the bottom half frees it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt transfer: count it and recycle the skb */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stalled endpoint: clear it from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the URB with the skb; the bottom half frees it */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit unless the interface is down, halted, or the URB
	 * was already claimed by an unlink
	 */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3526
/* Transmit bottom half: drains dev->txq_pend and submits one bulk-OUT URB.
 *
 * Non-GSO packets are coalesced (u32-aligned) into a single skb up to
 * MAX_SINGLE_PACKET_SIZE; a GSO skb at the head of the queue is sent on
 * its own.  When the device is autosuspended the URB is anchored on
 * dev->deferred and submitted later by the resume path.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* Walk the pending queue under its lock to size the aggregate. */
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			/* entry->length excludes the 8-byte command words */
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each packet starts on a u32 boundary in the aggregate */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	/* Bookkeeping for tx_complete(): URB, owner, payload size, count. */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle the stack while the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
/* drop: reached directly on allocation/autopm failure; skb/urb may be NULL */
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else {
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
	}
}
3655
3656static void lan78xx_rx_bh(struct lan78xx_net *dev)
3657{
3658 struct urb *urb;
3659 int i;
3660
3661 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3662 for (i = 0; i < 10; i++) {
3663 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3664 break;
3665 urb = usb_alloc_urb(0, GFP_ATOMIC);
3666 if (urb)
3667 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3668 return;
3669 }
3670
3671 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3672 tasklet_schedule(&dev->bh);
3673 }
3674 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3675 netif_wake_queue(dev->net);
3676}
3677
3678static void lan78xx_bh(unsigned long param)
3679{
3680 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3681 struct sk_buff *skb;
3682 struct skb_data *entry;
3683
3684 while ((skb = skb_dequeue(&dev->done))) {
3685 entry = (struct skb_data *)(skb->cb);
3686 switch (entry->state) {
3687 case rx_done:
3688 entry->state = rx_cleanup;
3689 rx_process(dev, skb);
3690 continue;
3691 case tx_done:
3692 usb_free_urb(entry->urb);
3693 dev_kfree_skb(skb);
3694 continue;
3695 case rx_cleanup:
3696 usb_free_urb(entry->urb);
3697 dev_kfree_skb(skb);
3698 continue;
3699 default:
3700 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3701 return;
3702 }
3703 }
3704
3705 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3706 /* reset update timer delta */
3707 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3708 dev->delta = 1;
3709 mod_timer(&dev->stat_monitor,
3710 jiffies + STAT_UPDATE_TIMER);
3711 }
3712
3713 if (!skb_queue_empty(&dev->txq_pend))
3714 lan78xx_tx_bh(dev);
3715
3716 if (!timer_pending(&dev->delay) &&
3717 !test_bit(EVENT_RX_HALT, &dev->flags))
3718 lan78xx_rx_bh(dev);
3719 }
3720}
3721
3722static void lan78xx_delayedwork(struct work_struct *work)
3723{
3724 int status;
3725 struct lan78xx_net *dev;
3726
3727 dev = container_of(work, struct lan78xx_net, wq.work);
3728
3729 if (usb_autopm_get_interface(dev->intf) < 0)
3730 return;
3731
3732 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3733 unlink_urbs(dev, &dev->txq);
3734
3735 status = usb_clear_halt(dev->udev, dev->pipe_out);
3736 if (status < 0 &&
3737 status != -EPIPE &&
3738 status != -ESHUTDOWN) {
3739 if (netif_msg_tx_err(dev))
3740 netdev_err(dev->net,
3741 "can't clear tx halt, status %d\n",
3742 status);
3743 } else {
3744 clear_bit(EVENT_TX_HALT, &dev->flags);
3745 if (status != -ESHUTDOWN)
3746 netif_wake_queue(dev->net);
3747 }
3748 }
3749
3750 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3751 unlink_urbs(dev, &dev->rxq);
3752 status = usb_clear_halt(dev->udev, dev->pipe_in);
3753 if (status < 0 &&
3754 status != -EPIPE &&
3755 status != -ESHUTDOWN) {
3756 if (netif_msg_rx_err(dev))
3757 netdev_err(dev->net,
3758 "can't clear rx halt, status %d\n",
3759 status);
3760 } else {
3761 clear_bit(EVENT_RX_HALT, &dev->flags);
3762 tasklet_schedule(&dev->bh);
3763 }
3764 }
3765
3766 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3767 int ret = 0;
3768
3769 clear_bit(EVENT_LINK_RESET, &dev->flags);
3770 if (lan78xx_link_reset(dev) < 0) {
3771 netdev_info(dev->net, "link reset failed (%d)\n",
3772 ret);
3773 }
3774 }
3775
3776 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3777 lan78xx_update_stats(dev);
3778
3779 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3780
3781 mod_timer(&dev->stat_monitor,
3782 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3783
3784 dev->delta = min((dev->delta * 2), 50);
3785 }
3786
3787 usb_autopm_put_interface(dev->intf);
3788}
3789
3790static void intr_complete(struct urb *urb)
3791{
3792 struct lan78xx_net *dev = urb->context;
3793 int status = urb->status;
3794
3795 switch (status) {
3796 /* success */
3797 case 0:
3798 lan78xx_status(dev, urb);
3799 break;
3800
3801 /* software-driven interface shutdown */
3802 case -ENOENT: /* urb killed */
3803 case -ESHUTDOWN: /* hardware gone */
3804 netif_dbg(dev, ifdown, dev->net,
3805 "intr shutdown, code %d\n", status);
3806 return;
3807
3808 /* NOTE: not throttling like RX/TX, since this endpoint
3809 * already polls infrequently
3810 */
3811 default:
3812 netdev_dbg(dev->net, "intr status %d\n", status);
3813 break;
3814 }
3815
3816 if (!netif_running(dev->net))
3817 return;
3818
3819 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3820 status = usb_submit_urb(urb, GFP_ATOMIC);
3821 if (status != 0)
3822 netif_err(dev, timer, dev->net,
3823 "intr resubmit --> %d\n", status);
3824}
3825
/* USB disconnect callback: tear the device down in the reverse order of
 * probe.  The sequence (fixup unregister -> phy_disconnect ->
 * unregister_netdev -> cancel work -> scuttle deferred URBs -> unbind ->
 * kill intr URB -> free) is ordering-sensitive; do not reorder casually.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	/* clear intfdata first so concurrent callbacks see the teardown */
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* a fixed-link PHY was registered by this driver; free it here */
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any TX URBs parked during autosuspend */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3866
/* ndo_tx_timeout: the stack saw no TX completion within watchdog_timeo.
 * Unlink all in-flight TX URBs and kick the tasklet to restart the path.
 */
static void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3874
3875static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3876 struct net_device *netdev,
3877 netdev_features_t features)
3878{
3879 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3880 features &= ~NETIF_F_GSO_MASK;
3881
3882 features = vlan_features_check(skb, features);
3883 features = vxlan_features_check(skb, features);
3884
3885 return features;
3886}
3887
/* net_device callbacks wired up for every lan78xx interface. */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3903
/* Periodic statistics timer: runs in timer (atomic) context, so it only
 * flags EVENT_STAT_UPDATE; the actual register reads happen in the
 * delayed-work handler, which may sleep.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3910
/* USB probe: allocate the netdev, validate endpoints, bind the chip,
 * set up the interrupt URB and PHY, and register the network device.
 *
 * Error unwinding is strictly layered (out5..out1); each label undoes
 * exactly the work completed before the failing step.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	/* take a reference on the usb_device; dropped in out1/disconnect */
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);
	mutex_init(&dev->dev_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* reject interfaces without the expected three endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf is now owned by the URB and freed with it */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* Default delay of 2sec has more overhead than advantage.
	 * Set to 10sec as default.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	/* URB_FREE_BUFFER releases the interrupt buffer along with the URB */
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4066
4067static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4068{
4069 const u16 crc16poly = 0x8005;
4070 int i;
4071 u16 bit, crc, msb;
4072 u8 data;
4073
4074 crc = 0xFFFF;
4075 for (i = 0; i < len; i++) {
4076 data = *buf++;
4077 for (bit = 0; bit < 8; bit++) {
4078 msb = crc >> 15;
4079 crc <<= 1;
4080
4081 if (msb ^ (u16)(data & 1)) {
4082 crc ^= crc16poly;
4083 crc |= (u16)0x0001U;
4084 }
4085 data >>= 1;
4086 }
4087 }
4088
4089 return crc;
4090}
4091
/* Configure the chip for runtime (selective) autosuspend: stop both data
 * paths, clear stale wakeup state, arm good-frame + PHY wakeup, select
 * suspend mode 3, then restart RX so a wake frame can be detected.
 *
 * Returns 0 on success or the first failing register access's errno.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	/* clear any previously latched wakeup sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* enable PHY + WoL wakeup, suspend mode 3 */
	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* write-one-to-clear any pending wakeup status */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* RX must run while suspended so wake frames are seen */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4159
/* Program Wake-on-LAN for a system suspend according to the WAKE_* bits
 * in @wol: stop both data paths, clear old wakeup state, install wakeup
 * frame filters (multicast/ARP) and WUCSR enables, pick the suspend mode,
 * then restart RX so the chip can match wake frames.
 *
 * Returns 0 on success or the first failing register access's errno.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	/* leading bytes of IPv4/IPv6 multicast MACs and the ARP ethertype,
	 * used to build the CRC16 wakeup-frame filters below
	 */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* clear previously latched wakeup sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters before re-arming the ones needed */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x7: match the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3: match the first 2 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		/* mask 0x3000: match bytes 12-13 (the ethertype) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* RX must run while suspended so wake frames are matched */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4356
/* USB suspend callback, for both runtime autosuspend and system suspend.
 *
 * If the interface is open: refuse autosuspend while TX is in flight,
 * quiesce both data paths, flush RX, kill all URBs, then arm either
 * autosuspend wakeup (runtime PM) or the configured WoL (system sleep).
 * If the interface is closed: disable every wakeup source and enter
 * suspend mode 3.
 *
 * Returns 0 on success, -EBUSY to veto autosuspend, or a register errno.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;
	int event;

	event = message.event;

	/* serialize against open/stop/resume */
	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			/* mark asleep under the lock so tx_bh defers URBs */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			/* arm the user-configured Wake-on-LAN sources */
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* write-one-to-clear any pending wakeup status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4469
4470static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4471{
4472 bool pipe_halted = false;
4473 struct urb *urb;
4474
4475 while ((urb = usb_get_from_anchor(&dev->deferred))) {
4476 struct sk_buff *skb = urb->context;
4477 int ret;
4478
4479 if (!netif_device_present(dev->net) ||
4480 !netif_carrier_ok(dev->net) ||
4481 pipe_halted) {
4482 usb_free_urb(urb);
4483 dev_kfree_skb(skb);
4484 continue;
4485 }
4486
4487 ret = usb_submit_urb(urb, GFP_ATOMIC);
4488
4489 if (ret == 0) {
4490 netif_trans_update(dev->net);
4491 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4492 } else {
4493 usb_free_urb(urb);
4494 dev_kfree_skb(skb);
4495
4496 if (ret == -EPIPE) {
4497 netif_stop_queue(dev->net);
4498 pipe_halted = true;
4499 } else if (ret == -ENODEV) {
4500 netif_device_detach(dev->net);
4501 }
4502 }
4503 }
4504
4505 return pipe_halted;
4506}
4507
/* USB resume callback: restart the interrupt URB, flush and resubmit TX
 * URBs deferred during autosuspend, restart the TX path and stats timer,
 * then clear and re-arm the wakeup-status registers.
 *
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	/* serialize against open/stop/suspend */
	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);

				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			/* push out the TX URBs parked while asleep */
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		/* cleared under txq.lock so tx_bh stops deferring URBs */
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (skb_queue_len(&dev->txq) < dev->tx_qlen))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		tasklet_schedule(&dev->bh);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	/* clear wakeup enables and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	/* write-one-to-clear the per-event wake status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
4605
4606static int lan78xx_reset_resume(struct usb_interface *intf)
4607{
4608 struct lan78xx_net *dev = usb_get_intfdata(intf);
4609 int ret;
4610
4611 netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
4612
4613 ret = lan78xx_reset(dev);
4614 if (ret < 0)
4615 return ret;
4616
4617 phy_start(dev->net->phydev);
4618
4619 ret = lan78xx_resume(intf);
4620
4621 return ret;
4622}
4623
/* USB vendor/product IDs this driver binds to; exported to the module
 * loader via MODULE_DEVICE_TABLE below.
 */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4644
/* USB driver glue: probe/disconnect plus the three power-management entry
 * points; autosuspend is supported and hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");