/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.6"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define MII_READ			1
#define MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {	/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex access_lock;	/* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;		/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* define external phy id */
#define PHY_LAN8835			(0x0007C130)
#define PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

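/* Register access helpers. usb_control_msg() needs a DMA-capable transfer
 * buffer, so a scratch u32 is kmalloc'd for each access instead of passing
 * a stack variable, and values are converted between the device's
 * little-endian layout and CPU byte order.
 */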
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

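/* The hardware statistics counters are 32-bit and wrap around. If a fresh
 * reading is smaller than the previously saved one, the counter is assumed
 * to have rolled over exactly once since the last poll; a per-counter
 * rollover count lets lan78xx_update_stats() rebuild 64-bit totals.
 */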
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

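/* Compose an MII_ACC register value: PHY address, register index, the
 * read/write direction flag, and the BUSY bit that starts the transaction.
 */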
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

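/* Write a block of u32s into one of the internal data-port RAMs (for
 * example the VLAN/multicast hash filter); each word is an
 * address/data/command sequence gated on DP_SEL_DPRDY_.
 */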
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

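/* Pack a MAC address into the two perfect-filter (MAF) registers: MAF_LO
 * takes address bytes 0-3 and MAF_HI takes bytes 4-5 plus the valid and
 * destination-type flags. Entry 0 holds the device's own address, so
 * callers start at index 1.
 */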
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

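/* Program MAC flow control from either the autonegotiation result or the
 * user-requested settings; FCT_FLOW holds the FIFO pause on/off thresholds,
 * which differ between SuperSpeed and high-speed operation.
 */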
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

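/* Interrupt endpoint completion handler: the device reports pending
 * interrupt sources in one little-endian 32-bit word. Only the PHY
 * interrupt is handled here; the actual link work is deferred to keventd.
 */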
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* reject unsupported options before taking the autopm reference,
	 * so the error path cannot leak it
	 */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len	= lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count	= lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

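/* MAC address selection order: keep whatever valid address the hardware
 * already has, then try device tree / platform data, then EEPROM or OTP,
 * and finally fall back to a randomly generated address.
 */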
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, the chip may fail to set the mode correctly
	 * when the cable is switched between a long (~50 m+) and a short one.
	 * As a workaround, set the speed to 10 before setting it to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear the interrupt left pending by the workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* re-enable the phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

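/* The device multiplexes up to MAX_INT_EP interrupt sources over its USB
 * interrupt endpoint. An irq_domain exposes them as regular Linux
 * interrupts; the enable mask is cached in irqenable under irq_lock and
 * flushed to INT_EP_CTL from the sleepable irq_bus_sync_unlock callback.
 */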
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* registers are accessed here because irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks executed in a
	 * non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}

static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};

static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}

static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}

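/* Fixups for the external PHYs usable with the LAN7801: they tune the
 * RGMII pad skew / clock delay settings and record which RGMII delay
 * variant the MAC should be configured for.
 */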
1954static int lan8835_fixup(struct phy_device *phydev)
1955{
1956 int buf;
1957 int ret;
1958 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1959
1960 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1961 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1962 buf &= ~0x1800;
1963 buf |= 0x0800;
1964 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1965
1966 /* RGMII MAC TXC Delay Enable */
1967 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1968 MAC_RGMII_ID_TXC_DELAY_EN_);
1969
1970 /* RGMII TX DLL Tune Adjust */
1971 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1972
1973 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1974
1975 return 1;
1976}
1977
1978static int ksz9031rnx_fixup(struct phy_device *phydev)
1979{
1980 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1981
1982 /* Micrel9301RNX PHY configuration */
1983 /* RGMII Control Signal Pad Skew */
1984 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1985 /* RGMII RX Data Pad Skew */
1986 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1987 /* RGMII RX Clock Pad Skew */
1988 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1989
1990 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
1991
1992 return 1;
1993}
1994
1995static int lan78xx_phy_init(struct lan78xx_net *dev)
1996{
1997 int ret;
1998 u32 mii_adv;
1999 struct phy_device *phydev = dev->net->phydev;
2000
2001 phydev = phy_find_first(dev->mdiobus);
2002 if (!phydev) {
2003 netdev_err(dev->net, "no PHY found\n");
2004 return -EIO;
2005 }
2006
2007 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2008 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2009 phydev->is_internal = true;
2010 dev->interface = PHY_INTERFACE_MODE_GMII;
2011
2012 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2013 if (!phydev->drv) {
2014 netdev_err(dev->net, "no PHY driver found\n");
2015 return -EIO;
2016 }
2017
2018 dev->interface = PHY_INTERFACE_MODE_RGMII;
2019
2020 /* external PHY fixup for KSZ9031RNX */
2021 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2022 ksz9031rnx_fixup);
2023 if (ret < 0) {
2024			netdev_err(dev->net, "failed to register KSZ9031RNX fixup\n");
2025 return ret;
2026 }
2027 /* external PHY fixup for LAN8835 */
2028 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2029 lan8835_fixup);
2030 if (ret < 0) {
2031			netdev_err(dev->net, "failed to register LAN8835 fixup\n");
2032 return ret;
2033 }
2034 /* add more external PHY fixup here if needed */
2035
2036 phydev->is_internal = false;
2037 } else {
2038		netdev_err(dev->net, "unknown chip ID found\n");
2039 ret = -EIO;
2040 goto error;
2041 }
2042
2043 /* if phyirq is not set, use polling mode in phylib */
2044 if (dev->domain_data.phyirq > 0)
2045 phydev->irq = dev->domain_data.phyirq;
2046 else
2047 phydev->irq = 0;
2048 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2049
2050	/* default to Auto-MDIX */
2051 phydev->mdix = ETH_TP_MDI_AUTO;
2052
2053 ret = phy_connect_direct(dev->net, phydev,
2054 lan78xx_link_status_change,
2055 dev->interface);
2056 if (ret) {
2057 netdev_err(dev->net, "can't attach PHY to %s\n",
2058 dev->mdiobus->id);
2059 return -EIO;
2060 }
2061
2062 /* MAC doesn't support 1000T Half */
2063 phydev->supported &= ~SUPPORTED_1000baseT_Half;
2064
2065 /* support both flow controls */
2066 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2067 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2068 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2069 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2070
2071 genphy_config_aneg(phydev);
2072
2073 dev->fc_autoneg = phydev->autoneg;
2074
2075 return 0;
2076
2077error:
2078 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2079 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2080
2081 return ret;
2082}
2083
2084static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2085{
2086 int ret = 0;
2087 u32 buf;
2088 bool rxenabled;
2089
2090 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2091
2092 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2093
2094 if (rxenabled) {
2095 buf &= ~MAC_RX_RXEN_;
2096 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2097 }
2098
2099 /* add 4 to size for FCS */
2100 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2101 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2102
2103 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2104
2105 if (rxenabled) {
2106 buf |= MAC_RX_RXEN_;
2107 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2108 }
2109
2110 return 0;
2111}
2112
2113static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2114{
2115 struct sk_buff *skb;
2116 unsigned long flags;
2117 int count = 0;
2118
2119 spin_lock_irqsave(&q->lock, flags);
2120 while (!skb_queue_empty(q)) {
2121 struct skb_data *entry;
2122 struct urb *urb;
2123 int ret;
2124
2125 skb_queue_walk(q, skb) {
2126 entry = (struct skb_data *)skb->cb;
2127 if (entry->state != unlink_start)
2128 goto found;
2129 }
2130 break;
2131found:
2132 entry->state = unlink_start;
2133 urb = entry->urb;
2134
2135		/* Take a reference on the URB so it cannot be freed
2136		 * while usb_unlink_urb() runs; usb_unlink_urb() always
2137		 * races with the .complete handler (including defer_bh),
2138		 * which could otherwise trigger a use-after-free inside
2139		 * usb_unlink_urb().
2140		 */
2141 usb_get_urb(urb);
2142 spin_unlock_irqrestore(&q->lock, flags);
2143 /* during some PM-driven resume scenarios,
2144 * these (async) unlinks complete immediately
2145 */
2146 ret = usb_unlink_urb(urb);
2147 if (ret != -EINPROGRESS && ret != 0)
2148 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2149 else
2150 count++;
2151 usb_put_urb(urb);
2152 spin_lock_irqsave(&q->lock, flags);
2153 }
2154 spin_unlock_irqrestore(&q->lock, flags);
2155 return count;
2156}
2157
2158static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2159{
2160 struct lan78xx_net *dev = netdev_priv(netdev);
2161 int ll_mtu = new_mtu + netdev->hard_header_len;
2162 int old_hard_mtu = dev->hard_mtu;
2163 int old_rx_urb_size = dev->rx_urb_size;
2164 int ret;
2165
2166 /* no second zero-length packet read wanted after mtu-sized packets */
2167 if ((ll_mtu % dev->maxpacket) == 0)
2168 return -EDOM;
2169
2170 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2171
2172 netdev->mtu = new_mtu;
2173
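	/* keep rx_urb_size in step with hard_mtu when they matched, and
	 * recycle in-flight RX URBs so they are requeued at the new size
	 */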
2174 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2175 if (dev->rx_urb_size == old_hard_mtu) {
2176 dev->rx_urb_size = dev->hard_mtu;
2177 if (dev->rx_urb_size > old_rx_urb_size) {
2178 if (netif_running(dev->net)) {
2179 unlink_urbs(dev, &dev->rxq);
2180 tasklet_schedule(&dev->bh);
2181 }
2182 }
2183 }
2184
2185 return 0;
2186}
2187
2188static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2189{
2190 struct lan78xx_net *dev = netdev_priv(netdev);
2191 struct sockaddr *addr = p;
2192 u32 addr_lo, addr_hi;
2193 int ret;
2194
2195 if (netif_running(netdev))
2196 return -EBUSY;
2197
2198 if (!is_valid_ether_addr(addr->sa_data))
2199 return -EADDRNOTAVAIL;
2200
2201 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2202
2203 addr_lo = netdev->dev_addr[0] |
2204 netdev->dev_addr[1] << 8 |
2205 netdev->dev_addr[2] << 16 |
2206 netdev->dev_addr[3] << 24;
2207 addr_hi = netdev->dev_addr[4] |
2208 netdev->dev_addr[5] << 8;
2209
2210 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2211 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2212
2213	/* also program perfect-filter slot 0 so the new unicast address is accepted */
2214 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2215 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2216
2217 return 0;
2218}
2219
2220/* Enable or disable Rx checksum offload engine */
2221static int lan78xx_set_features(struct net_device *netdev,
2222 netdev_features_t features)
2223{
2224 struct lan78xx_net *dev = netdev_priv(netdev);
2225 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2226 unsigned long flags;
2227 int ret;
2228
2229 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2230
2231 if (features & NETIF_F_RXCSUM) {
2232 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2233 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2234 } else {
2235 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2236 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2237 }
2238
2239 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2240 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2241 else
2242 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2243
2244 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2245
2246 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2247
2248 return 0;
2249}
2250
2251static void lan78xx_deferred_vlan_write(struct work_struct *param)
2252{
2253 struct lan78xx_priv *pdata =
2254 container_of(param, struct lan78xx_priv, set_vlan);
2255 struct lan78xx_net *dev = pdata->dev;
2256
2257 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2258 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2259}
2260
2261static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2262 __be16 proto, u16 vid)
2263{
2264 struct lan78xx_net *dev = netdev_priv(netdev);
2265 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2266 u16 vid_bit_index;
2267 u16 vid_dword_index;
2268
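	/* vlan_table[] is a 128-word bitmap covering all 4096 VIDs:
	 * bits [11:5] of the VID select the 32-bit word and bits [4:0]
	 * the bit within it, e.g. VID 100 maps to word 3, bit 4.
	 */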
2269 vid_dword_index = (vid >> 5) & 0x7F;
2270 vid_bit_index = vid & 0x1F;
2271
2272 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2273
2274 /* defer register writes to a sleepable context */
2275 schedule_work(&pdata->set_vlan);
2276
2277 return 0;
2278}
2279
2280static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2281 __be16 proto, u16 vid)
2282{
2283 struct lan78xx_net *dev = netdev_priv(netdev);
2284 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2285 u16 vid_bit_index;
2286 u16 vid_dword_index;
2287
2288 vid_dword_index = (vid >> 5) & 0x7F;
2289 vid_bit_index = vid & 0x1F;
2290
2291 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2292
2293 /* defer register writes to a sleepable context */
2294 schedule_work(&pdata->set_vlan);
2295
2296 return 0;
2297}
2298
2299static void lan78xx_init_ltm(struct lan78xx_net *dev)
2300{
2301 int ret;
2302 u32 buf;
2303 u32 regs[6] = { 0 };
2304
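	/* When the LTM strap is set, try to load the six Latency
	 * Tolerance Messaging register values from a table described
	 * in EEPROM or OTP; without such a table the registers are
	 * simply programmed with zeros below.
	 */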
2305 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2306 if (buf & USB_CFG1_LTM_ENABLE_) {
2307 u8 temp[2];
2308 /* Get values from EEPROM first */
2309 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2310 if (temp[0] == 24) {
2311 ret = lan78xx_read_raw_eeprom(dev,
2312 temp[1] * 2,
2313 24,
2314 (u8 *)regs);
2315 if (ret < 0)
2316 return;
2317 }
2318 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2319 if (temp[0] == 24) {
2320 ret = lan78xx_read_raw_otp(dev,
2321 temp[1] * 2,
2322 24,
2323 (u8 *)regs);
2324 if (ret < 0)
2325 return;
2326 }
2327 }
2328 }
2329
2330 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2331 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2332 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2333 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2334 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2335 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2336}
2337
2338static int lan78xx_reset(struct lan78xx_net *dev)
2339{
2340 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2341 u32 buf;
2342 int ret = 0;
2343 unsigned long timeout;
2344 u8 sig;
2345
2346 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2347 buf |= HW_CFG_LRST_;
2348 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2349
2350 timeout = jiffies + HZ;
2351 do {
2352 mdelay(1);
2353 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2354 if (time_after(jiffies, timeout)) {
2355 netdev_warn(dev->net,
2356 "timeout on completion of LiteReset");
2357 return -EIO;
2358 }
2359 } while (buf & HW_CFG_LRST_);
2360
2361 lan78xx_init_mac_address(dev);
2362
2363 /* save DEVID for later usage */
2364 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2365 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2366 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2367
2368 /* Respond to the IN token with a NAK */
2369 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2370 buf |= USB_CFG_BIR_;
2371 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2372
2373 /* Init LTM */
2374 lan78xx_init_ltm(dev);
2375
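	/* size the bulk-in burst cap (expressed in units of the
	 * endpoint's max packet size, hence the divisions) and the
	 * RX/TX URB queue depths for the negotiated USB speed
	 */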
2376 if (dev->udev->speed == USB_SPEED_SUPER) {
2377 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2378 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2379 dev->rx_qlen = 4;
2380 dev->tx_qlen = 4;
2381 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2382 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2383 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2384 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2385 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2386 } else {
2387 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2388 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2389 dev->rx_qlen = 4;
2390 dev->tx_qlen = 4;
2391 }
2392
2393 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2394 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2395
2396 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2397 buf |= HW_CFG_MEF_;
2398 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2399
2400 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2401 buf |= USB_CFG_BCE_;
2402 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2403
2404 /* set FIFO sizes */
2405 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2406 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2407
2408 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2409 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2410
2411 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2412 ret = lan78xx_write_reg(dev, FLOW, 0);
2413 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2414
2415 /* Don't need rfe_ctl_lock during initialisation */
2416 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2417 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2418 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2419
2420 /* Enable or disable checksum offload engines */
2421 lan78xx_set_features(dev->net, dev->net->features);
2422
2423 lan78xx_set_multicast(dev->net);
2424
2425 /* reset PHY */
2426 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2427 buf |= PMT_CTL_PHY_RST_;
2428 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2429
2430 timeout = jiffies + HZ;
2431 do {
2432 mdelay(1);
2433 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2434 if (time_after(jiffies, timeout)) {
2435 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2436 return -EIO;
2437 }
2438 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2439
2440 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2441 /* LAN7801 only has RGMII mode */
2442 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2443 buf &= ~MAC_CR_GMII_EN_;
2444
2445 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2446 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2447 if (!ret && sig != EEPROM_INDICATOR) {
2448			/* no external EEPROM: let the MAC auto-detect speed/duplex */
2449 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2450 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2451 }
2452 }
2453 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2454
2455 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2456 buf |= MAC_TX_TXEN_;
2457 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2458
2459 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2460 buf |= FCT_TX_CTL_EN_;
2461 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2462
2463 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2464
2465 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2466 buf |= MAC_RX_RXEN_;
2467 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2468
2469 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2470 buf |= FCT_RX_CTL_EN_;
2471 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2472
2473 return 0;
2474}
2475
2476static void lan78xx_init_stats(struct lan78xx_net *dev)
2477{
2478 u32 *p;
2479 int i;
2480
2481	/* initialize rollover limits for the stats update:
2482	 * most counters are 20 bits wide; the byte and time counters below are 32 bits
2483	 */
2484 p = (u32 *)&dev->stats.rollover_max;
2485 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2486 p[i] = 0xFFFFF;
2487
2488 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2489 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2490 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2491 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2492 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2493 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2494 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2495 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2496 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2497 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2498
2499 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2500}
2501
2502static int lan78xx_open(struct net_device *net)
2503{
2504 struct lan78xx_net *dev = netdev_priv(net);
2505 int ret;
2506
2507 ret = usb_autopm_get_interface(dev->intf);
2508 if (ret < 0)
2509 goto out;
2510
2511 phy_start(net->phydev);
2512
2513 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2514
2515	/* submit the interrupt URB, used for link-change notification */
2516 if (dev->urb_intr) {
2517 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2518 if (ret < 0) {
2519 netif_err(dev, ifup, dev->net,
2520 "intr submit %d\n", ret);
2521 goto done;
2522 }
2523 }
2524
2525 lan78xx_init_stats(dev);
2526
2527 set_bit(EVENT_DEV_OPEN, &dev->flags);
2528
2529 netif_start_queue(net);
2530
2531 dev->link_on = false;
2532
2533 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2534done:
2535 usb_autopm_put_interface(dev->intf);
2536
2537out:
2538 return ret;
2539}
2540
2541static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2542{
2543 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2544 DECLARE_WAITQUEUE(wait, current);
2545 int temp;
2546
2547 /* ensure there are no more active urbs */
2548 add_wait_queue(&unlink_wakeup, &wait);
2549 set_current_state(TASK_UNINTERRUPTIBLE);
2550 dev->wait = &unlink_wakeup;
2551 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2552
2553 /* maybe wait for deletions to finish. */
2554 while (!skb_queue_empty(&dev->rxq) &&
2555 !skb_queue_empty(&dev->txq) &&
2556 !skb_queue_empty(&dev->done)) {
2557 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2558 set_current_state(TASK_UNINTERRUPTIBLE);
2559 netif_dbg(dev, ifdown, dev->net,
2560 "waited for %d urb completions\n", temp);
2561 }
2562 set_current_state(TASK_RUNNING);
2563 dev->wait = NULL;
2564 remove_wait_queue(&unlink_wakeup, &wait);
2565}
2566
2567static int lan78xx_stop(struct net_device *net)
2568{
2569 struct lan78xx_net *dev = netdev_priv(net);
2570
2571 if (timer_pending(&dev->stat_monitor))
2572 del_timer_sync(&dev->stat_monitor);
2573
2574 if (net->phydev)
2575 phy_stop(net->phydev);
2576
2577 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2578 netif_stop_queue(net);
2579
2580 netif_info(dev, ifdown, dev->net,
2581 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2582 net->stats.rx_packets, net->stats.tx_packets,
2583 net->stats.rx_errors, net->stats.tx_errors);
2584
2585 lan78xx_terminate_urbs(dev);
2586
2587 usb_kill_urb(dev->urb_intr);
2588
2589 skb_queue_purge(&dev->rxq_pause);
2590
2591	/* deferred work (tasklet, timer, softirq) must also stop;
2592	 * we can't flush_scheduled_work() until we drop rtnl (later),
2593	 * else workers could deadlock, so make the workers a NOP
2594	 */
2595 dev->flags = 0;
2596 cancel_delayed_work_sync(&dev->wq);
2597 tasklet_kill(&dev->bh);
2598
2599 usb_autopm_put_interface(dev->intf);
2600
2601 return 0;
2602}
2603
2604static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2605 struct sk_buff *skb, gfp_t flags)
2606{
2607 u32 tx_cmd_a, tx_cmd_b;
2608
2609 if (skb_cow_head(skb, TX_OVERHEAD)) {
2610 dev_kfree_skb_any(skb);
2611 return NULL;
2612 }
2613
2614 if (skb_linearize(skb)) {
2615 dev_kfree_skb_any(skb);
2616 return NULL;
2617 }
2618
2619 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2620
2621 if (skb->ip_summed == CHECKSUM_PARTIAL)
2622 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2623
2624 tx_cmd_b = 0;
2625 if (skb_is_gso(skb)) {
2626 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2627
2628 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2629
2630 tx_cmd_a |= TX_CMD_A_LSO_;
2631 }
2632
2633 if (skb_vlan_tag_present(skb)) {
2634 tx_cmd_a |= TX_CMD_A_IVTG_;
2635 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2636 }
2637
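	/* prepend the two little-endian TX command words; word B is
	 * pushed first so that word A ends up at the head of the
	 * buffer, filling the 8-byte TX_OVERHEAD reserved above
	 */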
2638 skb_push(skb, 4);
2639 cpu_to_le32s(&tx_cmd_b);
2640 memcpy(skb->data, &tx_cmd_b, 4);
2641
2642 skb_push(skb, 4);
2643 cpu_to_le32s(&tx_cmd_a);
2644 memcpy(skb->data, &tx_cmd_a, 4);
2645
2646 return skb;
2647}
2648
2649static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2650 struct sk_buff_head *list, enum skb_state state)
2651{
2652 unsigned long flags;
2653 enum skb_state old_state;
2654 struct skb_data *entry = (struct skb_data *)skb->cb;
2655
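	/* move the skb from its active queue onto dev->done, and kick
	 * the bottom half only when the done list becomes non-empty so
	 * completions get batched
	 */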
2656 spin_lock_irqsave(&list->lock, flags);
2657 old_state = entry->state;
2658 entry->state = state;
2659
2660 __skb_unlink(skb, list);
2661 spin_unlock(&list->lock);
2662 spin_lock(&dev->done.lock);
2663
2664 __skb_queue_tail(&dev->done, skb);
2665 if (skb_queue_len(&dev->done) == 1)
2666 tasklet_schedule(&dev->bh);
2667 spin_unlock_irqrestore(&dev->done.lock, flags);
2668
2669 return old_state;
2670}
2671
2672static void tx_complete(struct urb *urb)
2673{
2674 struct sk_buff *skb = (struct sk_buff *)urb->context;
2675 struct skb_data *entry = (struct skb_data *)skb->cb;
2676 struct lan78xx_net *dev = entry->dev;
2677
2678 if (urb->status == 0) {
2679 dev->net->stats.tx_packets += entry->num_of_packet;
2680 dev->net->stats.tx_bytes += entry->length;
2681 } else {
2682 dev->net->stats.tx_errors++;
2683
2684 switch (urb->status) {
2685 case -EPIPE:
2686 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2687 break;
2688
2689 /* software-driven interface shutdown */
2690 case -ECONNRESET:
2691 case -ESHUTDOWN:
2692 break;
2693
2694 case -EPROTO:
2695 case -ETIME:
2696 case -EILSEQ:
2697 netif_stop_queue(dev->net);
2698 break;
2699 default:
2700 netif_dbg(dev, tx_err, dev->net,
2701 "tx err %d\n", entry->urb->status);
2702 break;
2703 }
2704 }
2705
2706 usb_autopm_put_interface_async(dev->intf);
2707
2708 defer_bh(dev, skb, &dev->txq, tx_done);
2709}
2710
2711static void lan78xx_queue_skb(struct sk_buff_head *list,
2712 struct sk_buff *newsk, enum skb_state state)
2713{
2714 struct skb_data *entry = (struct skb_data *)newsk->cb;
2715
2716 __skb_queue_tail(list, newsk);
2717 entry->state = state;
2718}
2719
2720static netdev_tx_t
2721lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2722{
2723 struct lan78xx_net *dev = netdev_priv(net);
2724 struct sk_buff *skb2 = NULL;
2725
2726 if (skb) {
2727 skb_tx_timestamp(skb);
2728 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2729 }
2730
2731 if (skb2) {
2732 skb_queue_tail(&dev->txq_pend, skb2);
2733
2734		/* throttle the TX path when running slower than SuperSpeed USB */
2735 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2736 (skb_queue_len(&dev->txq_pend) > 10))
2737 netif_stop_queue(net);
2738 } else {
2739 netif_dbg(dev, tx_err, dev->net,
2740 "lan78xx_tx_prep return NULL\n");
2741 dev->net->stats.tx_errors++;
2742 dev->net->stats.tx_dropped++;
2743 }
2744
2745 tasklet_schedule(&dev->bh);
2746
2747 return NETDEV_TX_OK;
2748}
2749
2750static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2751{
2752 struct lan78xx_priv *pdata = NULL;
2753 int ret;
2754 int i;
2755
2756 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2757
2758 pdata = (struct lan78xx_priv *)(dev->data[0]);
2759 if (!pdata) {
2760 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2761 return -ENOMEM;
2762 }
2763
2764 pdata->dev = dev;
2765
2766 spin_lock_init(&pdata->rfe_ctl_lock);
2767 mutex_init(&pdata->dataport_mutex);
2768
2769 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2770
2771 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2772 pdata->vlan_table[i] = 0;
2773
2774 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2775
2776 dev->net->features = 0;
2777
2778 if (DEFAULT_TX_CSUM_ENABLE)
2779 dev->net->features |= NETIF_F_HW_CSUM;
2780
2781 if (DEFAULT_RX_CSUM_ENABLE)
2782 dev->net->features |= NETIF_F_RXCSUM;
2783
2784 if (DEFAULT_TSO_CSUM_ENABLE)
2785 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2786
2787 dev->net->hw_features = dev->net->features;
2788
2789 ret = lan78xx_setup_irq_domain(dev);
2790 if (ret < 0) {
2791 netdev_warn(dev->net,
2792 "lan78xx_setup_irq_domain() failed : %d", ret);
2793 goto out1;
2794 }
2795
2796 dev->net->hard_header_len += TX_OVERHEAD;
2797 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2798
2799 /* Init all registers */
2800 ret = lan78xx_reset(dev);
2801 if (ret) {
2802		netdev_warn(dev->net, "register initialization failed");
2803 goto out2;
2804 }
2805
2806 ret = lan78xx_mdio_init(dev);
2807 if (ret) {
2808		netdev_warn(dev->net, "MDIO initialization failed");
2809 goto out2;
2810 }
2811
2812 dev->net->flags |= IFF_MULTICAST;
2813
2814 pdata->wol = WAKE_MAGIC;
2815
2816 return ret;
2817
2818out2:
2819 lan78xx_remove_irq_domain(dev);
2820
2821out1:
2822	netdev_warn(dev->net, "bind routine failed");
2823 cancel_work_sync(&pdata->set_multicast);
2824 cancel_work_sync(&pdata->set_vlan);
2825 kfree(pdata);
2826 return ret;
2827}
2828
2829static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2830{
2831 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2832
2833 lan78xx_remove_irq_domain(dev);
2834
2835 lan78xx_remove_mdio(dev);
2836
2837 if (pdata) {
2838 cancel_work_sync(&pdata->set_multicast);
2839 cancel_work_sync(&pdata->set_vlan);
2840 netif_dbg(dev, ifdown, dev->net, "free pdata");
2841 kfree(pdata);
2842 pdata = NULL;
2843 dev->data[0] = 0;
2844 }
2845}
2846
2847static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2848 struct sk_buff *skb,
2849 u32 rx_cmd_a, u32 rx_cmd_b)
2850{
2851 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2852 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2853 skb->ip_summed = CHECKSUM_NONE;
2854 } else {
2855 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2856 skb->ip_summed = CHECKSUM_COMPLETE;
2857 }
2858}
2859
2860static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2861{
2862 int status;
2863
2864 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2865 skb_queue_tail(&dev->rxq_pause, skb);
2866 return;
2867 }
2868
2869 dev->net->stats.rx_packets++;
2870 dev->net->stats.rx_bytes += skb->len;
2871
2872 skb->protocol = eth_type_trans(skb, dev->net);
2873
2874 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2875 skb->len + sizeof(struct ethhdr), skb->protocol);
2876 memset(skb->cb, 0, sizeof(struct skb_data));
2877
2878 if (skb_defer_rx_timestamp(skb))
2879 return;
2880
2881 status = netif_rx(skb);
2882 if (status != NET_RX_SUCCESS)
2883 netif_dbg(dev, rx_err, dev->net,
2884 "netif_rx status %d\n", status);
2885}
2886
2887static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2888{
2889 if (skb->len < dev->net->hard_header_len)
2890 return 0;
2891
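	/* a bulk-in transfer can carry several frames back to back;
	 * each frame is preceded by three little-endian command words
	 * (rx_cmd_a/b/c) and padded so the next one starts on a 4-byte
	 * boundary
	 */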
2892 while (skb->len > 0) {
2893 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2894 u16 rx_cmd_c;
2895 struct sk_buff *skb2;
2896 unsigned char *packet;
2897
2898 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2899 le32_to_cpus(&rx_cmd_a);
2900 skb_pull(skb, sizeof(rx_cmd_a));
2901
2902 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2903 le32_to_cpus(&rx_cmd_b);
2904 skb_pull(skb, sizeof(rx_cmd_b));
2905
2906 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2907 le16_to_cpus(&rx_cmd_c);
2908 skb_pull(skb, sizeof(rx_cmd_c));
2909
2910 packet = skb->data;
2911
2912 /* get the packet length */
2913 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2914 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2915
2916 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2917 netif_dbg(dev, rx_err, dev->net,
2918 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2919 } else {
2920 /* last frame in this batch */
2921 if (skb->len == size) {
2922 lan78xx_rx_csum_offload(dev, skb,
2923 rx_cmd_a, rx_cmd_b);
2924
2925 skb_trim(skb, skb->len - 4); /* remove fcs */
2926 skb->truesize = size + sizeof(struct sk_buff);
2927
2928 return 1;
2929 }
2930
2931 skb2 = skb_clone(skb, GFP_ATOMIC);
2932 if (unlikely(!skb2)) {
2933 netdev_warn(dev->net, "Error allocating skb");
2934 return 0;
2935 }
2936
2937 skb2->len = size;
2938 skb2->data = packet;
2939 skb_set_tail_pointer(skb2, size);
2940
2941 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2942
2943 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2944 skb2->truesize = size + sizeof(struct sk_buff);
2945
2946 lan78xx_skb_return(dev, skb2);
2947 }
2948
2949 skb_pull(skb, size);
2950
2951 /* padding bytes before the next frame starts */
2952 if (skb->len)
2953 skb_pull(skb, align_count);
2954 }
2955
2956 return 1;
2957}
2958
2959static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2960{
2961 if (!lan78xx_rx(dev, skb)) {
2962 dev->net->stats.rx_errors++;
2963 goto done;
2964 }
2965
2966 if (skb->len) {
2967 lan78xx_skb_return(dev, skb);
2968 return;
2969 }
2970
2971 netif_dbg(dev, rx_err, dev->net, "drop\n");
2972 dev->net->stats.rx_errors++;
2973done:
2974 skb_queue_tail(&dev->done, skb);
2975}
2976
2977static void rx_complete(struct urb *urb);
2978
2979static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2980{
2981 struct sk_buff *skb;
2982 struct skb_data *entry;
2983 unsigned long lockflags;
2984 size_t size = dev->rx_urb_size;
2985 int ret = 0;
2986
2987 skb = netdev_alloc_skb_ip_align(dev->net, size);
2988 if (!skb) {
2989 usb_free_urb(urb);
2990 return -ENOMEM;
2991 }
2992
2993 entry = (struct skb_data *)skb->cb;
2994 entry->urb = urb;
2995 entry->dev = dev;
2996 entry->length = 0;
2997
2998 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2999 skb->data, size, rx_complete, skb);
3000
3001 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3002
3003 if (netif_device_present(dev->net) &&
3004 netif_running(dev->net) &&
3005 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3006 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3007 ret = usb_submit_urb(urb, GFP_ATOMIC);
3008 switch (ret) {
3009 case 0:
3010 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3011 break;
3012 case -EPIPE:
3013 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3014 break;
3015 case -ENODEV:
3016 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3017 netif_device_detach(dev->net);
3018 break;
3019 case -EHOSTUNREACH:
3020 ret = -ENOLINK;
3021 break;
3022 default:
3023 netif_dbg(dev, rx_err, dev->net,
3024 "rx submit, %d\n", ret);
3025 tasklet_schedule(&dev->bh);
3026 }
3027 } else {
3028 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3029 ret = -ENOLINK;
3030 }
3031 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3032 if (ret) {
3033 dev_kfree_skb_any(skb);
3034 usb_free_urb(urb);
3035 }
3036 return ret;
3037}
3038
3039static void rx_complete(struct urb *urb)
3040{
3041 struct sk_buff *skb = (struct sk_buff *)urb->context;
3042 struct skb_data *entry = (struct skb_data *)skb->cb;
3043 struct lan78xx_net *dev = entry->dev;
3044 int urb_status = urb->status;
3045 enum skb_state state;
3046
3047 skb_put(skb, urb->actual_length);
3048 state = rx_done;
3049 entry->urb = NULL;
3050
3051 switch (urb_status) {
3052 case 0:
3053 if (skb->len < dev->net->hard_header_len) {
3054 state = rx_cleanup;
3055 dev->net->stats.rx_errors++;
3056 dev->net->stats.rx_length_errors++;
3057 netif_dbg(dev, rx_err, dev->net,
3058 "rx length %d\n", skb->len);
3059 }
3060 usb_mark_last_busy(dev->udev);
3061 break;
3062 case -EPIPE:
3063 dev->net->stats.rx_errors++;
3064 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3065 /* FALLTHROUGH */
3066 case -ECONNRESET: /* async unlink */
3067 case -ESHUTDOWN: /* hardware gone */
3068 netif_dbg(dev, ifdown, dev->net,
3069 "rx shutdown, code %d\n", urb_status);
3070 state = rx_cleanup;
3071 entry->urb = urb;
3072 urb = NULL;
3073 break;
3074 case -EPROTO:
3075 case -ETIME:
3076 case -EILSEQ:
3077 dev->net->stats.rx_errors++;
3078 state = rx_cleanup;
3079 entry->urb = urb;
3080 urb = NULL;
3081 break;
3082
3083 /* data overrun ... flush fifo? */
3084 case -EOVERFLOW:
3085 dev->net->stats.rx_over_errors++;
3086 /* FALLTHROUGH */
3087
3088 default:
3089 state = rx_cleanup;
3090 dev->net->stats.rx_errors++;
3091 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3092 break;
3093 }
3094
3095 state = defer_bh(dev, skb, &dev->rxq, state);
3096
3097 if (urb) {
3098 if (netif_running(dev->net) &&
3099 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3100 state != unlink_start) {
3101 rx_submit(dev, urb, GFP_ATOMIC);
3102 return;
3103 }
3104 usb_free_urb(urb);
3105 }
3106 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3107}
3108
3109static void lan78xx_tx_bh(struct lan78xx_net *dev)
3110{
3111 int length;
3112 struct urb *urb = NULL;
3113 struct skb_data *entry;
3114 unsigned long flags;
3115 struct sk_buff_head *tqp = &dev->txq_pend;
3116 struct sk_buff *skb, *skb2;
3117 int ret;
3118 int count, pos;
3119 int skb_totallen, pkt_cnt;
3120
3121 skb_totallen = 0;
3122 pkt_cnt = 0;
3123 count = 0;
3124 length = 0;
3125 spin_lock_irqsave(&tqp->lock, flags);
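	/* batch as many pending skbs as fit below MAX_SINGLE_PACKET_SIZE
	 * into one bulk-out URB, each aligned to a 4-byte boundary; a
	 * GSO skb always travels alone in its own URB
	 */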
3126 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3127 if (skb_is_gso(skb)) {
3128 if (pkt_cnt) {
3129 /* handle previous packets first */
3130 break;
3131 }
3132 count = 1;
3133 length = skb->len - TX_OVERHEAD;
3134 __skb_unlink(skb, tqp);
3135 spin_unlock_irqrestore(&tqp->lock, flags);
3136 goto gso_skb;
3137 }
3138
3139 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3140 break;
3141 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3142 pkt_cnt++;
3143 }
3144 spin_unlock_irqrestore(&tqp->lock, flags);
3145
3146 /* copy to a single skb */
3147 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3148 if (!skb)
3149 goto drop;
3150
3151 skb_put(skb, skb_totallen);
3152
3153 for (count = pos = 0; count < pkt_cnt; count++) {
3154 skb2 = skb_dequeue(tqp);
3155 if (skb2) {
3156 length += (skb2->len - TX_OVERHEAD);
3157 memcpy(skb->data + pos, skb2->data, skb2->len);
3158 pos += roundup(skb2->len, sizeof(u32));
3159 dev_kfree_skb(skb2);
3160 }
3161 }
3162
3163gso_skb:
3164 urb = usb_alloc_urb(0, GFP_ATOMIC);
3165 if (!urb)
3166 goto drop;
3167
3168 entry = (struct skb_data *)skb->cb;
3169 entry->urb = urb;
3170 entry->dev = dev;
3171 entry->length = length;
3172 entry->num_of_packet = count;
3173
3174 spin_lock_irqsave(&dev->txq.lock, flags);
3175 ret = usb_autopm_get_interface_async(dev->intf);
3176 if (ret < 0) {
3177 spin_unlock_irqrestore(&dev->txq.lock, flags);
3178 goto drop;
3179 }
3180
3181 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3182 skb->data, skb->len, tx_complete, skb);
3183
3184 if (length % dev->maxpacket == 0) {
3185 /* send USB_ZERO_PACKET */
3186 urb->transfer_flags |= URB_ZERO_PACKET;
3187 }
3188
3189#ifdef CONFIG_PM
3190	/* if this triggers, the device is still asleep */
3191 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3192 /* transmission will be done in resume */
3193 usb_anchor_urb(urb, &dev->deferred);
3194 /* no use to process more packets */
3195 netif_stop_queue(dev->net);
3196 usb_put_urb(urb);
3197 spin_unlock_irqrestore(&dev->txq.lock, flags);
3198 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3199 return;
3200 }
3201#endif
3202
3203 ret = usb_submit_urb(urb, GFP_ATOMIC);
3204 switch (ret) {
3205 case 0:
3206 netif_trans_update(dev->net);
3207 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3208 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3209 netif_stop_queue(dev->net);
3210 break;
3211 case -EPIPE:
3212 netif_stop_queue(dev->net);
3213 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3214 usb_autopm_put_interface_async(dev->intf);
3215 break;
3216 default:
3217 usb_autopm_put_interface_async(dev->intf);
3218 netif_dbg(dev, tx_err, dev->net,
3219 "tx: submit urb err %d\n", ret);
3220 break;
3221 }
3222
3223 spin_unlock_irqrestore(&dev->txq.lock, flags);
3224
3225 if (ret) {
3226 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3227drop:
3228 dev->net->stats.tx_dropped++;
3229 if (skb)
3230 dev_kfree_skb_any(skb);
3231 usb_free_urb(urb);
3232 } else
3233 netif_dbg(dev, tx_queued, dev->net,
3234 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3235}
3236
3237static void lan78xx_rx_bh(struct lan78xx_net *dev)
3238{
3239 struct urb *urb;
3240 int i;
3241
3242 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3243 for (i = 0; i < 10; i++) {
3244 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3245 break;
3246 urb = usb_alloc_urb(0, GFP_ATOMIC);
3247 if (urb)
3248 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3249 return;
3250 }
3251
3252 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3253 tasklet_schedule(&dev->bh);
3254 }
3255 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3256 netif_wake_queue(dev->net);
3257}
3258
3259static void lan78xx_bh(unsigned long param)
3260{
3261 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3262 struct sk_buff *skb;
3263 struct skb_data *entry;
3264
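	/* drain the done list: completed RX buffers are parsed and fed
	 * to the stack, finished TX and cleanup entries just release
	 * their URB and skb
	 */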
3265 while ((skb = skb_dequeue(&dev->done))) {
3266 entry = (struct skb_data *)(skb->cb);
3267 switch (entry->state) {
3268 case rx_done:
3269 entry->state = rx_cleanup;
3270 rx_process(dev, skb);
3271 continue;
3272 case tx_done:
3273 usb_free_urb(entry->urb);
3274 dev_kfree_skb(skb);
3275 continue;
3276 case rx_cleanup:
3277 usb_free_urb(entry->urb);
3278 dev_kfree_skb(skb);
3279 continue;
3280 default:
3281 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3282 return;
3283 }
3284 }
3285
3286 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3287 /* reset update timer delta */
3288 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3289 dev->delta = 1;
3290 mod_timer(&dev->stat_monitor,
3291 jiffies + STAT_UPDATE_TIMER);
3292 }
3293
3294 if (!skb_queue_empty(&dev->txq_pend))
3295 lan78xx_tx_bh(dev);
3296
3297 if (!timer_pending(&dev->delay) &&
3298 !test_bit(EVENT_RX_HALT, &dev->flags))
3299 lan78xx_rx_bh(dev);
3300 }
3301}
3302
3303static void lan78xx_delayedwork(struct work_struct *work)
3304{
3305 int status;
3306 struct lan78xx_net *dev;
3307
3308 dev = container_of(work, struct lan78xx_net, wq.work);
3309
3310 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3311 unlink_urbs(dev, &dev->txq);
3312 status = usb_autopm_get_interface(dev->intf);
3313 if (status < 0)
3314 goto fail_pipe;
3315 status = usb_clear_halt(dev->udev, dev->pipe_out);
3316 usb_autopm_put_interface(dev->intf);
3317 if (status < 0 &&
3318 status != -EPIPE &&
3319 status != -ESHUTDOWN) {
3320 if (netif_msg_tx_err(dev))
3321fail_pipe:
3322 netdev_err(dev->net,
3323 "can't clear tx halt, status %d\n",
3324 status);
3325 } else {
3326 clear_bit(EVENT_TX_HALT, &dev->flags);
3327 if (status != -ESHUTDOWN)
3328 netif_wake_queue(dev->net);
3329 }
3330 }
3331 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3332 unlink_urbs(dev, &dev->rxq);
3333 status = usb_autopm_get_interface(dev->intf);
3334 if (status < 0)
3335 goto fail_halt;
3336 status = usb_clear_halt(dev->udev, dev->pipe_in);
3337 usb_autopm_put_interface(dev->intf);
3338 if (status < 0 &&
3339 status != -EPIPE &&
3340 status != -ESHUTDOWN) {
3341 if (netif_msg_rx_err(dev))
3342fail_halt:
3343 netdev_err(dev->net,
3344 "can't clear rx halt, status %d\n",
3345 status);
3346 } else {
3347 clear_bit(EVENT_RX_HALT, &dev->flags);
3348 tasklet_schedule(&dev->bh);
3349 }
3350 }
3351
3352 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3353 int ret = 0;
3354
3355 clear_bit(EVENT_LINK_RESET, &dev->flags);
3356 status = usb_autopm_get_interface(dev->intf);
3357 if (status < 0)
3358 goto skip_reset;
3359		ret = lan78xx_link_reset(dev);
		if (ret < 0) {
3360 usb_autopm_put_interface(dev->intf);
3361skip_reset:
3362 netdev_info(dev->net, "link reset failed (%d)\n",
3363 ret);
3364 } else {
3365 usb_autopm_put_interface(dev->intf);
3366 }
3367 }
3368
3369 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3370 lan78xx_update_stats(dev);
3371
3372 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3373
3374 mod_timer(&dev->stat_monitor,
3375 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3376
3377 dev->delta = min((dev->delta * 2), 50);
3378 }
3379}
3380
3381static void intr_complete(struct urb *urb)
3382{
3383 struct lan78xx_net *dev = urb->context;
3384 int status = urb->status;
3385
3386 switch (status) {
3387 /* success */
3388 case 0:
3389 lan78xx_status(dev, urb);
3390 break;
3391
3392 /* software-driven interface shutdown */
3393 case -ENOENT: /* urb killed */
3394 case -ESHUTDOWN: /* hardware gone */
3395 netif_dbg(dev, ifdown, dev->net,
3396 "intr shutdown, code %d\n", status);
3397 return;
3398
3399 /* NOTE: not throttling like RX/TX, since this endpoint
3400 * already polls infrequently
3401 */
3402 default:
3403 netdev_dbg(dev->net, "intr status %d\n", status);
3404 break;
3405 }
3406
3407 if (!netif_running(dev->net))
3408 return;
3409
3410 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3411 status = usb_submit_urb(urb, GFP_ATOMIC);
3412 if (status != 0)
3413 netif_err(dev, timer, dev->net,
3414 "intr resubmit --> %d\n", status);
3415}
3416
3417static void lan78xx_disconnect(struct usb_interface *intf)
3418{
3419 struct lan78xx_net *dev;
3420 struct usb_device *udev;
3421 struct net_device *net;
3422
3423 dev = usb_get_intfdata(intf);
3424 usb_set_intfdata(intf, NULL);
3425 if (!dev)
3426 return;
3427
3428 udev = interface_to_usbdev(intf);
3429 net = dev->net;
3430
3431 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3432 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3433
3434 phy_disconnect(net->phydev);
3435
3436 unregister_netdev(net);
3437
3438 cancel_delayed_work_sync(&dev->wq);
3439
3440 usb_scuttle_anchored_urbs(&dev->deferred);
3441
3442 lan78xx_unbind(dev, intf);
3443
3444 usb_kill_urb(dev->urb_intr);
3445 usb_free_urb(dev->urb_intr);
3446
3447 free_netdev(net);
3448 usb_put_dev(udev);
3449}
3450
3451static void lan78xx_tx_timeout(struct net_device *net)
3452{
3453 struct lan78xx_net *dev = netdev_priv(net);
3454
3455 unlink_urbs(dev, &dev->txq);
3456 tasklet_schedule(&dev->bh);
3457}
3458
3459static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3460 struct net_device *netdev,
3461 netdev_features_t features)
3462{
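	/* frames whose length plus the 8-byte TX header exceeds the
	 * internal buffer limit must be segmented in software
	 */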
3463 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3464 features &= ~NETIF_F_GSO_MASK;
3465
3466 features = vlan_features_check(skb, features);
3467 features = vxlan_features_check(skb, features);
3468
3469 return features;
3470}
3471
3472static const struct net_device_ops lan78xx_netdev_ops = {
3473 .ndo_open = lan78xx_open,
3474 .ndo_stop = lan78xx_stop,
3475 .ndo_start_xmit = lan78xx_start_xmit,
3476 .ndo_tx_timeout = lan78xx_tx_timeout,
3477 .ndo_change_mtu = lan78xx_change_mtu,
3478 .ndo_set_mac_address = lan78xx_set_mac_addr,
3479 .ndo_validate_addr = eth_validate_addr,
3480 .ndo_do_ioctl = lan78xx_ioctl,
3481 .ndo_set_rx_mode = lan78xx_set_multicast,
3482 .ndo_set_features = lan78xx_set_features,
3483 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3484 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3485 .ndo_features_check = lan78xx_features_check,
3486};
3487
3488static void lan78xx_stat_monitor(unsigned long param)
3489{
3490 struct lan78xx_net *dev;
3491
3492 dev = (struct lan78xx_net *)param;
3493
3494 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3495}
3496
3497static int lan78xx_probe(struct usb_interface *intf,
3498 const struct usb_device_id *id)
3499{
3500 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3501 struct lan78xx_net *dev;
3502 struct net_device *netdev;
3503 struct usb_device *udev;
3504 int ret;
3505 unsigned maxp;
3506 unsigned period;
3507 u8 *buf = NULL;
3508
3509 udev = interface_to_usbdev(intf);
3510 udev = usb_get_dev(udev);
3511
3512 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3513 if (!netdev) {
3514 dev_err(&intf->dev, "Error: OOM\n");
3515 ret = -ENOMEM;
3516 goto out1;
3517 }
3518
3519 /* netdev_printk() needs this */
3520 SET_NETDEV_DEV(netdev, &intf->dev);
3521
3522 dev = netdev_priv(netdev);
3523 dev->udev = udev;
3524 dev->intf = intf;
3525 dev->net = netdev;
3526 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3527 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3528
3529 skb_queue_head_init(&dev->rxq);
3530 skb_queue_head_init(&dev->txq);
3531 skb_queue_head_init(&dev->done);
3532 skb_queue_head_init(&dev->rxq_pause);
3533 skb_queue_head_init(&dev->txq_pend);
3534 mutex_init(&dev->phy_mutex);
3535
3536 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3537 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3538 init_usb_anchor(&dev->deferred);
3539
3540 netdev->netdev_ops = &lan78xx_netdev_ops;
3541 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3542 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3543
3544 dev->stat_monitor.function = lan78xx_stat_monitor;
3545 dev->stat_monitor.data = (unsigned long)dev;
3546 dev->delta = 1;
3547 init_timer(&dev->stat_monitor);
3548
3549 mutex_init(&dev->stats.access_lock);
3550
3551 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3552 ret = -ENODEV;
3553 goto out2;
3554 }
3555
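	/* validate the expected fixed endpoint layout (bulk-in,
	 * bulk-out, interrupt-in) before wiring up the pipes
	 */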
3556 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3557 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3558 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3559 ret = -ENODEV;
3560 goto out2;
3561 }
3562
3563 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3564 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3565 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3566 ret = -ENODEV;
3567 goto out2;
3568 }
3569
3570 ep_intr = &intf->cur_altsetting->endpoint[2];
3571 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3572 ret = -ENODEV;
3573 goto out2;
3574 }
3575
3576 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3577 usb_endpoint_num(&ep_intr->desc));
3578
3579 ret = lan78xx_bind(dev, intf);
3580 if (ret < 0)
3581 goto out2;
3582 strcpy(netdev->name, "eth%d");
3583
3584 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3585 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3586
3587 /* MTU range: 68 - 9000 */
3588 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3589 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3590
3591 period = ep_intr->desc.bInterval;
3592 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3593 buf = kmalloc(maxp, GFP_KERNEL);
3594 if (buf) {
3595 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3596 if (!dev->urb_intr) {
3597 ret = -ENOMEM;
3598 kfree(buf);
3599 goto out3;
3600 } else {
3601 usb_fill_int_urb(dev->urb_intr, dev->udev,
3602 dev->pipe_intr, buf, maxp,
3603 intr_complete, dev, period);
3604 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3605 }
3606 }
3607
3608 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3609
3610 /* driver requires remote-wakeup capability during autosuspend. */
3611 intf->needs_remote_wakeup = 1;
3612
3613 ret = lan78xx_phy_init(dev);
3614 if (ret < 0)
3615 goto out4;
3616
3617 ret = register_netdev(netdev);
3618 if (ret != 0) {
3619 netif_err(dev, probe, netdev, "couldn't register the device\n");
3620 goto out5;
3621 }
3622
3623 usb_set_intfdata(intf, dev);
3624
3625 ret = device_set_wakeup_enable(&udev->dev, true);
3626
3627	/* The default autosuspend delay of 2 sec costs more in resume
3628	 * overhead than it saves; use 10 sec instead.
3629	 */
3630 pm_runtime_set_autosuspend_delay(&udev->dev,
3631 DEFAULT_AUTOSUSPEND_DELAY);
3632
3633 return 0;
3634
3635out5:
3636 phy_disconnect(netdev->phydev);
3637out4:
3638 usb_free_urb(dev->urb_intr);
3639out3:
3640 lan78xx_unbind(dev, intf);
3641out2:
3642 free_netdev(netdev);
3643out1:
3644 usb_put_dev(udev);
3645
3646 return ret;
3647}
3648
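/* Bit-serial CRC-16 (polynomial 0x8005) over the wakeup-frame pattern
 * bytes, producing the value programmed into the WUF_CFGX CRC16 field.
 */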
3649static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3650{
3651 const u16 crc16poly = 0x8005;
3652 int i;
3653 u16 bit, crc, msb;
3654 u8 data;
3655
3656 crc = 0xFFFF;
3657 for (i = 0; i < len; i++) {
3658 data = *buf++;
3659 for (bit = 0; bit < 8; bit++) {
3660 msb = crc >> 15;
3661 crc <<= 1;
3662
3663 if (msb ^ (u16)(data & 1)) {
3664 crc ^= crc16poly;
3665 crc |= (u16)0x0001U;
3666 }
3667 data >>= 1;
3668 }
3669 }
3670
3671 return crc;
3672}
3673
3674static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3675{
3676 u32 buf;
3677 int ret;
3678 int mask_index;
3679 u16 crc;
3680 u32 temp_wucsr;
3681 u32 temp_pmt_ctl;
3682 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3683 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3684 const u8 arp_type[2] = { 0x08, 0x06 };
3685
3686 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3687 buf &= ~MAC_TX_TXEN_;
3688 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3689 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3690 buf &= ~MAC_RX_RXEN_;
3691 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3692
3693 ret = lan78xx_write_reg(dev, WUCSR, 0);
3694 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3695 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3696
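	/* accumulate the wakeup (WUCSR) and power-management (PMT_CTL)
	 * configuration for the requested WoL sources and program the
	 * registers once all sources have been handled
	 */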
3697 temp_wucsr = 0;
3698
3699 temp_pmt_ctl = 0;
3700 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3701 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3702 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3703
3704 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3705 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3706
3707 mask_index = 0;
3708 if (wol & WAKE_PHY) {
3709 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3710
3711 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3712 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3713 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3714 }
3715 if (wol & WAKE_MAGIC) {
3716 temp_wucsr |= WUCSR_MPEN_;
3717
3718 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3719 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3720 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3721 }
3722 if (wol & WAKE_BCAST) {
3723 temp_wucsr |= WUCSR_BCST_EN_;
3724
3725 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3726 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3727 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3728 }
3729 if (wol & WAKE_MCAST) {
3730 temp_wucsr |= WUCSR_WAKE_EN_;
3731
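		/* the WUF_MASKn values act as byte-enable bitmaps for the
		 * pattern filter, matching the bytes fed to the CRC: 0x7
		 * covers the three IPv4 multicast DA bytes, 0x3 the two
		 * IPv6 multicast prefix bytes, and 0x3000 the EtherType
		 * at offsets 12-13 used for ARP below
		 */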
3732 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3733 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3734 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3735 WUF_CFGX_EN_ |
3736 WUF_CFGX_TYPE_MCAST_ |
3737 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3738 (crc & WUF_CFGX_CRC16_MASK_));
3739
3740 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3741 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3742 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3743 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3744 mask_index++;
3745
3746 /* for IPv6 Multicast */
3747 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3748 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3749 WUF_CFGX_EN_ |
3750 WUF_CFGX_TYPE_MCAST_ |
3751 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3752 (crc & WUF_CFGX_CRC16_MASK_));
3753
3754 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3755 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3756 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3757 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3758 mask_index++;
3759
3760 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3761 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3762 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3763 }
3764 if (wol & WAKE_UCAST) {
3765 temp_wucsr |= WUCSR_PFDA_EN_;
3766
3767 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3768 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3769 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3770 }
3771 if (wol & WAKE_ARP) {
3772 temp_wucsr |= WUCSR_WAKE_EN_;
3773
3774 /* set WUF_CFG & WUF_MASK
3775 * for packettype (offset 12,13) = ARP (0x0806)
3776 */
3777 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3778 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3779 WUF_CFGX_EN_ |
3780 WUF_CFGX_TYPE_ALL_ |
3781 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3782 (crc & WUF_CFGX_CRC16_MASK_));
3783
3784 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3785 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3786 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3787 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3788 mask_index++;
3789
3790 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3791 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3792 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3793 }
3794
3795 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3796
3797	/* when multiple WoL bits are set, fall back to suspend mode 0 */
3798 if (hweight_long((unsigned long)wol) > 1) {
3799 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3800 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3801 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3802 }
3803 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3804
3805 /* clear WUPS */
3806 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3807 buf |= PMT_CTL_WUPS_MASK_;
3808 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3809
3810 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3811 buf |= MAC_RX_RXEN_;
3812 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3813
3814 return 0;
3815}
3816
3817static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3818{
3819 struct lan78xx_net *dev = usb_get_intfdata(intf);
3820 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3821 u32 buf;
3822 int ret;
3823 int event;
3824
3825 event = message.event;
3826
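	/* only the first (outermost) suspend quiesces the MAC and
	 * unlinks in-flight URBs; deeper nesting levels only adjust
	 * the wakeup configuration below
	 */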
3827 if (!dev->suspend_count++) {
3828 spin_lock_irq(&dev->txq.lock);
3829 /* don't autosuspend while transmitting */
3830 if ((skb_queue_len(&dev->txq) ||
3831 skb_queue_len(&dev->txq_pend)) &&
3832 PMSG_IS_AUTO(message)) {
3833 spin_unlock_irq(&dev->txq.lock);
3834 ret = -EBUSY;
3835 goto out;
3836 } else {
3837 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3838 spin_unlock_irq(&dev->txq.lock);
3839 }
3840
3841 /* stop TX & RX */
3842 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3843 buf &= ~MAC_TX_TXEN_;
3844 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3845 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3846 buf &= ~MAC_RX_RXEN_;
3847 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3848
3849		/* empty out the rx and tx queues */
3850 netif_device_detach(dev->net);
3851 lan78xx_terminate_urbs(dev);
3852 usb_kill_urb(dev->urb_intr);
3853
3854 /* reattach */
3855 netif_device_attach(dev->net);
3856 }
3857
3858 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3859 del_timer(&dev->stat_monitor);
3860
3861 if (PMSG_IS_AUTO(message)) {
3862 /* auto suspend (selective suspend) */
3863 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3864 buf &= ~MAC_TX_TXEN_;
3865 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3866 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3867 buf &= ~MAC_RX_RXEN_;
3868 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3869
3870 ret = lan78xx_write_reg(dev, WUCSR, 0);
3871 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3872 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3873
3874 /* set goodframe wakeup */
3875 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3876
3877 buf |= WUCSR_RFE_WAKE_EN_;
3878 buf |= WUCSR_STORE_WAKE_;
3879
3880 ret = lan78xx_write_reg(dev, WUCSR, buf);
3881
3882 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3883
3884 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3885 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3886
3887 buf |= PMT_CTL_PHY_WAKE_EN_;
3888 buf |= PMT_CTL_WOL_EN_;
3889 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3890 buf |= PMT_CTL_SUS_MODE_3_;
3891
3892 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3893
3894 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3895
3896 buf |= PMT_CTL_WUPS_MASK_;
3897
3898 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3899
3900 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3901 buf |= MAC_RX_RXEN_;
3902 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3903 } else {
3904 lan78xx_set_suspend(dev, pdata->wol);
3905 }
3906 }
3907
3908 ret = 0;
3909out:
3910 return ret;
3911}
3912
3913static int lan78xx_resume(struct usb_interface *intf)
3914{
3915 struct lan78xx_net *dev = usb_get_intfdata(intf);
3916 struct sk_buff *skb;
3917 struct urb *res;
3918 int ret;
3919 u32 buf;
3920
3921 if (!timer_pending(&dev->stat_monitor)) {
3922 dev->delta = 1;
3923 mod_timer(&dev->stat_monitor,
3924 jiffies + STAT_UPDATE_TIMER);
3925 }
3926
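	/* on the final (outermost) resume, restart the interrupt URB
	 * and resubmit any TX URBs that were deferred while the device
	 * was asleep
	 */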
3927 if (!--dev->suspend_count) {
3928 /* resume interrupt URBs */
3929 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3930 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3931
3932 spin_lock_irq(&dev->txq.lock);
3933 while ((res = usb_get_from_anchor(&dev->deferred))) {
3934 skb = (struct sk_buff *)res->context;
3935 ret = usb_submit_urb(res, GFP_ATOMIC);
3936 if (ret < 0) {
3937 dev_kfree_skb_any(skb);
3938 usb_free_urb(res);
3939 usb_autopm_put_interface_async(dev->intf);
3940 } else {
3941 netif_trans_update(dev->net);
3942 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3943 }
3944 }
3945
3946 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3947 spin_unlock_irq(&dev->txq.lock);
3948
3949 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3950 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3951 netif_start_queue(dev->net);
3952 tasklet_schedule(&dev->bh);
3953 }
3954 }
3955
3956 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3957 ret = lan78xx_write_reg(dev, WUCSR, 0);
3958 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3959
3960 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3961 WUCSR2_ARP_RCD_ |
3962 WUCSR2_IPV6_TCPSYN_RCD_ |
3963 WUCSR2_IPV4_TCPSYN_RCD_);
3964
3965 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3966 WUCSR_EEE_RX_WAKE_ |
3967 WUCSR_PFDA_FR_ |
3968 WUCSR_RFE_WAKE_FR_ |
3969 WUCSR_WUFR_ |
3970 WUCSR_MPR_ |
3971 WUCSR_BCST_FR_);
3972
3973 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3974 buf |= MAC_TX_TXEN_;
3975 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3976
3977 return 0;
3978}
3979
3980static int lan78xx_reset_resume(struct usb_interface *intf)
3981{
3982 struct lan78xx_net *dev = usb_get_intfdata(intf);
3983
3984 lan78xx_reset(dev);
3985
3986 phy_start(dev->net->phydev);
3987
3988 return lan78xx_resume(intf);
3989}
3990
3991static const struct usb_device_id products[] = {
3992 {
3993 /* LAN7800 USB Gigabit Ethernet Device */
3994 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3995 },
3996 {
3997 /* LAN7850 USB Gigabit Ethernet Device */
3998 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3999 },
4000 {
4001 /* LAN7801 USB Gigabit Ethernet Device */
4002 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4003 },
4004 {},
4005};
4006MODULE_DEVICE_TABLE(usb, products);
4007
4008static struct usb_driver lan78xx_driver = {
4009 .name = DRIVER_NAME,
4010 .id_table = products,
4011 .probe = lan78xx_probe,
4012 .disconnect = lan78xx_disconnect,
4013 .suspend = lan78xx_suspend,
4014 .resume = lan78xx_resume,
4015 .reset_resume = lan78xx_reset_resume,
4016 .supports_autosuspend = 1,
4017 .disable_hub_initiated_lpm = 1,
4018};
4019
4020module_usb_driver(lan78xx_driver);
4021
4022MODULE_AUTHOR(DRIVER_AUTHOR);
4023MODULE_DESCRIPTION(DRIVER_DESC);
4024MODULE_LICENSE("GPL");