1// SPDX-License-Identifier: GPL-2.0
2/*
3 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
4 *
5 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Felipe Balbi <balbi@ti.com>,
8 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9 */
10
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/platform_device.h>
16#include <linux/pm_runtime.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/list.h>
20#include <linux/dma-mapping.h>
21#include <linux/irq.h>
22#include <linux/pm_qos.h>
23#include <linux/usb/ch9.h>
24#include <linux/usb/phy.h>
25#include <linux/usb/gadget.h>
26#include <linux/platform_data/mv_usb.h>
27#include <linux/usb/mv_usb2_phy.h>
28#include <linux/power_supply.h>
29#include <linux/cputype.h>
30#include <soc/asr/addr-map.h>
31#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
32#include <linux/usb/composite.h>
33#endif
34#include <linux/memblock.h>
35
36#include "debug.h"
37#include "core.h"
38#include "gadget.h"
39#include "io.h"
40#ifdef CONFIG_CPU_ASR1901
41#include "../gadget/function/u_ether.h"
42#endif
43
44#define DWC3_ALIGN_FRAME(d, n) (((d)->frame_number + ((d)->interval * (n))) \
45 & ~((d)->interval - 1))
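/*
 * Worked example for DWC3_ALIGN_FRAME (numbers are illustrative only):
 * assuming dep->interval = 8 (a power of two) and dep->frame_number = 0x123,
 * DWC3_ALIGN_FRAME(dep, 1) evaluates to (0x123 + 8) & ~7 = 0x128, i.e. the
 * next frame number that is a whole multiple of the endpoint interval.
 */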
46#define ENUMERATION_DELAY (2 * HZ)
47#define DWC3_WAKEUP_TIMEOUT_SEC (5)
48
49
50static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
51 bool interrupt);
52static void dwc3_stop_active_transfers(struct dwc3 *dwc);
53static void dwc3_clear_stall_all_ep(struct dwc3 *dwc);
54static void __dwc3_gadget_stop(struct dwc3 *dwc);
55static int __dwc3_gadget_start(struct dwc3 *dwc);
56static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend);
57static void dwc3_gadget_set_speed_nolock(struct usb_gadget *g,
58 enum usb_device_speed speed);
59#ifdef CONFIG_USB_G_ANDROID
60extern void android_dev_enable(uint8_t enabled);
61#endif
62
63static BLOCKING_NOTIFIER_HEAD(dwc3_notifier_list);
64static struct dwc3 *the_controller;
65
66static DEFINE_MUTEX(usb_con_mutex);
67
68static ATOMIC_NOTIFIER_HEAD(asr_udc_resume_list);
69
70struct dwc3 *dwc3_get_controller(void)
71{
72 return the_controller;
73}
74
75static void dwc3_restart_work(struct work_struct *work)
76{
77 u32 vbus = 0;
78 int ret;
79
80 the_controller->usb_do_restart = 0;
81 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
82 if (ret) {
83 vbus = usb_phy_get_vbus(the_controller->usb2_phy);
84 }
85
86 if ((!the_controller->vbus_active) || (!vbus)) {
87 pr_err("%s vbus is off\n", __func__);
88 return;
89 }
90
91 msleep(500);
92 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
93 if (ret) {
94 vbus = usb_phy_get_vbus(the_controller->usb2_phy);
95 }
96
97 if ((!the_controller->vbus_active) || (!vbus)) {
98 pr_err("%s vbus is off2\n", __func__);
99 return;
100 }
101
102 pr_info("%s \n", __func__);
103 android_dev_enable(0);
104 android_dev_enable(1);
105}
106#ifdef CONFIG_DWC3_HWSULOG
107#define SULOG_BASE_OFFSET (0x20000)
108#define SULOG_PORT0_OFFSET (0x0)
109#define SULOG_PORT1_OFFSET (0x8000)
110#define SULOG_REG_CTRL (0x0)
111#define SULOG_EP_CFG (0x4)
112#define SULOG_DLY_TIMER (0x74)
113#define SULOG_INT_CFG (0x80)
114#define SULOG_INT_STS (0x84)
115
116#ifndef CONFIG_CPU_ASR1901
117#define RIPC_VIRT_BASE (APB_VIRT_BASE + 0x03D100 + 0x1C)
118#else
119#define RIPC_VIRT_BASE (APB_VIRT_BASE + 0x0B0100 + 0x1C)
120#endif
121
122static int hwsulog_ep;
123static bool hwsulog_enabled = false;
124static bool hwsulog_on = false;
125extern u32 sulog_ep_num;
126
127extern void register_hwsulog_udc_func(int (*func) (int ep_num, u32 flag));
128extern void unregister_hwsulog_udc_func(void);
129
130bool dwc3_hwsulog_is_on(void)
131{
132 return hwsulog_on;
133}
134
135void dwc3_hwsulog_on(bool is_on)
136{
137 hwsulog_on = is_on;
138
139 if (cpu_is_asr1901() || cpu_is_asr1906()) {
140 if (is_on) {
141 pr_info("enable sulog\n");
142 writel(3, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_CFG));
143 writel((readl(the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_STS)),
144 (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_STS));
145 writel((0x1 << 0) | (0x1 << 7) | (0x1 << 6)| (0x1 << 31),
146 (the_controller->hwsulog_regs + SULOG_REG_CTRL));
147 } else {
148 writel((0x1 << 7) | (0x1 << 6)| (0x1 << 31),
149 (the_controller->hwsulog_regs + SULOG_REG_CTRL));
150 pr_info("disable sulog\n");
151 }
152 }
153}
154
155int hwsulog_set_clear_stop_flag(bool set)
156{
157 u32 regval;
158
159 regval = readl(RIPC_VIRT_BASE);
160 if (set)
161 regval |= (0x1 << 0);
162 else
163 regval &= ~(0x1 << 0);
164 writel(regval, RIPC_VIRT_BASE);
165
166 return 0;
167}
168
169int asr_udc_hwsulog_callback(int ep_num, u32 flag)
170{
171 pr_info("%s flag: %d, ep: %d\n", __func__, flag, ep_num);
172 BUG_ON(!the_controller);
173 BUG_ON(!the_controller->hwsulog_regs);
174 ep_num = ep_num * 2 + 1;
175
176 if (1 == flag) {
177 /* don't configure sulog again if sulog is already working */
178 if (true == hwsulog_enabled) {
179 pr_err("hwsulog already enabled");
180 }
181
182 /* clear flag */
183 hwsulog_set_clear_stop_flag(false);
184 writel(ep_num & 0x1f, (the_controller->hwsulog_regs + SULOG_EP_CFG));
185 /* 1828: 4g only, 1903: 4g/5g */
186 if (cpu_is_asr1828()) {
187 writel((0x1 << 0) | (0x1 << 7),
188 (the_controller->hwsulog_regs + SULOG_REG_CTRL));
189 writel(0x0, (the_controller->hwsulog_regs +
190 SULOG_PORT1_OFFSET + SULOG_REG_CTRL));
191
192 writel(0, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_CFG));
193 } else if (cpu_is_asr1903()) {
194 writel((0x1 << 0) | (0x1 << 7) | (0x1 << 6) | (0x1 << 31),
195 (the_controller->hwsulog_regs + SULOG_REG_CTRL));
196
197 writel(3, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_CFG));
198 writel(3, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_STS));
199 writel(0x80000200, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_DLY_TIMER));
200 } else { /* asr1901 and asr1906 */
201 writel((0x1 << 7) | (0x1 << 6) | (0x1 << 31),
202 (the_controller->hwsulog_regs + SULOG_REG_CTRL));
203 writel(0, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_CFG));
204 writel(0, (the_controller->hwsulog_regs + SULOG_PORT1_OFFSET + SULOG_INT_CFG));
205 }
206
207 hwsulog_ep = ep_num;
208 hwsulog_enabled = true;
209 } else {
210 hwsulog_set_clear_stop_flag(true);
211 writel(0, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_CFG));
212 //writel(0x0, (the_controller->hwsulog_regs + SULOG_REG_CTRL));
213 hwsulog_ep = 0;
214 hwsulog_enabled = false;
215 }
216
217 return 0;
218}
219
220static void hwsulog_dump_regs(void)
221{
222 u32 i;
223
224 for (i = 0; i < 0xa0; i += 0x20) {
225 pr_info("0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
226 i, readl(the_controller->hwsulog_regs + i + 0),
227 readl(the_controller->hwsulog_regs + i + 4),
228 readl(the_controller->hwsulog_regs + i + 8),
229 readl(the_controller->hwsulog_regs + i + 0xc),
230 readl(the_controller->hwsulog_regs + i + 0x10),
231 readl(the_controller->hwsulog_regs + i + 0x14),
232 readl(the_controller->hwsulog_regs + i + 0x18),
233 readl(the_controller->hwsulog_regs + i + 0x1c));
234 }
235 for (i = 0x100; i < 0x160; i += 0x20) {
236 pr_info("0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
237 i, readl(the_controller->hwsulog_regs + i + 0),
238 readl(the_controller->hwsulog_regs + i + 4),
239 readl(the_controller->hwsulog_regs + i + 8),
240 readl(the_controller->hwsulog_regs + i + 0xc),
241 readl(the_controller->hwsulog_regs + i + 0x10),
242 readl(the_controller->hwsulog_regs + i + 0x14),
243 readl(the_controller->hwsulog_regs + i + 0x18),
244 readl(the_controller->hwsulog_regs + i + 0x1c));
245 }
246}
247
248static int hwsulog_restart_usb(void)
249{
250 writel(0, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_CFG));
251 writel(3, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_STS));
252
253 if (!work_pending(&the_controller->usb_restart_work.work)) {
254 hwsulog_set_clear_stop_flag(true);
255 schedule_delayed_work(&the_controller->usb_restart_work, 0);
256 }
257 hwsulog_dump_regs();
258 return 0;
259}
260
261static int hwsulog_restart_xfer(void)
262{
263 struct dwc3_gadget_ep_cmd_params params;
264 u32 cmd;
265 unsigned long flags;
266 struct dwc3_ep *dep = the_controller->eps[hwsulog_ep];
267 int ret;
268
269 memset(&params, 0, sizeof(params));
270 params.param1 = 0xc0000000;
271 cmd = DWC3_DEPCMD_STARTTRANSFER;
272
273 spin_lock_irqsave(&the_controller->lock, flags);
274 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
275 spin_unlock_irqrestore(&the_controller->lock, flags);
276
277 hwsulog_dump_regs();
278 return ret;
279}
280
281int hwsulog_error_handler(void)
282{
283 u32 regval;
284 int ret;
285
286 BUG_ON(!the_controller);
287 BUG_ON(!the_controller->hwsulog_regs);
288
289 regval = readl(the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_STS);
290 writel(regval, (the_controller->hwsulog_regs + SULOG_PORT0_OFFSET + SULOG_INT_STS));
291 pr_info_ratelimited("%s, sts: 0x%x\n", __func__, regval);
292
293 /* usb controller is dead, the only thing to do is to restart usb */
294 if (regval & (0x1 << 0)) {
295 hwsulog_restart_usb();
296 return 0;
297 }
298
299 if (regval & (0x1 << 1)) {
300 ret = hwsulog_restart_xfer();
301 if (-ETIMEDOUT == ret)
302 hwsulog_restart_usb();
303 }
304
305 return 0;
306}
307
308void dwc3_hwsulog_clear_int(void)
309{
310 hwsulog_error_handler();
311}
312#endif
313
314void dwc3_release_wakeup_event(void)
315{
316 pm_relax(the_controller->dev);
317}
318
319void dwc3_release_wakeup_event_timeout(u32 sec)
320{
321 pm_wakeup_event(the_controller->dev, (sec * 1000));
322}
323
324void dwc3_acquire_wakeup_event(void)
325{
326 pm_stay_awake(the_controller->dev);
327}
328
329void dwc3_release_pm_qos(void)
330{
331 pm_qos_update_request(&the_controller->qos_idle,
332 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
333}
334
335void dwc3_release_pm_qos_timeout(u32 sec)
336{
337 pm_qos_update_request_timeout(&the_controller->qos_idle,
338 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE, sec * (1000 * 1000));
339}
340
341void dwc3_acquire_pm_qos(void)
342{
343 pm_qos_update_request(&the_controller->qos_idle, the_controller->lpm_qos);
344}
345
346int asr_udc_register_resume_notifier(struct notifier_block *nb)
347{
348 int ret = 0;
349
350 ret = atomic_notifier_chain_register(&asr_udc_resume_list, nb);
351 if (ret)
352 return ret;
353
354 return 0;
355}
356
357int asr_udc_unregister_resume_notifier(struct notifier_block *nb)
358{
359 return atomic_notifier_chain_unregister(&asr_udc_resume_list, nb);
360}
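/*
 * Usage sketch for the resume notifier above (hypothetical client code,
 * not part of this driver). The chain is an atomic notifier, so the
 * callback must not sleep:
 *
 *	static int my_resume_cb(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_resume_nb = {
 *		.notifier_call = my_resume_cb,
 *	};
 *
 *	asr_udc_register_resume_notifier(&my_resume_nb);
 *	...
 *	asr_udc_unregister_resume_notifier(&my_resume_nb);
 */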
361
362static void asr_udc_notify_resume_event(struct dwc3 *dwc, int event)
363{
364 atomic_notifier_call_chain(&asr_udc_resume_list, event, NULL);
365}
366
367static const char *charger_type(unsigned int type)
368{
369 switch (type) {
370 case NULL_CHARGER: return "NULL_CHARGER";
371 case DEFAULT_CHARGER: return "DEFAULT_CHARGER";
372 case DCP_CHARGER: return "DCP_CHARGER";
373 case CDP_CHARGER: return "CDP_CHARGER";
374 case SDP_CHARGER: return "SDP_CHARGER";
375 default: return "NONE_STANDARD_CHARGER";
376 }
377}
378
379int udc_get_charger_type(void)
380{
381 struct dwc3 *dwc = the_controller;
382
383 if (!dwc)
384 return POWER_SUPPLY_TYPE_UNKNOWN;
385
386 switch(dwc->charger_type) {
387 case SDP_CHARGER:
388 return POWER_SUPPLY_TYPE_USB;
389 case DCP_CHARGER:
390 return POWER_SUPPLY_TYPE_USB_DCP;
391
392 case DEFAULT_CHARGER:
393 case CDP_CHARGER:
394 case NONE_STANDARD_CHARGER:
395 default:
396 return POWER_SUPPLY_TYPE_UNKNOWN;
397 }
398}
399
400static void call_charger_notifier(struct dwc3 *dwc)
401{
402 blocking_notifier_call_chain(&dwc3_notifier_list,
403 dwc->charger_type, NULL);
404}
405
406/* For any user that cares about USB UDC events, for example the charger */
407int mv_udc_register_client(struct notifier_block *nb)
408{
409 struct dwc3 *dwc = the_controller;
410 int ret = 0;
411
412 if (!dwc)
413 return -ENODEV;
414
415 ret = blocking_notifier_chain_register(&dwc3_notifier_list, nb);
416 if (ret)
417 return ret;
418
419 if (dwc->charger_type)
420 call_charger_notifier(dwc);
421
422 return 0;
423}
424EXPORT_SYMBOL(mv_udc_register_client);
425
426int mv_udc_unregister_client(struct notifier_block *nb)
427{
428 return blocking_notifier_chain_unregister(&dwc3_notifier_list, nb);
429}
430EXPORT_SYMBOL(mv_udc_unregister_client);
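/*
 * Usage sketch for the charger notifier (hypothetical charger-driver code,
 * not part of this file). The charger type is passed as the notifier
 * 'event' argument, and a client that registers after detection still sees
 * the current type because mv_udc_register_client() replays it:
 *
 *	static int my_charger_cb(struct notifier_block *nb,
 *				 unsigned long type, void *data)
 *	{
 *		if (type == DCP_CHARGER)
 *			pr_info("DCP charger detected\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_charger_nb = {
 *		.notifier_call = my_charger_cb,
 *	};
 *
 *	mv_udc_register_client(&my_charger_nb);
 */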
431
432static void do_delayed_charger_work(struct work_struct *work)
433{
434 struct dwc3 *dwc = NULL;
435
436 dwc = container_of(work, struct dwc3, delayed_charger_work.work);
437
438 /* if we still see DEFAULT_CHARGER, treat it as a non-standard charger */
439 if (dwc->charger_type == DEFAULT_CHARGER) {
440 dwc->charger_type = NONE_STANDARD_CHARGER;
441 }
442
443 dev_info(dwc->dev, "final charger type: %s\n",
444 charger_type(dwc->charger_type));
445
446 call_charger_notifier(dwc);
447
448
449 /* SDP or CDP need to transfer data, so hold the wakeup source.
450 * Also hold it for DCP: some chargers have a watchdog or no IRQ
451 * indication, so they must work in polling mode and must not
452 * suspend.
453 */
454 if ((dwc->charger_type == SDP_CHARGER) ||
455 (dwc->charger_type == NONE_STANDARD_CHARGER) ||
456 (dwc->charger_type == CDP_CHARGER) ||
457 (dwc->charger_type == DCP_CHARGER)) {
458 pm_stay_awake(dwc->dev);
459 pm_qos_update_request(&dwc->qos_idle, dwc->lpm_qos);
460 } else {
461 dev_info(dwc->dev, "rls pm lock\n");
462 pm_qos_update_request(&dwc->qos_idle,
463 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
464 /*
465 * leave some delay for the charger driver to do its work;
466 * for nz3 we need some extra time to restore the OS type,
467 * so use a 5 second timeout here to match pm_wakeup_event() below
468 */
469 pm_wakeup_event(dwc->dev, 5000);
470 }
471}
472
473void dwc3_report_sdp_charger(struct dwc3 *dwc)
474{
475 if (work_pending(&dwc->delayed_charger_work.work))
476 cancel_delayed_work(&dwc->delayed_charger_work);
477 dwc->charger_type = SDP_CHARGER;
478 schedule_delayed_work(&dwc->delayed_charger_work, 0);
479}
480
481
482static void __maybe_unused dump_dwc3_regs(struct dwc3 *dwc)
483{
484 u32 i;
485
486 for (i = 0xc100; i < 0xcc00; i += 32) {
487 pr_info("0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", i,
488 dwc3_readl(dwc->regs, i + 0),
489 dwc3_readl(dwc->regs, i + 4),
490 dwc3_readl(dwc->regs, i + 8),
491 dwc3_readl(dwc->regs, i + 12),
492 dwc3_readl(dwc->regs, i + 16),
493 dwc3_readl(dwc->regs, i + 20),
494 dwc3_readl(dwc->regs, i + 24),
495 dwc3_readl(dwc->regs, i + 28));
496 }
497
498 usb_phy_dump_cfg(dwc->usb2_phy);
499}
500
501/**
502 * dwc3_gadget_set_test_mode - enables usb2 test modes
503 * @dwc: pointer to our context structure
504 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
505 *
506 * Caller should take care of locking. This function will return 0 on
507 * success or -EINVAL if wrong Test Selector is passed.
508 */
509int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
510{
511 u32 reg;
512
513 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
514 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
515
516 switch (mode) {
517 case TEST_J:
518 case TEST_K:
519 case TEST_SE0_NAK:
520 case TEST_PACKET:
521 case TEST_FORCE_EN:
522 reg |= mode << 1;
523 break;
524 default:
525 return -EINVAL;
526 }
527
528 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
529
530 return 0;
531}
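/*
 * Illustrative call sequence (a sketch, not code copied from this driver's
 * callers): ep0 handling of SET_FEATURE(TEST_MODE) latches the selector
 * and then applies it with the controller lock held, e.g.:
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 *	if (ret == -EINVAL)
 *		pr_warn("invalid test mode selector\n");
 *
 * ('flags' and 'ret' are locals of the hypothetical caller.)
 */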
532
533/**
534 * dwc3_gadget_get_link_state - gets current state of usb link
535 * @dwc: pointer to our context structure
536 *
537 * Caller should take care of locking. This function will
538 * return the link state on success (>= 0) or -ETIMEDOUT.
539 */
540int dwc3_gadget_get_link_state(struct dwc3 *dwc)
541{
542 u32 reg;
543
544 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
545
546 return DWC3_DSTS_USBLNKST(reg);
547}
548
549/**
550 * dwc3_gadget_set_link_state - sets usb link to a particular state
551 * @dwc: pointer to our context structure
552 * @state: the state to put link into
553 *
554 * Caller should take care of locking. This function will
555 * return 0 on success or -ETIMEDOUT.
556 */
557int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
558{
559 int retries = 10000;
560 u32 reg;
561
562 /*
563 * Wait until device controller is ready. Only applies to 1.94a and
564 * later RTL.
565 */
566 if (dwc->revision >= DWC3_REVISION_194A) {
567 while (--retries) {
568 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
569 if (reg & DWC3_DSTS_DCNRD)
570 udelay(5);
571 else
572 break;
573 }
574
575 if (retries <= 0)
576 return -ETIMEDOUT;
577 }
578
579 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
580 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
581
582 /* set requested state */
583 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
584 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
585
586 /*
587 * The following code is racy when called from dwc3_gadget_wakeup,
588 * and is not needed, at least on newer versions
589 */
590 if (dwc->revision >= DWC3_REVISION_194A)
591 return 0;
592
593 /* wait for a change in DSTS */
594 retries = 10000;
595 while (--retries) {
596 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
597
598 if (DWC3_DSTS_USBLNKST(reg) == state)
599 return 0;
600
601 udelay(5);
602 }
603
604 return -ETIMEDOUT;
605}
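/*
 * Minimal usage sketch (for illustration; caller must hold dwc->lock):
 * to start resume/remote-wakeup signalling, the link is requested into
 * the Recovery state and the -ETIMEDOUT case above is checked:
 *
 *	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
 *	if (ret < 0)
 *		dev_err(dwc->dev, "failed to put link in Recovery\n");
 */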
606
607/**
608 * dwc3_ep_inc_trb - increment a trb index.
609 * @index: Pointer to the TRB index to increment.
610 *
611 * The index should never point to the link TRB. After incrementing,
612 * if it points to the link TRB, wrap around to the beginning. The
613 * link TRB is always at the last TRB entry.
614 */
615static inline void dwc3_ep_inc_trb(u16 *index, struct dwc3_ep *dep)
616{
617 (*index)++;
618 (*index) = (*index) & (dep->trb_num - 1);
619 if (*index == (dep->trb_num - 1))
620 *index = 0;
621}
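/*
 * Worked example (illustrative numbers): with dep->trb_num = 8 the link
 * TRB lives in the last slot (index 7). Incrementing from index 6 gives
 * 7 after the mask, which is the link TRB slot, so the index wraps back
 * to 0; incrementing from index 3 simply gives 4.
 */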
622
623/**
624 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
625 * @dep: The endpoint whose enqueue pointer we're incrementing
626 */
627static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
628{
629 dwc3_ep_inc_trb(&dep->trb_enqueue, dep);
630}
631
632/**
633 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
634 * @dep: The endpoint whose dequeue pointer we're incrementing
635 */
636static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
637{
638 dwc3_ep_inc_trb(&dep->trb_dequeue, dep);
639}
640
641static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
642 struct dwc3_request *req, int status)
643{
644 struct dwc3 *dwc = dep->dwc;
645
646 list_del(&req->list);
647 req->remaining = 0;
648 req->needs_extra_trb = false;
649 req->num_trbs = 0;
650
651 if (req->request.status == -EINPROGRESS)
652 req->request.status = status;
653
654 if (req->trb)
655 usb_gadget_unmap_request_by_dev(dwc->sysdev,
656 &req->request, req->direction);
657
658 req->trb = NULL;
659 trace_dwc3_gadget_giveback(req);
660
661#if 0 /* ASR private */
662 if (dep->number > 1)
663 pm_runtime_put(dwc->dev);
664#endif
665}
666
667/**
668 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
669 * @dep: The endpoint to whom the request belongs to
670 * @req: The request we're giving back
671 * @status: completion code for the request
672 *
673 * Must be called with controller's lock held and interrupts disabled. This
674 * function will unmap @req and call its ->complete() callback to notify upper
675 * layers that it has completed.
676 */
677void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
678 int status)
679{
680 struct dwc3 *dwc = dep->dwc;
681
682 if (unlikely((dwc->vbus_active == 0) && (req->request.actual == 0) && (status == 0))) {
683 pr_info_ratelimited("dwc3 gbk: vbus = 0\n");
684 status = -ESHUTDOWN;
685 }
686
687 dwc3_gadget_del_and_unmap_request(dep, req, status);
688 req->status = DWC3_REQUEST_STATUS_COMPLETED;
689
690 spin_unlock(&dwc->lock);
691 usb_gadget_giveback_request(&dep->endpoint, &req->request);
692 spin_lock(&dwc->lock);
693}
694
695/**
696 * dwc3_send_gadget_generic_command - issue a generic command for the controller
697 * @dwc: pointer to the controller context
698 * @cmd: the command to be issued
699 * @param: command parameter
700 *
701 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
702 * and wait for its completion.
703 */
704int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
705{
706 u32 timeout = 500;
707 int status = 0;
708 int ret = 0;
709 u32 reg;
710
711 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
712 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
713
714 do {
715 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
716 if (!(reg & DWC3_DGCMD_CMDACT)) {
717 status = DWC3_DGCMD_STATUS(reg);
718 if (status)
719 ret = -EINVAL;
720 break;
721 }
722 } while (--timeout);
723
724 if (!timeout) {
725 ret = -ETIMEDOUT;
726 status = -ETIMEDOUT;
727 }
728
729 trace_dwc3_gadget_generic_cmd(cmd, param, status);
730
731 return ret;
732}
733
734static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
735
736/**
737 * dwc3_send_gadget_ep_cmd - issue an endpoint command
738 * @dep: the endpoint to which the command is going to be issued
739 * @cmd: the command to be issued
740 * @params: parameters to the command
741 *
742 * Caller should handle locking. This function will issue @cmd with given
743 * @params to @dep and wait for its completion.
744 */
745int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
746 struct dwc3_gadget_ep_cmd_params *params)
747{
748 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
749 struct dwc3 *dwc = dep->dwc;
750 u32 timeout = 100000;
751 u32 saved_config = 0;
752 u32 reg;
753
754 int cmd_status = 0;
755 int ret = -EINVAL;
756
757 /*
758 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
759 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
760 * endpoint command.
761 *
762 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
763 * settings. Restore them after the command is completed.
764 *
765 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
766 */
767 if (dwc->gadget.speed <= USB_SPEED_HIGH) {
768 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
769 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
770 saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
771 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
772 }
773
774 if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
775 saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
776 reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
777 }
778
779 if (saved_config)
780 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
781 }
782
783 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
784 int link_state;
785
786 link_state = dwc3_gadget_get_link_state(dwc);
787 if ((link_state == DWC3_LINK_STATE_U1 ||
788 link_state == DWC3_LINK_STATE_U2 ||
789 link_state == DWC3_LINK_STATE_U3) &&
790 (dwc->gadget.state >= USB_STATE_ADDRESS)) {
791 ret = __dwc3_gadget_wakeup(dwc);
792 dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
793 ret);
794 }
795 }
796
797 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER) {
798 int link_state;
799
800 link_state = dwc3_gadget_get_link_state(dwc);
801 if ((link_state == DWC3_LINK_STATE_U3) &&
802 (dwc->gadget.state >= USB_STATE_ADDRESS)) {
803 ret = __dwc3_gadget_wakeup(dwc);
804 if (ret < 0)
805 dev_warn_ratelimited(dwc->dev, "u3 wakeup failed --> %d\n",
806 ret);
807 }
808 }
809
810 dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
811 dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
812 dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
813
814 /*
815 * Synopsys Databook 2.60a states in section 6.3.2.5.6 that if we're
816 * not relying on XferNotReady, we can make use of a special "No
817 * Response Update Transfer" command where we should clear both CmdAct
818 * and CmdIOC bits.
819 *
820 * With this, we don't need to wait for command completion and can
821 * straight away issue further commands to the endpoint.
822 *
823 * NOTICE: We're making an assumption that control endpoints will never
824 * make use of Update Transfer command. This is a safe assumption
825 * because we can never have more than one request at a time with
826 * Control Endpoints. If anybody changes that assumption, this chunk
827 * needs to be updated accordingly.
828 */
829 if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
830 !usb_endpoint_xfer_isoc(desc))
831 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
832 else
833 cmd |= DWC3_DEPCMD_CMDACT;
834 dep->num_cmds++;
835
836 dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
837 do {
838 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
839 if (!(reg & DWC3_DEPCMD_CMDACT)) {
840 cmd_status = DWC3_DEPCMD_STATUS(reg);
841
842 switch (cmd_status) {
843 case 0:
844 ret = 0;
845 break;
846 case DEPEVT_TRANSFER_NO_RESOURCE:
847 dev_err(dwc->dev, "ep%d XFER_NO_RESOURCE cmd: 0x%x, param: 0x%x 0x%x 0x%x\n",
848 dep->number, cmd,
849 params->param0, params->param1, params->param2);
850 ret = -EINVAL;
851 break;
852 case DEPEVT_TRANSFER_BUS_EXPIRY:
853 /*
854 * SW issues START TRANSFER command to
855 * isochronous ep with future frame interval. If
856 * future interval time has already passed when
857 * core receives the command, it will respond
858 * with an error status of 'Bus Expiry'.
859 *
860 * Instead of always returning -EINVAL, let's
861 * give a hint to the gadget driver that this is
862 * the case by returning -EAGAIN.
863 */
864 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
865 printk_ratelimited(KERN_DEBUG "ep%d XFER_BUS_EXPIRY cmd: 0x%x, param: 0x%x 0x%x 0x%x 0x%x\n",
866 dep->number, cmd,
867 params->param0, params->param1, params->param2,
868 dwc3_readl(dwc->regs, DWC3_DSTS));
869 else
870 dev_err(dwc->dev, "ep%d XFER_BUS_EXPIRY cmd: 0x%x, param: 0x%x 0x%x 0x%x 0x%x\n",
871 dep->number, cmd,
872 params->param0, params->param1, params->param2,
873 dwc3_readl(dwc->regs, DWC3_DSTS));
874 ret = -EAGAIN;
875 break;
876 default:
877 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
878 }
879
880 break;
881 }
882 } while (--timeout);
883
884 if ((timeout == 0) && (cmd != 0x402)) {
885 dev_err(dwc->dev, "cmd: 0x%x timeout == 0, usb restart\n", cmd);
886 ret = -ETIMEDOUT;
887 cmd_status = -ETIMEDOUT;
888 /* dump_dwc3_regs(dwc); */
889 dwc->usb_do_restart = 1;
890 usb_phy_dump_cfg(dwc->usb2_phy);
891 }
892
893 trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
894
895 if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
896 dep->flags |= DWC3_EP_TRANSFER_STARTED;
897 dwc3_gadget_ep_get_transfer_index(dep);
898 }
899
900 if (saved_config) {
901 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
902 reg |= saved_config;
903 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
904 }
905
906 return ret;
907}
908
909static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
910{
911 struct dwc3 *dwc = dep->dwc;
912 struct dwc3_gadget_ep_cmd_params params;
913 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
914
915 /*
916 * As of core revision 2.60a the recommended programming model
917 * is to set the ClearPendIN bit when issuing a Clear Stall EP
918 * command for IN endpoints. This is to prevent an issue where
919 * some (non-compliant) hosts may not send ACK TPs for pending
920 * IN transfers due to a mishandled error condition. Synopsys
921 * STAR 9000614252.
922 */
923 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
924 (dwc->gadget.speed >= USB_SPEED_SUPER))
925 cmd |= DWC3_DEPCMD_CLEARPENDIN;
926
927 memset(&params, 0, sizeof(params));
928
929 return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
930}
931
932static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
933 struct dwc3_trb *trb)
934{
935 u32 offset = (char *) trb - (char *) dep->trb_pool;
936
937 return dep->trb_pool_dma + offset;
938}
939
940static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
941{
942 struct dwc3 *dwc = dep->dwc;
943
944 if (dep->trb_pool)
945 return 0;
946
947 if ((cpu_is_asr1901() || cpu_is_asr1906()) && (dep->number == 2))
948 dep->trb_num = ASR1901_DWC3_EP2_TRB_NUM;
949 else if ((cpu_is_asr1901() || cpu_is_asr1906()) && (dep->number == 3))
950 dep->trb_num = ASR1901_DWC3_EP3_TRB_NUM;
951 else
952 dep->trb_num = DWC3_TRB_NUM;
953
954 dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
955 sizeof(struct dwc3_trb) * dep->trb_num,
956 &dep->trb_pool_dma, GFP_KERNEL);
957 if (!dep->trb_pool) {
958 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
959 dep->name);
960 return -ENOMEM;
961 }
962 memset(dep->trb_pool, 0x0, sizeof(struct dwc3_trb) * dep->trb_num);
963
964 return 0;
965}
966
967static void dwc3_free_trb_pool(struct dwc3_ep *dep)
968{
969 struct dwc3 *dwc = dep->dwc;
970
971
972 dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * dep->trb_num,
973 dep->trb_pool, dep->trb_pool_dma);
974
975 dep->trb_pool = NULL;
976 dep->trb_pool_dma = 0;
977}
978
979static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
980{
981 struct dwc3_gadget_ep_cmd_params params;
982
983 memset(&params, 0x00, sizeof(params));
984
985 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
986
987 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
988 &params);
989}
990
991/**
992 * dwc3_gadget_start_config - configure ep resources
993 * @dep: endpoint that is being enabled
994 *
995 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
996 * completion, it will set Transfer Resource for all available endpoints.
997 *
998 * The assignment of transfer resources cannot perfectly follow the data book
999 * due to the fact that the controller driver does not have all knowledge of the
1000 * configuration in advance. It is given this information piecemeal by the
1001 * composite gadget framework after every SET_CONFIGURATION and
1002 * SET_INTERFACE. Trying to follow the databook programming model in this
1003 * scenario can cause errors. For two reasons:
1004 *
1005 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
1006 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
1007 * incorrect in the scenario of multiple interfaces.
1008 *
1009 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
1010 * endpoint on alt setting (8.1.6).
1011 *
1012 * The following simplified method is used instead:
1013 *
1014 * All hardware endpoints can be assigned a transfer resource and this setting
1015 * will stay persistent until either a core reset or hibernation. So whenever we
1016 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
1017 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
1018 * guaranteed that there are as many transfer resources as endpoints.
1019 *
1020 * This function is called for each endpoint when it is being enabled but is
1021 * triggered only when called for EP0-out, which always happens first, and which
1022 * should only happen in one of the above conditions.
1023 */
1024static int dwc3_gadget_start_config(struct dwc3_ep *dep)
1025{
1026 struct dwc3_gadget_ep_cmd_params params;
1027 struct dwc3 *dwc;
1028 u32 cmd;
1029 int i;
1030 int ret;
1031
1032 if (dep->number)
1033 return 0;
1034
1035 memset(&params, 0x00, sizeof(params));
1036 cmd = DWC3_DEPCMD_DEPSTARTCFG;
1037 dwc = dep->dwc;
1038
1039 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1040 if (ret) {
1041 dev_err(dwc->dev, "DWC3_DEPCMD_DEPSTARTCFG failed\n");
1042 return ret;
1043 }
1044 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1045 struct dwc3_ep *dep = dwc->eps[i];
1046
1047 if (!dep)
1048 continue;
1049
1050 ret = dwc3_gadget_set_xfer_resource(dep);
1051 if (ret) {
1052 dev_err(dwc->dev, "dwc3_gadget_set_xfer_resource failed\n");
1053 return ret;
1054 }
1055 }
1056
1057 dev_info(dep->dwc->dev, "st ep cfg done\n");
1058 return 0;
1059}
1060
1061static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
1062{
1063 const struct usb_ss_ep_comp_descriptor *comp_desc;
1064 const struct usb_endpoint_descriptor *desc;
1065 struct dwc3_gadget_ep_cmd_params params;
1066 struct dwc3 *dwc = dep->dwc;
1067
1068 comp_desc = dep->endpoint.comp_desc;
1069 desc = dep->endpoint.desc;
1070
1071 memset(&params, 0x00, sizeof(params));
1072
1073 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1074 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1075
1076 /* Burst size is only needed in SuperSpeed mode */
1077 if (dwc->gadget.speed >= USB_SPEED_SUPER) {
1078 u32 burst = dep->endpoint.maxburst;
1079 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
1080 }
1081
1082 params.param0 |= action;
1083 if (action == DWC3_DEPCFG_ACTION_RESTORE)
1084 params.param2 |= dep->saved_state;
1085
1086 if (usb_endpoint_xfer_control(desc))
1087 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
1088
1089 if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
1090 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
1091
1092 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1093 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1094 | DWC3_DEPCFG_STREAM_EVENT_EN;
1095 dep->stream_capable = true;
1096 }
1097
1098 if (!usb_endpoint_xfer_control(desc))
1099 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1100
1101 /*
1102 * We are doing 1:1 mapping for endpoints, meaning
1103 * Physical Endpoint 2 maps to Logical Endpoint 2 and
1104 * so on. We consider the direction bit as part of the physical
1105 * endpoint number. So USB endpoint 0x81 is 0x03.
1106 */
1107 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1108
1109 /*
1110 * We must use the lower 16 TX FIFOs even though
1111 * HW might have more
1112 */
1113 if (dep->direction)
1114 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1115
1116 if (desc->bInterval) {
1117 u8 bInterval_m1;
1118
1119 /*
1120 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
1121 * must be set to 0 when the controller operates in full-speed.
1122 */
1123 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
1124 if (dwc->gadget.speed == USB_SPEED_FULL)
1125 bInterval_m1 = 0;
1126
1127 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
1128 dwc->gadget.speed == USB_SPEED_FULL)
1129 dep->interval = desc->bInterval;
1130 else
1131 dep->interval = 1 << (desc->bInterval - 1);
1132
1133 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
1134 }
1135
1136 return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
1137}
1138
1139/**
1140 * __dwc3_gadget_ep_enable - initializes a hw endpoint
1141 * @dep: endpoint to be initialized
1142 * @action: one of INIT, MODIFY or RESTORE
1143 *
1144 * Caller should take care of locking. Execute all necessary commands to
1145 * initialize a HW endpoint so it can be used by a gadget driver.
1146 */
1147static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
1148{
1149 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
1150 struct dwc3 *dwc = dep->dwc;
1151
1152 u32 reg;
1153 int ret;
1154
1155 if (!(dep->flags & DWC3_EP_ENABLED)) {
1156 ret = dwc3_gadget_start_config(dep);
1157 if (ret) {
1158 dev_err(dep->dwc->dev, "start ep%d config failed %d\n", dep->number, ret);
1159 return ret;
1160 }
1161 }
1162
1163 ret = dwc3_gadget_set_ep_config(dep, action);
1164 if (ret) {
1165 dev_err(dep->dwc->dev, "set ep%d config failed %d\n", dep->number, ret);
1166 return ret;
1167 }
1168
1169 if (!(dep->flags & DWC3_EP_ENABLED)) {
1170 struct dwc3_trb *trb_st_hw;
1171 struct dwc3_trb *trb_link;
1172
1173 dep->type = usb_endpoint_type(desc);
1174 dep->flags |= DWC3_EP_ENABLED;
1175
1176 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1177 reg |= DWC3_DALEPENA_EP(dep->number);
1178 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1179
1180 if ((dep->number == 0) || (dep->number == 1)) {
1181 if (dep->trb_dequeue || dep->trb_enqueue) {
1182 pr_info("reset trb index(%d %d) for ep%d\n", dep->trb_dequeue, dep->trb_enqueue, dep->number);
1183 dep->trb_dequeue = 0;
1184 dep->trb_enqueue = 0;
1185 }
1186 }
1187
1188 if (usb_endpoint_xfer_control(desc))
1189 goto out;
1190
1191 /* Initialize the TRB ring */
1192 dep->trb_dequeue = 0;
1193 dep->trb_enqueue = 0;
1194
1195 memset(dep->trb_pool, 0,
1196 sizeof(struct dwc3_trb) * dep->trb_num);
1197 /* Link TRB. The HWO bit is never reset */
1198 trb_st_hw = &dep->trb_pool[0];
1199
1200 trb_link = &dep->trb_pool[dep->trb_num - 1];
1201
1202 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
1203 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
1204 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
1205 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
1206 }
1207
1208#ifndef CONFIG_DWC3_HWSULOG
1209 /*
1210 * Issue StartTransfer here with no-op TRB so we can always rely on No
1211 * Response Update Transfer command.
1212 */
1213 if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
1214 usb_endpoint_xfer_int(desc)) {
1215 struct dwc3_gadget_ep_cmd_params params;
1216 struct dwc3_trb *trb;
1217 dma_addr_t trb_dma;
1218 u32 cmd;
1219
1220 memset(&params, 0, sizeof(params));
1221 trb = &dep->trb_pool[0];
1222 trb_dma = dwc3_trb_dma_offset(dep, trb);
1223
1224 params.param0 = upper_32_bits(trb_dma);
1225 params.param1 = lower_32_bits(trb_dma);
1226
1227 cmd = DWC3_DEPCMD_STARTTRANSFER;
1228
1229 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1230 if (ret < 0) {
1231 dev_info(dep->dwc->dev, "ep%d STARTTRANSFER failed: %d\n", dep->number, ret);
1232 return ret;
1233 }
1234 }
1235#else
1236 /*
1237 * Issue StartTransfer here with no-op TRB so we can always rely on No
1238 * Response Update Transfer command.
1239 */
1240 if (sulog_ep_num && (dep->number == (sulog_ep_num * 2 + 1))) {
1241 pr_info("skip sulog ep: %d\n", sulog_ep_num);
1242 } else if (((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
1243 usb_endpoint_xfer_int(desc))) {
1244 struct dwc3_gadget_ep_cmd_params params;
1245 struct dwc3_trb *trb;
1246 dma_addr_t trb_dma;
1247 u32 cmd;
1248
1249 memset(&params, 0, sizeof(params));
1250 trb = &dep->trb_pool[0];
1251 trb_dma = dwc3_trb_dma_offset(dep, trb);
1252
1253 params.param0 = upper_32_bits(trb_dma);
1254 params.param1 = lower_32_bits(trb_dma);
1255
1256 cmd = DWC3_DEPCMD_STARTTRANSFER;
1257
1258 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1259 if (ret < 0) {
1260 dev_info(dep->dwc->dev, "ep%d STARTTRANSFER failed: %d\n", dep->number, ret);
1261 return ret;
1262 }
1263 }
1264#endif
1265out:
1266 trace_dwc3_gadget_ep_enable(dep);
1267 if (dep->number <= 1)
1268 dev_info(dep->dwc->dev, "ep%d enable done\n", dep->number);
1269 return 0;
1270}
1271
1272static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
1273{
1274 struct dwc3_request *req;
1275
1276 dwc3_stop_active_transfer(dep, true, false);
1277
1278 /* give back all requests to the gadget driver */
1279 while (!list_empty(&dep->started_list)) {
1280 req = next_request(&dep->started_list);
1281
1282 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
1283 }
1284
1285 while (!list_empty(&dep->pending_list)) {
1286 req = next_request(&dep->pending_list);
1287
1288 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
1289 }
1290
1291 while (!list_empty(&dep->cancelled_list)) {
1292 req = next_request(&dep->cancelled_list);
1293
1294 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
1295 }
1296}
1297
1298/**
1299 * __dwc3_gadget_ep_disable - disables a hw endpoint
1300 * @dep: the endpoint to disable
1301 *
1302 * This function undoes what __dwc3_gadget_ep_enable did and also removes
1303 * requests which are currently being processed by the hardware and those which
1304 * are not yet scheduled.
1305 *
1306 * Caller should take care of locking.
1307 */
1308static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
1309{
1310 struct dwc3 *dwc = dep->dwc;
1311 u32 reg;
1312
1313 trace_dwc3_gadget_ep_disable(dep);
1314
1315 /* make sure HW endpoint isn't stalled */
1316 if (dep->flags & DWC3_EP_STALL)
1317 __dwc3_gadget_ep_set_halt(dep, 0, false);
1318
1319 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1320 reg &= ~DWC3_DALEPENA_EP(dep->number);
1321 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1322
1323 /* Clear out the ep descriptors for non-ep0 */
1324 if (dep->number > 1) {
1325 dep->endpoint.comp_desc = NULL;
1326 dep->endpoint.desc = NULL;
1327 }
1328
1329 dwc3_remove_requests(dwc, dep);
1330
1331 dep->stream_capable = false;
1332 dep->type = 0;
1333 dep->flags = 0;
1334
1335 return 0;
1336}
1337
1338/* -------------------------------------------------------------------------- */
1339
1340static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
1341 const struct usb_endpoint_descriptor *desc)
1342{
1343 return -EINVAL;
1344}
1345
1346static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
1347{
1348 return -EINVAL;
1349}
1350
1351/* -------------------------------------------------------------------------- */
1352
1353static int dwc3_gadget_ep_enable(struct usb_ep *ep,
1354 const struct usb_endpoint_descriptor *desc)
1355{
1356 struct dwc3_ep *dep;
1357 struct dwc3 *dwc;
1358 unsigned long flags;
1359 int ret;
1360
1361 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
1362 pr_debug("dwc3: invalid parameters\n");
1363 return -EINVAL;
1364 }
1365
1366 if (!desc->wMaxPacketSize) {
1367 pr_debug("dwc3: missing wMaxPacketSize\n");
1368 return -EINVAL;
1369 }
1370
1371 dep = to_dwc3_ep(ep);
1372 dwc = dep->dwc;
1373
1374 if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
1375 "%s is already enabled\n",
1376 dep->name))
1377 return 0;
1378
1379 spin_lock_irqsave(&dwc->lock, flags);
1380 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
1381 spin_unlock_irqrestore(&dwc->lock, flags);
1382
1383 return ret;
1384}
1385
1386static int dwc3_gadget_ep_disable(struct usb_ep *ep)
1387{
1388 struct dwc3_ep *dep;
1389 struct dwc3 *dwc;
1390 unsigned long flags;
1391 int ret;
1392
1393 if (!ep) {
1394 pr_debug("dwc3: invalid parameters\n");
1395 return -EINVAL;
1396 }
1397
1398 dep = to_dwc3_ep(ep);
1399 dwc = dep->dwc;
1400
1401 if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
1402 "%s is already disabled\n",
1403 dep->name))
1404 return 0;
1405
1406 spin_lock_irqsave(&dwc->lock, flags);
1407 ret = __dwc3_gadget_ep_disable(dep);
1408 spin_unlock_irqrestore(&dwc->lock, flags);
1409
1410 return ret;
1411}
1412
1413static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
1414 gfp_t gfp_flags)
1415{
1416 struct dwc3_request *req;
1417 struct dwc3_ep *dep = to_dwc3_ep(ep);
1418
1419 req = kzalloc(sizeof(*req), gfp_flags);
1420 if (!req)
1421 return NULL;
1422
1423 req->direction = dep->direction;
1424 req->epnum = dep->number;
1425 req->dep = dep;
1426 req->status = DWC3_REQUEST_STATUS_UNKNOWN;
1427
1428 trace_dwc3_alloc_request(req);
1429
1430 return &req->request;
1431}
1432
1433static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
1434 struct usb_request *request)
1435{
1436 struct dwc3_request *req = to_dwc3_request(request);
1437
1438 trace_dwc3_free_request(req);
1439 kfree(req);
1440}
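/*
 * These two ops back the generic gadget request API. A function driver
 * uses them indirectly, roughly as below (sketch only; 'ep', 'buf',
 * 'len' and 'my_complete' are assumed to exist in the caller):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 *	...
 *	usb_ep_free_request(ep, req);
 */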
1441
1442/**
1443 * dwc3_ep_prev_trb - returns the previous TRB in the ring
1444 * @dep: The endpoint with the TRB ring
1445 * @index: The index of the current TRB in the ring
1446 *
1447 * Returns the TRB prior to the one pointed to by the index. If the
1448 * index is 0, we will wrap backwards, skip the link TRB, and return
1449 * the one just before that.
1450 */
1451static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u16 index)
1452{
1453 u16 tmp = index;
1454
1455
1456 if (!tmp)
1457 tmp = dep->trb_num - 1;
1458
1459 return &dep->trb_pool[tmp - 1];
1460}
1461
1462static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
1463{
1464 u16 trbs_left;
1465
1466 /*
1467 * If the enqueue & dequeue are equal then the TRB ring is either full
1468 * or empty. It's considered full when there are dep->trb_num - 1 TRBs
1469 * pending to be processed by the driver.
1470 */
1471 if (dep->trb_enqueue == dep->trb_dequeue) {
1472 struct dwc3_request *req;
1473
1474 /*
1475 * If any request remains in the started_list with active TRBs
1476 * at this point, then there is no TRB available.
1477 */
1478 req = next_request(&dep->started_list);
1479 if (req && req->num_trbs)
1480 return 0;
1481
1482 return dep->trb_num - 1;
1483 }
1484
1485 trbs_left = dep->trb_dequeue - dep->trb_enqueue;
1486
1487 trbs_left &= (dep->trb_num - 1);
1488
1489
1490 if (dep->trb_dequeue < dep->trb_enqueue)
1491 trbs_left--;
1492
1493 return trbs_left;
1494}
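/*
 * Worked example (illustrative numbers): with dep->trb_num = 8,
 * trb_enqueue = 5 and trb_dequeue = 2, the subtraction gives
 * (2 - 5) & 7 = 5; because dequeue < enqueue the free span wraps over
 * the link TRB slot, so one is subtracted and 4 usable TRBs are
 * reported.
 */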
1495
1496static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
1497 dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
1498 unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
1499{
1500 struct dwc3 *dwc = dep->dwc;
1501 struct usb_gadget *gadget = &dwc->gadget;
1502 enum usb_device_speed speed = gadget->speed;
1503
1504 /* sanity check */
1505 if (unlikely(length && ((dma > dwc->phys_mem_end)
1506 || ((dma + length) > dwc->phys_mem_end)))) {
1507 pr_emerg("dwc3 dma: 0x%x, len: 0x%x, dwc->phys_mem_end: 0x%lx\n",
1508 dma, length, dwc->phys_mem_end);
1509 BUG();
1510 }
1511
1512 trb->size = DWC3_TRB_SIZE_LENGTH(length);
1513 trb->bpl = lower_32_bits(dma);
1514
1515#ifndef CONFIG_CPU_ASR1901
1516 trb->bph = upper_32_bits(dma);
1517#endif
1518
1519 switch (usb_endpoint_type(dep->endpoint.desc)) {
1520 case USB_ENDPOINT_XFER_CONTROL:
1521 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
1522 break;
1523
1524 case USB_ENDPOINT_XFER_ISOC:
1525 if (!node) {
1526 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
1527
1528 /*
1529 * USB Specification 2.0 Section 5.9.2 states that: "If
1530 * there is only a single transaction in the microframe,
1531 * only a DATA0 data packet PID is used. If there are
1532 * two transactions per microframe, DATA1 is used for
1533 * the first transaction data packet and DATA0 is used
1534 * for the second transaction data packet. If there are
1535 * three transactions per microframe, DATA2 is used for
1536 * the first transaction data packet, DATA1 is used for
1537 * the second, and DATA0 is used for the third."
1538 *
1539 * IOW, we should satisfy the following cases:
1540 *
1541 * 1) length <= maxpacket
1542 * - DATA0
1543 *
1544 * 2) maxpacket < length <= (2 * maxpacket)
1545 * - DATA1, DATA0
1546 *
1547 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
1548 * - DATA2, DATA1, DATA0
1549 */
1550 if (speed == USB_SPEED_HIGH) {
1551 struct usb_ep *ep = &dep->endpoint;
1552 unsigned int mult = 2;
1553 unsigned int maxp = usb_endpoint_maxp(ep->desc);
1554
1555 if (length <= (2 * maxp))
1556 mult--;
1557
1558 if (length <= maxp)
1559 mult--;
1560
1561 trb->size |= DWC3_TRB_SIZE_PCM1(mult);
1562 }
1563 } else {
1564 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
1565 }
1566
1567 if (!no_interrupt && !chain)
1568 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1569 break;
1570
1571 case USB_ENDPOINT_XFER_BULK:
1572 case USB_ENDPOINT_XFER_INT:
1573 trb->ctrl = DWC3_TRBCTL_NORMAL;
1574 break;
1575 default:
1576 /*
1577 * This is only possible with faulty memory because we
1578 * checked it already :)
1579 */
1580 dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
1581 usb_endpoint_type(dep->endpoint.desc));
1582 }
1583
1584 /*
1585 * Enable Continue on Short Packet
1586 * when the endpoint is not stream capable
1587 */
1588 if (usb_endpoint_dir_out(dep->endpoint.desc)) {
1589 if (!dep->stream_capable)
1590 trb->ctrl |= DWC3_TRB_CTRL_CSP;
1591
1592 if (short_not_ok)
1593 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1594 }
1595
1596 if ((!no_interrupt && !chain) ||
1597 (dwc3_calc_trbs_left(dep) == 1))
1598 trb->ctrl |= DWC3_TRB_CTRL_IOC;
1599
1600 if (chain)
1601 trb->ctrl |= DWC3_TRB_CTRL_CHN;
1602
1603 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
1604 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
1605
1606 /*
1607 * As per data book section 4.2.3.2 "TRB Control Bit Rules":
1608 *
1609 * The controller autonomously checks the HWO field of a TRB to determine if the
1610 * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
1611 * is valid before setting the HWO field to '1'. In most systems, this means that
1612 * software must update the fourth DWORD of a TRB last.
1613 *
1614 * However, there is a possibility of CPU re-ordering here which can
1615 * cause the controller to observe the HWO bit set prematurely.
1616 * Add a write memory barrier to prevent CPU re-ordering.
1617 */
1618 wmb();
1619 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1620
1621 dwc3_ep_inc_enq(dep);
1622
1623 trace_dwc3_prepare_trb(dep, trb);
1624}
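/*
 * Worked example for the high-speed isochronous PCM1 setting above
 * (illustrative numbers): with maxp = 1024 and a 2048-byte request,
 * length is not <= maxp but is <= 2 * maxp, so mult ends up as 1 and
 * the controller sends two packets per microframe (DATA1 then DATA0),
 * matching case 2) in the comment above.
 */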
1625
1626/**
1627 * dwc3_prepare_one_trb - setup one TRB from one request
1628 * @dep: endpoint for which this request is prepared
1629 * @req: dwc3_request pointer
1630 * @trb_length: buffer size of the TRB
1631 * @chain: should this TRB be chained to the next?
1632 * @node: only for isochronous endpoints. First TRB needs different type.
1633 */
1634static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
1635 struct dwc3_request *req, unsigned int trb_length,
1636 unsigned chain, unsigned node)
1637{
1638 struct dwc3_trb *trb;
1639 dma_addr_t dma;
1640 unsigned stream_id = req->request.stream_id;
1641 unsigned short_not_ok = req->request.short_not_ok;
1642 unsigned no_interrupt = req->request.no_interrupt;
1643
1644 if (req->request.num_sgs > 0)
1645 dma = sg_dma_address(req->start_sg);
1646 else
1647 dma = req->request.dma;
1648
1649 trb = &dep->trb_pool[dep->trb_enqueue];
1650
1651 if (!req->trb) {
1652 dwc3_gadget_move_started_request(req);
1653 req->trb = trb;
1654 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
1655 }
1656
1657 req->num_trbs++;
1658
1659 __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
1660 stream_id, short_not_ok, no_interrupt);
1661}
1662
1663static int dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
1664 struct dwc3_request *req)
1665{
1666 struct scatterlist *sg = req->start_sg;
1667 struct scatterlist *s;
1668 int i, used_trbs = 0;
1669 unsigned int length = req->request.length;
1670 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1671 unsigned int rem = length % maxp;
1672 unsigned int remaining = req->request.num_mapped_sgs
1673 - req->num_queued_sgs;
1674
1675 /*
1676 * If we resume preparing the request, then get the remaining length of
1677 * the request and resume where we left off.
1678 */
1679 for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
1680 length -= sg_dma_len(s);
1681
1682 for_each_sg(sg, s, remaining, i) {
1683 unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
1684 unsigned int trb_length;
1685 unsigned chain = true;
1686
1687 trb_length = min_t(unsigned int, length, sg_dma_len(s));
1688
1689 length -= trb_length;
1690
1691 /*
1692 * The IOMMU driver may coalesce sgs that share a page boundary
1693 * into one before handing them to the USB driver. With this, the
1694 * number of mapped sgs may not equal the number of sgs that were
1695 * passed in. So mark the chain bit false if this is the last
1696 * mapped sg.
1697 */
1698 if ((i == remaining - 1) || !length)
1699 chain = false;
1700
1701 if (!num_trbs_left)
1702 break;
1703
1704 if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
1705 struct dwc3 *dwc = dep->dwc;
1706 struct dwc3_trb *trb;
1707
1708 if (num_trbs_left < 2) {
1709 break;
1710 }
1711
1712 req->needs_extra_trb = true;
1713
1714 /* prepare normal TRB */
1715 dwc3_prepare_one_trb(dep, req, trb_length, true, i);
1716 used_trbs++;
1717
1718 /* Now prepare one extra TRB to align transfer size */
1719 trb = &dep->trb_pool[dep->trb_enqueue];
1720 req->num_trbs++;
1721 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
1722 maxp - rem, false, 1,
1723 req->request.stream_id,
1724 req->request.short_not_ok,
1725 req->request.no_interrupt);
1726 used_trbs++;
1727 } else if (req->request.zero && req->request.length &&
1728 !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1729 !rem && !chain) {
1730 struct dwc3 *dwc = dep->dwc;
1731 struct dwc3_trb *trb;
1732
1733 if ((req->direction && (num_trbs_left < 2)) ||
1734 ((!req->direction) && (num_trbs_left < 3))) {
1735 break;
1736 }
1737
1738 req->needs_extra_trb = true;
1739
1740 /* Prepare normal TRB */
1741 dwc3_prepare_one_trb(dep, req, trb_length, true, i);
1742 used_trbs++;
1743 /* Prepare one extra TRB to handle ZLP */
1744 trb = &dep->trb_pool[dep->trb_enqueue];
1745 req->num_trbs++;
1746 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
1747 !req->direction, 1,
1748 req->request.stream_id,
1749 req->request.short_not_ok,
1750 req->request.no_interrupt);
1751 used_trbs++;
1752 /* Prepare one more TRB to handle MPS alignment */
1753 if (!req->direction) {
1754 trb = &dep->trb_pool[dep->trb_enqueue];
1755 req->num_trbs++;
1756 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
1757 false, 1, req->request.stream_id,
1758 req->request.short_not_ok,
1759 req->request.no_interrupt);
1760 used_trbs++;
1761 }
1762 } else {
1763 dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
1764 used_trbs++;
1765 }
1766
1767 /*
1768 * There can be a situation where not all sgs in the sglist are
1769 * queued because there are not enough free TRBs. To handle this
1770 * case, update start_sg to the next sg to be queued, so that
1771 * once TRBs are freed we can continue queuing from where we
1772 * previously stopped.
1773 */
1774 if (chain)
1775 req->start_sg = sg_next(s);
1776
1777 req->num_queued_sgs++;
1778 req->num_pending_sgs--;
1779
1780 /*
1781 * The number of pending SG entries may not correspond to the
1782 * number of mapped SG entries. If all the data are queued, then
1783 * don't include unused SG entries.
1784 */
1785 if (length == 0) {
1786 req->num_pending_sgs = 0;
1787 break;
1788 }
1789
1790 if (!dwc3_calc_trbs_left(dep))
1791 break;
1792 }
1793
1794 return used_trbs;
1795}
1796
1797static int dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1798 struct dwc3_request *req)
1799{
1800 unsigned int length = req->request.length;
1801 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1802 unsigned int rem = length % maxp;
1803 int used_trbs = 0;
1804
1805 if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
1806 struct dwc3 *dwc = dep->dwc;
1807 struct dwc3_trb *trb;
1808
1809 if (dwc3_calc_trbs_left(dep) < 2) {
1810 pr_err_ratelimited("ep%d L%d: left trbs: %d\n",
1811 dep->number, __LINE__, dwc3_calc_trbs_left(dep));
1812 return used_trbs;
1813 }
1814
1815 req->needs_extra_trb = true;
1816
1817 /* prepare normal TRB */
1818 dwc3_prepare_one_trb(dep, req, length, true, 0);
1819 used_trbs++;
1820
1821 /* Now prepare one extra TRB to align transfer size */
1822 trb = &dep->trb_pool[dep->trb_enqueue];
1823 req->num_trbs++;
1824 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
1825 false, 1, req->request.stream_id,
1826 req->request.short_not_ok,
1827 req->request.no_interrupt);
1828 used_trbs++;
1829 } else if (req->request.zero && req->request.length &&
1830 !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1831 (IS_ALIGNED(req->request.length, maxp))) {
1832 struct dwc3 *dwc = dep->dwc;
1833 struct dwc3_trb *trb;
1834 unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
1835
1836 if ((req->direction && (num_trbs_left < 2)) ||
1837 ((!req->direction) && (num_trbs_left < 3))) {
1838 pr_err_ratelimited("ep%d L%d: left trbs: %d\n",
1839 dep->number, __LINE__, dwc3_calc_trbs_left(dep));
1840 return used_trbs;
1841 }
1842
1843 req->needs_extra_trb = true;
1844
1845 /* prepare normal TRB */
1846 dwc3_prepare_one_trb(dep, req, length, true, 0);
1847 used_trbs++;
1848 /* Prepare one extra TRB to handle ZLP */
1849 trb = &dep->trb_pool[dep->trb_enqueue];
1850 req->num_trbs++;
1851 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
1852 !req->direction, 1, req->request.stream_id,
1853 req->request.short_not_ok,
1854 req->request.no_interrupt);
1855 used_trbs++;
1856 /* Prepare one more TRB to handle MPS alignment for OUT */
1857 if (!req->direction) {
1858 trb = &dep->trb_pool[dep->trb_enqueue];
1859 req->num_trbs++;
1860 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
1861 false, 1, req->request.stream_id,
1862 req->request.short_not_ok,
1863 req->request.no_interrupt);
1864 used_trbs++;
1865 }
1866 } else {
1867 dwc3_prepare_one_trb(dep, req, length, false, 0);
1868 used_trbs++;
1869 }
1870
1871 return used_trbs;
1872}
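/*
 * Worked example for the size-alignment path above (illustrative
 * numbers): an OUT request of length = 1000 with maxp = 512 gives
 * rem = 488, so an extra TRB of maxp - rem = 24 bytes pointing at
 * dwc->bounce_addr is chained on, making the total transfer size a
 * whole multiple of maxp as the controller requires for OUT endpoints.
 */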
1873
1874/*
1875 * dwc3_prepare_trbs - setup TRBs from requests
1876 * @dep: endpoint for which requests are being prepared
1877 *
1878 * The function goes through the requests list and sets up TRBs for the
1879 * transfers. The function returns once there are no more TRBs available or
1880 * it runs out of requests.
1881 */
1882static void dwc3_prepare_trbs(struct dwc3_ep *dep)
1883{
1884 struct dwc3_request *req, *n;
1885 int ret = 0;
1886
1887 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
1888 BUILD_BUG_ON_NOT_POWER_OF_2(ASR1901_DWC3_EP2_TRB_NUM);
1889 BUILD_BUG_ON_NOT_POWER_OF_2(ASR1901_DWC3_EP3_TRB_NUM);
1890
1891 /*
1892 * We can get in a situation where there's a request in the started list
1893 * but there weren't enough TRBs to fully kick it in the first time
1894 * around, so it has been waiting for more TRBs to be freed up.
1895 *
1896 * In that case, we should check if we have a request with pending_sgs
1897 * in the started list and prepare TRBs for that request first,
1898 * otherwise we will prepare TRBs completely out of order and that will
1899 * break things.
1900 */
1901 list_for_each_entry(req, &dep->started_list, list) {
1902 if (req->num_pending_sgs > 0) {
1903 ret = dwc3_prepare_one_trb_sg(dep, req);
1904 if ((!ret) || req->num_pending_sgs)
1905 return;
1906 }
1907 if (!dwc3_calc_trbs_left(dep))
1908 return;
1909 }
1910
1911 list_for_each_entry_safe(req, n, &dep->pending_list, list) {
1912 struct dwc3 *dwc = dep->dwc;
1913
1914 ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
1915 dep->direction);
1916 if (ret)
1917 return;
1918
1919 req->sg = req->request.sg;
1920 req->start_sg = req->sg;
1921 req->num_queued_sgs = 0;
1922 req->num_pending_sgs = req->request.num_mapped_sgs;
1923#ifdef CONFIG_ASR_TOE
1924		/* to catch toe error for u_ether_toe.c */
1925 if (unlikely(dep->number == 3 && req->request.length > 0x10000)) {
1926 pr_err("req size error: %d\n", req->request.length);
1927 BUG();
1928 }
1929#endif
1930 if (req->num_pending_sgs > 0) {
1931 ret = dwc3_prepare_one_trb_sg(dep, req);
1932 if (req->num_pending_sgs)
1933 return;
1934 } else {
1935 ret = dwc3_prepare_one_trb_linear(dep, req);
1936 }
1937
1938 if ((!ret) || (!dwc3_calc_trbs_left(dep)))
1939 return;
1940 }
1941}
1942
1943static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
1944
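/*
 * Turn queued requests into TRBs and hand them to the controller: issue
 * START_TRANSFER when no transfer is active on the endpoint (with the stream
 * ID or frame number as the command parameter where applicable), otherwise
 * UPDATE_TRANSFER against the existing resource index. On a command failure
 * other than -EAGAIN the active transfer is stopped and the started requests
 * are moved to the cancelled list.
 */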
1945static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
1946{
1947 struct dwc3_gadget_ep_cmd_params params;
1948 struct dwc3_request *req;
1949 int starting;
1950 int ret;
1951 u32 cmd;
1952
1953 if (!dwc3_calc_trbs_left(dep))
1954 return 0;
1955
1956 starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);
1957
1958 dwc3_prepare_trbs(dep);
1959 req = next_request(&dep->started_list);
1960 if (!req) {
1961 dep->flags |= DWC3_EP_PENDING_REQUEST;
1962 return 0;
1963 }
1964
1965 memset(&params, 0, sizeof(params));
1966
1967 if (starting) {
1968 params.param0 = upper_32_bits(req->trb_dma);
1969 params.param1 = lower_32_bits(req->trb_dma);
1970 cmd = DWC3_DEPCMD_STARTTRANSFER;
1971
1972 if (dep->stream_capable)
1973 cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
1974
1975 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
1976 cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
1977 } else {
1978 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1979 DWC3_DEPCMD_PARAM(dep->resource_index);
1980 }
1981
1982 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1983 if (ret < 0) {
1984 struct dwc3_request *tmp;
1985
1986 if (ret == -EAGAIN)
1987 return ret;
1988
1989 dwc3_stop_active_transfer(dep, true, true);
1990
1991 list_for_each_entry_safe(req, tmp, &dep->started_list, list)
1992 dwc3_gadget_move_cancelled_request(req);
1993
1994 /* If ep isn't started, then there's no end transfer pending */
1995 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
1996 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
1997
1998 return ret;
1999 }
2000
2001 return 0;
2002}
2003
2004static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
2005{
2006 u32 reg;
2007
2008 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2009 return DWC3_DSTS_SOFFN(reg);
2010}
2011
2012/**
2013 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
2014 * @dep: isoc endpoint
2015 *
2016 * This function tests for the correct combination of BIT[15:14] from the 16-bit
2017 * microframe number reported by the XferNotReady event for the future frame
2018 * number to start the isoc transfer.
2019 *
2020 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
2021 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
2022 * XferNotReady event are invalid. The driver uses this number to schedule the
2023 * isochronous transfer and passes it to the START TRANSFER command. Because
2024 * this number is invalid, the command may fail. If BIT[15:14] matches the
2025 * internal 16-bit microframe, the START TRANSFER command will pass and the
2026 * transfer will start at the scheduled time, if it is off by 1, the command
2027 * transfer will start at the scheduled time. If it is off by 1, the command
2028 * other conditions, the START TRANSFER command will fail with bus-expiry.
2029 *
2030 * In order to work around this issue, we can test for the correct combination of
2031 * BIT[15:14] by sending START TRANSFER commands with different values of
2032 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframe apart
2033 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
2034 * As a result, within the 4 possible combinations for BIT[15:14], there will
2035 * be 2 successful and 2 failing START TRANSFER command statuses. One of the 2
2036 * successful command statuses will result in a 2-second delayed start. The
2037 * smaller BIT[15:14] value is the correct combination.
2038 *
2039 * Since there are only 4 outcomes and the results are ordered, we can simply
2040 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
2041 * deduce the smaller successful combination.
2042 *
2043 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
2044 * of BIT[15:14]. The correct combination is as follows:
2045 *
2046 * if test0 fails and test1 passes, BIT[15:14] is 'b01
2047 * if test0 fails and test1 fails, BIT[15:14] is 'b10
2048 * if test0 passes and test1 fails, BIT[15:14] is 'b11
2049 * if test0 passes and test1 passes, BIT[15:14] is 'b00
2050 *
2051 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
2052 * endpoints.
2053 */
2054static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
2055{
2056 int cmd_status = 0;
2057 bool test0;
2058 bool test1;
2059
2060 while (dep->combo_num < 2) {
2061 struct dwc3_gadget_ep_cmd_params params;
2062 u32 test_frame_number;
2063 u32 cmd;
2064
2065 /*
2066 * Check if we can start isoc transfer on the next interval or
2067 * 4 uframes in the future with BIT[15:14] as dep->combo_num
2068 */
2069 test_frame_number = dep->frame_number & 0x3fff;
2070 test_frame_number |= dep->combo_num << 14;
2071 test_frame_number += max_t(u32, 4, dep->interval);
2072
2073 params.param0 = upper_32_bits(dep->dwc->bounce_addr);
2074 params.param1 = lower_32_bits(dep->dwc->bounce_addr);
2075
2076 cmd = DWC3_DEPCMD_STARTTRANSFER;
2077 cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
2078 cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2079
2080 /* Redo if some other failure beside bus-expiry is received */
2081 if (cmd_status && cmd_status != -EAGAIN) {
2082 dep->start_cmd_status = 0;
2083 dep->combo_num = 0;
2084 return 0;
2085 }
2086
2087 /* Store the first test status */
2088 if (dep->combo_num == 0)
2089 dep->start_cmd_status = cmd_status;
2090
2091 dep->combo_num++;
2092
2093 /*
2094		 * If the START_TRANSFER command succeeded, end the transfer and
2095		 * wait for the next XferNotReady to test the next combination
2096 */
2097 if (cmd_status == 0) {
2098 dwc3_stop_active_transfer(dep, true, true);
2099 return 0;
2100 }
2101 }
2102
2103 /* test0 and test1 are both completed at this point */
2104 test0 = (dep->start_cmd_status == 0);
2105 test1 = (cmd_status == 0);
2106
2107 if (!test0 && test1)
2108 dep->combo_num = 1;
2109 else if (!test0 && !test1)
2110 dep->combo_num = 2;
2111 else if (test0 && !test1)
2112 dep->combo_num = 3;
2113 else if (test0 && test1)
2114 dep->combo_num = 0;
2115
2116 dep->frame_number &= 0x3fff;
2117 dep->frame_number |= dep->combo_num << 14;
2118 dep->frame_number += max_t(u32, 4, dep->interval);
2119
2120 /* Reinitialize test variables */
2121 dep->start_cmd_status = 0;
2122 dep->combo_num = 0;
2123
2124 return __dwc3_gadget_kick_transfer(dep);
2125}
2126
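/*
 * Start a transfer on an isochronous endpoint once XferNotReady has provided
 * a reference frame number. Affected DWC_usb31 revisions use
 * dwc3_gadget_start_isoc_quirk() for HS/FS IN endpoints; otherwise the kick
 * is retried with progressively later, interval-aligned frame numbers until
 * the core stops returning -EAGAIN or DWC3_ISOC_MAX_RETRIES is exhausted.
 */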
2127static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
2128{
2129 struct dwc3 *dwc = dep->dwc;
2130 int ret;
2131 int i;
2132
2133 if (list_empty(&dep->pending_list)) {
2134 dep->flags |= DWC3_EP_PENDING_REQUEST;
2135 return -EAGAIN;
2136 }
2137
2138 if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) &&
2139 (dwc->revision <= DWC3_USB31_REVISION_160A ||
2140 (dwc->revision == DWC3_USB31_REVISION_170A &&
2141 dwc->version_type >= DWC31_VERSIONTYPE_EA01 &&
2142 dwc->version_type <= DWC31_VERSIONTYPE_EA06))) {
2143
2144 if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
2145 return dwc3_gadget_start_isoc_quirk(dep);
2146 }
2147
2148 for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
2149 dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);
2150
2151 ret = __dwc3_gadget_kick_transfer(dep);
2152 if (ret != -EAGAIN)
2153 break;
2154 }
2155 if (ret == -EAGAIN)
2156 dev_err(dwc->dev, "%s failed: frame: 0x%x\n", __func__, dep->frame_number);
2157
2158 return ret;
2159}
2160
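/*
 * Core of usb_ep_queue() for non-control endpoints (called with dwc->lock
 * held): validate the request, add it to the pending list and kick the
 * transfer. The start is deferred while an END_TRANSFER is pending or the
 * endpoint is stalled/wedged, and isochronous endpoints are only started from
 * XferNotReady handling. On ASR18xx (CONFIG_CPU_ASR18XX), non-isochronous
 * requests on physical endpoint 2 are batched: the hardware is kicked only
 * when (dep->num_reqs & 0xF) == 1.
 */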
2161static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
2162{
2163 struct dwc3 *dwc = dep->dwc;
2164
2165 if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
2166 dev_err_ratelimited(dwc->dev, "%s: can't queue to disabled endpoint\n",
2167 dep->name);
2168 return -ESHUTDOWN;
2169 }
2170
2171 if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
2172 &req->request, req->dep->name))
2173 return -EINVAL;
2174
2175 if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
2176 "%s: request %pK already in flight\n",
2177 dep->name, &req->request))
2178 return -EINVAL;
2179
2180#if 0 /* ASR private */
2181 pm_runtime_get(dwc->dev);
2182#endif
2183
2184 if ((!list_empty(&dep->cancelled_list)) && (!(dep->flags & DWC3_EP_STALL_IN_PROGRESS))) {
2185 pr_err("!!!ep%d has cancelled reqs\n", dep->number);
2186 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
2187 }
2188
2189 req->request.actual = 0;
2190 req->request.status = -EINPROGRESS;
2191 dep->num_reqs++;
2192
2193 trace_dwc3_ep_queue(req);
2194
2195 list_add_tail(&req->list, &dep->pending_list);
2196 req->status = DWC3_REQUEST_STATUS_QUEUED;
2197
2198 /*
2199 * Start the transfer only after the END_TRANSFER is completed
2200 * and endpoint STALL is cleared.
2201 */
2202 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
2203 (dep->flags & DWC3_EP_WEDGE) ||
2204 (dep->flags & DWC3_EP_STALL)) {
2205 dep->flags |= DWC3_EP_DELAY_START;
2206 return 0;
2207 }
2208
2209 /*
2210 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
2211	 * wait for an XferNotReady event so we know the current
2212	 * (micro-)frame number.
2213	 *
2214	 * Without this trick, we are very likely to get Bus Expiry
2215	 * errors, which will force us to issue an EndTransfer command.
2216 */
2217 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2218 if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
2219 !(dep->flags & DWC3_EP_TRANSFER_STARTED))
2220 return 0;
2221
2222 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
2223 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
2224 return __dwc3_gadget_start_isoc(dep);
2225 }
2226 }
2227 }
2228
2229#ifdef CONFIG_CPU_ASR18XX
2230 if ((dep->number == 2) && (!usb_endpoint_xfer_isoc(dep->endpoint.desc))) {
2231 if ((dep->num_reqs & 0xF) == 1)
2232 __dwc3_gadget_kick_transfer(dep);
2233 else
2234 return 0;
2235 } else {
2236 __dwc3_gadget_kick_transfer(dep);
2237 }
2238#else
2239 __dwc3_gadget_kick_transfer(dep);
2240#endif
2241
2242 return 0;
2243}
2244
2245#ifdef CONFIG_ASR_TOE
2246
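/*
 * ASR TOE variant of __dwc3_gadget_ep_queue(): queue a whole aggregate of RX
 * requests (struct uether_rx_aggr) in a single call and kick the transfer
 * once for the batch instead of once per request.
 */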
2247static int __dwc3_gadget_ep_queue_mult(struct dwc3_ep *dep, void *data)
2248{
2249 int i;
2250 struct dwc3 *dwc = dep->dwc;
2251 struct uether_rx_aggr *rx_aggr = (struct uether_rx_aggr *)data;
2252 struct dwc3_request *req;
2253
2254 if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected
2255 || (dwc->link_state == DWC3_LINK_STATE_U3)) {
2256 dev_err_ratelimited(dwc->dev,
2257 "%s: queue fail, pullups_connected: %d, connected: %d, link_st: %d\n",
2258 dep->name, dwc->pullups_connected, dwc->connected, dwc->link_state);
2259 return -ESHUTDOWN;
2260 }
2261
2262 if (dwc->gadget.state != USB_STATE_CONFIGURED) {
2263 dev_err_ratelimited(dwc->dev,
2264 "state%d ncfg\n", dwc->gadget.state);
2265 return -ESHUTDOWN;
2266 }
2267
2268#if 0 /* ASR private */
2269 pm_runtime_get(dwc->dev);
2270#endif
2271
2272 if ((!list_empty(&dep->cancelled_list)) && (!(dep->flags & DWC3_EP_STALL_IN_PROGRESS))) {
2273 pr_err("!!!ep%d has cancelled reqs\n", dep->number);
2274 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
2275 }
2276
2277 BUG_ON(rx_aggr->nr_rx_req > MAX_RX_REQ_NUMBER);
2278
2279 for (i = 0; i < rx_aggr->nr_rx_req; i++) {
2280 req = to_dwc3_request(rx_aggr->rx_req_array[i]);
2281 if (unlikely(WARN(req->dep != dep, "request %pK belongs to '%s'\n",
2282 &req->request, req->dep->name)))
2283 return -EINVAL;
2284
2285 if (unlikely(WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
2286 "%s: request %pK already in flight\n",
2287 dep->name, &req->request)))
2288 return -EINVAL;
2289
2290 req->request.actual = 0;
2291 req->request.status = -EINPROGRESS;
2292
2293 trace_dwc3_ep_queue(req);
2294
2295 list_add_tail(&req->list, &dep->pending_list);
2296 req->status = DWC3_REQUEST_STATUS_QUEUED;
2297 }
2298 rx_aggr->nr_rx_req = 0;
2299
2300 /*
2301 * Start the transfer only after the END_TRANSFER is completed
2302 * and endpoint STALL is cleared.
2303 */
2304 if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
2305 (dep->flags & DWC3_EP_WEDGE) ||
2306 (dep->flags & DWC3_EP_STALL)) {
2307 pr_info_ratelimited("ep2 queue mult : 0x%x\n", dep->flags);
2308 dep->flags |= DWC3_EP_DELAY_START;
2309 return 0;
2310 }
2311
2312 /*
2313 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
2314	 * wait for an XferNotReady event so we know the current
2315	 * (micro-)frame number.
2316	 *
2317	 * Without this trick, we are very likely to get Bus Expiry
2318	 * errors, which will force us to issue an EndTransfer command.
2319 */
2320 if (unlikely(usb_endpoint_xfer_isoc(dep->endpoint.desc))) {
2321 if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
2322 !(dep->flags & DWC3_EP_TRANSFER_STARTED))
2323 return 0;
2324
2325 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
2326 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
2327 return __dwc3_gadget_start_isoc(dep);
2328 }
2329 }
2330 }
2331
2332 __dwc3_gadget_kick_transfer(dep);
2333
2334 return 0;
2335}
2336#endif
2337static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2338 gfp_t gfp_flags)
2339{
2340 struct dwc3_request *req = to_dwc3_request(request);
2341 struct dwc3_ep *dep = to_dwc3_ep(ep);
2342 struct dwc3 *dwc = dep->dwc;
2343
2344 unsigned long flags;
2345
2346 int ret;
2347
2348 spin_lock_irqsave(&dwc->lock, flags);
2349 if (unlikely(!dwc->vbus_active || !dwc->softconnect)) {
2350 spin_unlock_irqrestore(&dwc->lock, flags);
2351 dev_err_ratelimited(dwc->dev, "dwc3 already disconnected\n");
2352 return -ESHUTDOWN;
2353 }
2354
2355#ifdef CONFIG_DWC3_HWSULOG
2356 if (unlikely(sulog_ep_num && (dep->number == (sulog_ep_num * 2 + 1)))) {
2357 dev_err(dwc->dev, "sulog error\n");
2358 spin_unlock_irqrestore(&dwc->lock, flags);
2359 return -ESHUTDOWN;
2360 }
2361#endif
2362
2363 ret = __dwc3_gadget_ep_queue(dep, req);
2364 spin_unlock_irqrestore(&dwc->lock, flags);
2365
2366 return ret;
2367}
2368
2369#ifdef CONFIG_ASR_TOE
2370static int dwc3_gadget_ep_queue_mult(struct usb_ep *ep,
2371 gfp_t gfp_flags, void *data)
2372{
2373 struct dwc3_ep *dep = to_dwc3_ep(ep);
2374 struct dwc3 *dwc = dep->dwc;
2375 unsigned long flags;
2376 int ret;
2377
2378 spin_lock_irqsave(&dwc->lock, flags);
2379 if (unlikely(!dwc->vbus_active || !dwc->softconnect)) {
2380 spin_unlock_irqrestore(&dwc->lock, flags);
2381 dev_err_ratelimited(dwc->dev, "usb already disconnected\n");
2382 return -ESHUTDOWN;
2383 }
2384
2385 ret = __dwc3_gadget_ep_queue_mult(dep, data);
2386 spin_unlock_irqrestore(&dwc->lock, flags);
2387
2388 return ret;
2389}
2390#endif
2391
2392static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
2393{
2394 int i;
2395
2396 /* If req->trb is not set, then the request has not started */
2397 if (!req->trb)
2398 return;
2399
2400 /*
2401	 * If the request was already started, this means we had
2402	 * to stop the transfer. With that we also need to ignore
2403	 * all TRBs used by the request; however, TRBs can only
2404	 * be modified after completion of the END_TRANSFER
2405	 * command. So what we do here is wait for END_TRANSFER
2406	 * completion and only after that jump over the TRBs by
2407	 * clearing HWO and incrementing the dequeue
2408	 * pointer.
2409 */
2410 for (i = 0; i < req->num_trbs; i++) {
2411 struct dwc3_trb *trb;
2412
2413 trb = &dep->trb_pool[dep->trb_dequeue];
2414 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2415 dwc3_ep_inc_deq(dep);
2416 }
2417
2418 req->num_trbs = 0;
2419}
2420
2421static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
2422{
2423 struct dwc3_request *req;
2424 struct dwc3_request *tmp;
2425
2426 list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
2427 dwc3_gadget_ep_skip_trbs(dep, req);
2428 dwc3_gadget_giveback(dep, req, -ECONNRESET);
2429 }
2430}
2431
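/*
 * usb_ep_ops.dequeue: a request still on the pending list is given back
 * immediately with -ECONNRESET. A request that has already been started is
 * handled by stopping the active transfer and moving it to the cancelled
 * list; it is then given back either here or after END_TRANSFER completes.
 */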
2432static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
2433 struct usb_request *request)
2434{
2435 struct dwc3_request *req = to_dwc3_request(request);
2436 struct dwc3_request *r = NULL;
2437
2438 struct dwc3_ep *dep = to_dwc3_ep(ep);
2439 struct dwc3 *dwc = dep->dwc;
2440
2441 unsigned long flags;
2442 int ret = 0;
2443
2444 trace_dwc3_ep_dequeue(req);
2445
2446 spin_lock_irqsave(&dwc->lock, flags);
2447
2448 list_for_each_entry(r, &dep->pending_list, list) {
2449 if (r == req)
2450 break;
2451 }
2452
2453 if (r != req) {
2454 list_for_each_entry(r, &dep->started_list, list) {
2455 if (r == req)
2456 break;
2457 }
2458 if (r == req) {
2459 /* wait until it is processed */
2460 dwc3_stop_active_transfer(dep, true, true);
2461
2462 if (!r->trb)
2463 goto out0;
2464
2465 dwc3_gadget_move_cancelled_request(req);
2466 if (dep->flags & DWC3_EP_TRANSFER_STARTED)
2467 goto out0;
2468 else
2469 goto out1;
2470 }
2471 dev_err(dwc->dev, "request %pK was not queued to %s\n",
2472 request, ep->name);
2473 ret = -EINVAL;
2474 goto out0;
2475 }
2476
2477out1:
2478 dwc3_gadget_giveback(dep, req, -ECONNRESET);
2479
2480out0:
2481 spin_unlock_irqrestore(&dwc->lock, flags);
2482
2483 return ret;
2484}
2485
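/*
 * Set or clear endpoint STALL. Setting issues DWC3_DEPCMD_SETSTALL and is
 * refused with -EAGAIN for functional (non-protocol) halts while a transfer
 * is still in flight. Clearing stops any active transfer, cancels the started
 * requests, sends the CLEAR_STALL command once no END_TRANSFER is pending,
 * and restarts a transfer that was flagged for delayed start.
 */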
2486int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
2487{
2488 struct dwc3_gadget_ep_cmd_params params;
2489 struct dwc3 *dwc = dep->dwc;
2490 struct dwc3_request *req;
2491 struct dwc3_request *tmp;
2492 int ret;
2493
2494 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2495 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
2496 return -EINVAL;
2497 }
2498
2499 memset(&params, 0x00, sizeof(params));
2500
2501 pr_info("ep%d hlt%d\n", dep->number, value);
2502
2503 if (value) {
2504 struct dwc3_trb *trb;
2505
2506 unsigned transfer_in_flight;
2507 unsigned started;
2508
2509 if (dep->number > 1)
2510 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
2511 else
2512 trb = &dwc->ep0_trb[dep->trb_enqueue];
2513
2514 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
2515 started = !list_empty(&dep->started_list);
2516
2517 if (!protocol && ((dep->direction && transfer_in_flight) ||
2518 (!dep->direction && started))) {
2519 return -EAGAIN;
2520 }
2521
2522 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
2523 &params);
2524 if (ret)
2525 dev_err(dwc->dev, "failed to set STALL on %s\n",
2526 dep->name);
2527 else
2528 dep->flags |= DWC3_EP_STALL;
2529 } else {
2530 /*
2531 * Don't issue CLEAR_STALL command to control endpoints. The
2532 * controller automatically clears the STALL when it receives
2533 * the SETUP token.
2534 */
2535 if (dep->number <= 1) {
2536 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2537 return 0;
2538 }
2539
2540 dep->flags |= DWC3_EP_STALL_IN_PROGRESS;
2541
2542 dwc3_stop_active_transfer(dep, true, true);
2543
2544 list_for_each_entry_safe(req, tmp, &dep->started_list, list)
2545 dwc3_gadget_move_cancelled_request(req);
2546
2547 if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
2548 dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
2549 return 0;
2550 }
2551
2552 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
2553
2554 dep->flags &= ~DWC3_EP_STALL_IN_PROGRESS;
2555
2556 ret = dwc3_send_clear_stall_ep_cmd(dep);
2557 if (ret) {
2558 dev_err(dwc->dev, "failed to clear STALL on %s\n",
2559 dep->name);
2560 return ret;
2561 }
2562
2563 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2564
2565 if ((dep->flags & DWC3_EP_DELAY_START) &&
2566 !usb_endpoint_xfer_isoc(dep->endpoint.desc))
2567 __dwc3_gadget_kick_transfer(dep);
2568
2569 dep->flags &= ~DWC3_EP_DELAY_START;
2570 }
2571
2572 return ret;
2573}
2574
2575static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2576{
2577 struct dwc3_ep *dep = to_dwc3_ep(ep);
2578 struct dwc3 *dwc = dep->dwc;
2579
2580 unsigned long flags;
2581
2582 int ret;
2583
2584 spin_lock_irqsave(&dwc->lock, flags);
2585 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
2586 spin_unlock_irqrestore(&dwc->lock, flags);
2587
2588 return ret;
2589}
2590
2591static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
2592{
2593 struct dwc3_ep *dep = to_dwc3_ep(ep);
2594 struct dwc3 *dwc = dep->dwc;
2595 unsigned long flags;
2596 int ret;
2597
2598 spin_lock_irqsave(&dwc->lock, flags);
2599 dep->flags |= DWC3_EP_WEDGE;
2600
2601 if (dep->number == 0 || dep->number == 1)
2602 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
2603 else
2604 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
2605 spin_unlock_irqrestore(&dwc->lock, flags);
2606
2607 return ret;
2608}
2609
2610/* -------------------------------------------------------------------------- */
2611
2612static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
2613 .bLength = USB_DT_ENDPOINT_SIZE,
2614 .bDescriptorType = USB_DT_ENDPOINT,
2615 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
2616};
2617
2618static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
2619 .enable = dwc3_gadget_ep0_enable,
2620 .disable = dwc3_gadget_ep0_disable,
2621 .alloc_request = dwc3_gadget_ep_alloc_request,
2622 .free_request = dwc3_gadget_ep_free_request,
2623 .queue = dwc3_gadget_ep0_queue,
2624 .dequeue = dwc3_gadget_ep_dequeue,
2625 .set_halt = dwc3_gadget_ep0_set_halt,
2626 .set_wedge = dwc3_gadget_ep_set_wedge,
2627};
2628
2629static const struct usb_ep_ops dwc3_gadget_ep_ops = {
2630 .enable = dwc3_gadget_ep_enable,
2631 .disable = dwc3_gadget_ep_disable,
2632 .alloc_request = dwc3_gadget_ep_alloc_request,
2633 .free_request = dwc3_gadget_ep_free_request,
2634 .queue = dwc3_gadget_ep_queue,
2635#ifdef CONFIG_ASR_TOE
2636 .queue_mult = dwc3_gadget_ep_queue_mult,
2637#endif
2638 .dequeue = dwc3_gadget_ep_dequeue,
2639 .set_halt = dwc3_gadget_ep_set_halt,
2640 .set_wedge = dwc3_gadget_ep_set_wedge,
2641};
2642
2643/* -------------------------------------------------------------------------- */
2644
2645static int dwc3_gadget_get_frame(struct usb_gadget *g)
2646{
2647 struct dwc3 *dwc = gadget_to_dwc(g);
2648
2649 return __dwc3_gadget_get_frame(dwc);
2650}
2651
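/*
 * Issue a remote-wakeup request. Only allowed from suspend-type link states;
 * the link is put into Recovery and DSTS is then polled until it reports U0
 * (ON). If the first poll loop times out, a second, shorter loop retries
 * before giving up with -EINVAL.
 */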
2652static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
2653{
2654 int retries;
2655
2656 int ret;
2657 u32 reg;
2658
2659 u8 link_state;
2660
2661 /*
2662	 * According to the Databook, a remote wakeup request should
2663	 * be issued only when the device is in the early suspend state.
2664 *
2665 * We can check that via USB Link State bits in DSTS register.
2666 */
2667 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2668
2669 link_state = DWC3_DSTS_USBLNKST(reg);
2670
2671 switch (link_state) {
2672 case DWC3_LINK_STATE_RESET:
2673 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
2674 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
2675 case DWC3_LINK_STATE_U2: /* in HS, means Sleep (L1) */
2676 case DWC3_LINK_STATE_U1:
2677 case DWC3_LINK_STATE_RESUME:
2678 break;
2679 default:
2680 return -EINVAL;
2681 }
2682
2683 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
2684 if (ret < 0) {
2685 dev_err(dwc->dev, "failed to put link in Recovery\n");
2686 return ret;
2687 }
2688
2689 /* Recent versions do this automatically */
2690 if (dwc->revision < DWC3_REVISION_194A) {
2691 /* write zeroes to Link Change Request */
2692 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2693 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
2694 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2695 }
2696
2697 /* poll until Link State changes to ON */
2698 retries = 20000;
2699
2700 while (retries--) {
2701 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2702
2703 /* in HS, means ON */
2704 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
2705 break;
2706 }
2707
2708 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
2709 dev_err_ratelimited(dwc->dev, "1st failed to send remote wakeup, link_st: %d cur link_st: %d\n",
2710 link_state, DWC3_DSTS_USBLNKST(reg));
2711 /* try again, poll until Link State changes to ON, max delay 1ms */
2712 retries = 100;
2713 while (retries--) {
2714 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2715
2716 /* in HS, means ON */
2717 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
2718 break;
2719 }
2720
2721 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
2722 dev_err_ratelimited(dwc->dev, "2nd failed to send remote wakeup, %d, link_st: %d cur link_st: %d\n",
2723 retries, link_state, DWC3_DSTS_USBLNKST(reg));
2724 return -EINVAL;
2725 }
2726 }
2727
2728 return 0;
2729}
2730
2731static int dwc3_gadget_wakeup(struct usb_gadget *g)
2732{
2733 struct dwc3 *dwc = gadget_to_dwc(g);
2734 unsigned long flags;
2735 int ret;
2736
2737 spin_lock_irqsave(&dwc->lock, flags);
2738 ret = __dwc3_gadget_wakeup(dwc);
2739 spin_unlock_irqrestore(&dwc->lock, flags);
2740
2741 return ret;
2742}
2743
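/*
 * ASR-specific charger re-check run after pull-up. If neither a bus reset nor
 * a suspend event arrives within roughly 950 ms while VBUS stays present, the
 * controller is stopped, reset and restarted, the PHY charger detection is
 * re-run, the PHY is suspended and the delayed charger work is rescheduled.
 */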
2744static void dwc3_charger_type_confirm(struct dwc3 *dwc)
2745{
2746 unsigned long flags;
2747 int ret, timeout = 95; /* 950 ms, should get charger type in 1s */
2748 unsigned int vbus = 0;
2749
2750 pr_emerg("%s\n", __func__);
2751
2752 if (dwc->no_acchg_det) {
2753 pr_info("don't detect ac charger type\n");
2754 return;
2755 }
2756
2757 while (timeout--) {
2758 if (dwc->bus_reset_received || dwc->suspend_received) {
2759 pr_err("udc_charger: reset/suspend = %d/%d\n",
2760 dwc->bus_reset_received, dwc->suspend_received);
2761 return;
2762 }
2763
2764 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
2765 if (ret) {
2766 vbus = usb_phy_get_vbus(dwc->usb2_phy);
2767 if (vbus == 0) {
2768 pr_err("%s: usb plugout\n", __func__);
2769 return;
2770 }
2771 } else if (vbus == 0) {
2772 pr_err("%s: usb plugout\n", __func__);
2773 return;
2774 }
2775 msleep(10);
2776 }
2777
2778 dev_err(dwc->dev, "!!!dsts: 0x%x, gdbgltssm: 0x%x\n",
2779 dwc3_readl(dwc->regs, DWC3_DSTS),
2780 dwc3_readl(dwc->regs, DWC3_GDBGLTSSM));
2781 usb_phy_dump_cfg(dwc->usb2_phy);
2782
2783 spin_lock_irqsave(&dwc->lock, flags);
2784 __dwc3_gadget_stop(dwc);
2785 dwc3_gadget_run_stop(dwc, 0, false);
2786 spin_unlock_irqrestore(&dwc->lock, flags);
2787
2788 dwc3_controller_reset(dwc);
2789 spin_lock_irqsave(&dwc->lock, flags);
2790 __dwc3_gadget_start(dwc);
2791 spin_unlock_irqrestore(&dwc->lock, flags);
2792
2793 dwc->charger_type = usb_phy_charger_detect(dwc->usb2_phy);
2794 pr_err("%s: suspend usb phy\n", __func__);
2795 spin_lock_irqsave(&dwc->lock, flags);
2796 usb_phy_set_suspend(dwc->usb2_phy, 1);
2797 spin_unlock_irqrestore(&dwc->lock, flags);
2798
2799 if (work_pending(&dwc->delayed_charger_work.work))
2800 cancel_delayed_work(&dwc->delayed_charger_work);
2801 schedule_delayed_work(&dwc->delayed_charger_work, 0);
2802}
2803
2804static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
2805 int is_selfpowered)
2806{
2807 struct dwc3 *dwc = gadget_to_dwc(g);
2808 unsigned long flags;
2809
2810 spin_lock_irqsave(&dwc->lock, flags);
2811 g->is_selfpowered = !!is_selfpowered;
2812 spin_unlock_irqrestore(&dwc->lock, flags);
2813
2814 return 0;
2815}
2816
2817static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2818{
2819 u32 epnum;
2820
2821 for (epnum = 2; epnum < dwc->num_eps; epnum++) {
2822 struct dwc3_ep *dep;
2823
2824 dep = dwc->eps[epnum];
2825 if (!dep)
2826 continue;
2827
2828 dwc3_remove_requests(dwc, dep);
2829 }
2830}
2831
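/*
 * Set or clear DCTL.RUN_STOP and wait (bounded polling) for DSTS.DEVCTRLHLT
 * to reflect the new state. On stop, test mode is cleared, active transfers
 * are ended, stalls are cleared, the device address is reset, the gadget
 * driver's disconnect callback is invoked (if one is bound) and the event
 * buffer state is reset. dwc->active tracks the requested state so repeated
 * calls with the same value are no-ops.
 */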
2832static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
2833{
2834 u32 reg;
2835 u32 timeout = 500;
2836
2837 dev_info(dwc->dev, "%s: is_on: %d, suspend: %d active: %d\n",
2838 __func__, is_on, suspend, dwc->active);
2839 if (is_on && dwc->active)
2840 return 0;
2841
2842 if ((!is_on) && (!dwc->active))
2843 return 0;
2844
2845#if 0 /* ASR private */
2846 if (pm_runtime_suspended(dwc->dev))
2847 return 0;
2848#endif
2849
2850 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2851 if (is_on) {
2852 dwc->bus_reset_received = 0;
2853 dwc->suspend_received = 0;
2854 if (dwc->revision <= DWC3_REVISION_187A) {
2855 reg &= ~DWC3_DCTL_TRGTULST_MASK;
2856 reg |= DWC3_DCTL_TRGTULST_RX_DET;
2857 }
2858
2859 if (dwc->revision >= DWC3_REVISION_194A)
2860 reg &= ~DWC3_DCTL_KEEP_CONNECT;
2861 reg |= DWC3_DCTL_RUN_STOP;
2862
2863 if (dwc->has_hibernation)
2864 reg |= DWC3_DCTL_KEEP_CONNECT;
2865
2866 dwc->pullups_connected = true;
2867 } else {
2868 reg &= ~DWC3_DCTL_RUN_STOP;
2869
2870 if (dwc->has_hibernation && !suspend)
2871 reg &= ~DWC3_DCTL_KEEP_CONNECT;
2872
2873 dwc->pullups_connected = false;
2874 }
2875
2876 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2877 do {
2878 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2879 reg &= DWC3_DSTS_DEVCTRLHLT;
2880 } while (--timeout && !(!is_on ^ !reg));
2881
2882 if (!timeout) {
2883 /* return 0; */
2884 dev_warn(dwc->dev, "dctl: 0x%x, dsts: 0x%x, ecount: 0x%x\n",
2885 dwc3_readl(dwc->regs, DWC3_DCTL),
2886 dwc3_readl(dwc->regs, DWC3_DSTS),
2887 dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)));
2888 if (!is_on) {
2889 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2890 reg &= DWC3_GEVNTCOUNT_MASK;
2891 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
2892 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2893 reg &= ~DWC3_DCTL_RUN_STOP;
2894 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2895 }
2896 }
2897
2898 if (!is_on) {
2899 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2900 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2901 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2902 dwc->test_mode = false;
2903
2904 dwc3_stop_active_transfers(dwc);
2905 dwc3_clear_stall_all_ep(dwc);
2906
2907 /* Reset device address to zero */
2908 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2909 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2910 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2911
2912 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2913 reg &= ~DWC3_DCFG_LPM_CAP;
2914 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2915 usb_gadget_set_state(&dwc->gadget, USB_STATE_ATTACHED);
2916
2917 /* report disconnect; the driver is already quiesced */
2918 if (dwc->gadget_driver && dwc->gadget_driver->disconnect && dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2919 if (irqs_disabled()) {
2920 spin_unlock_irq(&dwc->lock);
2921 dwc->gadget_driver->disconnect(&dwc->gadget);
2922 spin_lock_irq(&dwc->lock);
2923 } else {
2924 spin_unlock(&dwc->lock);
2925 dwc->gadget_driver->disconnect(&dwc->gadget);
2926 spin_lock(&dwc->lock);
2927 }
2928 }
2929 dwc->ev_buf->count = 0;
2930 dwc->ev_buf->lpos = 0;
2931 dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
2932 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2933 }
2934 dwc->active = is_on;
2935
2936	dev_err(dwc->dev, "gadget %s data soft-%s\n",
2937 dwc->gadget_driver
2938 ? dwc->gadget_driver->function : "no-function",
2939 is_on ? "connect" : "disconnect");
2940 dev_info(dwc->dev, "%s: dwc3 controller %s\n",
2941 __func__, is_on ? "connect" : "disconnect");
2942 return 0;
2943}
2944
2945static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
2946static void __dwc3_gadget_stop(struct dwc3 *dwc);
2947static int __dwc3_gadget_start(struct dwc3 *dwc);
2948
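/*
 * Two dwc3_gadget_pullup() implementations follow: the non-OTG build and the
 * ASR OTG build. Both serialize on usb_con_mutex, wait for EP0 to reach the
 * SETUP phase before a soft disconnect, disable the gadget IRQ around the
 * run/stop change, and reset the controller plus the event buffer on
 * disconnect. The OTG variant additionally bails out when the OTG state
 * machine is not in a B-device state and defers __dwc3_gadget_start() to the
 * connect path.
 */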
2949#ifndef CONFIG_USB_DWC3_ASR_OTG
2950static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
2951{
2952 struct dwc3 *dwc = gadget_to_dwc(g);
2953 unsigned long flags;
2954 int ret = 0;
2955
2956 is_on = !!is_on;
2957
2958 mutex_lock(&usb_con_mutex);
2959
2960 if (dwc->softconnect == is_on) {
2961 dev_info(dwc->dev, "dwc3 already pulled up\n");
2962 goto out;
2963 }
2964
2965 dwc->softconnect = (is_on != 0);
2966
2967 if (dwc->charger_type == DCP_CHARGER) {
2968 dev_info(dwc->dev, "dwc3 pullup out on DCP_CHARGER\n");
2969 goto out;
2970 }
2971
2972 dev_info(dwc->dev, "%s: softconnect %d, vbus_active %d, pre_chrgr: %d\n",
2973 __func__, dwc->softconnect, dwc->vbus_active, dwc->prev_charger_type);
2974
2975 /*
2976 * Per databook, when we want to stop the gadget, if a control transfer
2977 * is still in process, complete it and get the core into setup phase.
2978 */
2979
2980 if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
2981 dev_info(dwc->dev, "waiting dwc->ep0state %d\n", dwc->ep0state);
2982 reinit_completion(&dwc->ep0_in_setup);
2983
2984 ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
2985 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
2986 if (ret == 0)
2987 dev_warn(dwc->dev, "timed out waiting for SETUP phase: %d\n", dwc->ep0state);
2988 }
2989
2990 /*
2991 * Avoid issuing a runtime resume if the device is already in the
2992 * suspended state during gadget disconnect. DWC3 gadget was already
2993 * halted/stopped during runtime suspend.
2994 */
2995
2996#if 0 /* ASR private */
2997 if (!is_on) {
2998 pm_runtime_barrier(dwc->dev);
2999 if (pm_runtime_suspended(dwc->dev))
3000 return 0;
3001 }
3002#endif
3003
3004 /*
3005 * Check the return value for successful resume, or error. For a
3006 * successful resume, the DWC3 runtime PM resume routine will handle
3007 * the run stop sequence, so avoid duplicate operations here.
3008 */
3009#if 0 /* ASR private */
3010 ret = pm_runtime_get_sync(dwc->dev);
3011 if (!ret || ret < 0) {
3012 pm_runtime_put(dwc->dev);
3013 if (ret < 0)
3014 pm_runtime_set_suspended(dwc->dev);
3015 return ret;
3016 }
3017#endif
3018 /*
3019 * Synchronize and disable any further event handling while controller
3020 * is being enabled/disabled.
3021 */
3022 disable_irq(dwc->irq_gadget);
3023
3024 spin_lock_irqsave(&dwc->lock, flags);
3025 if (dwc->gadget_driver && (!dwc->softconnect) && dwc->vbus_active) {
3026 u32 count;
3027
3028#ifdef CONFIG_DWC3_HWSULOG
3029 hwsulog_on = false;
3030#endif
3031 dwc->connected = false;
3032 /*
3033		 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3034 * Section 4.1.8 Table 4-7, it states that for a device-initiated
3035 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
3036 * command for any active transfers" before clearing the RunStop
3037 * bit.
3038 */
3039 dwc3_stop_active_transfers(dwc);
3040 __dwc3_gadget_stop(dwc);
3041 /*
3042		 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3043 * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
3044 * "software needs to acknowledge the events that are generated
3045 * (by writing to GEVNTCOUNTn) while it is waiting for this bit
3046 * to be set to '1'."
3047 */
3048 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3049 count &= DWC3_GEVNTCOUNT_MASK;
3050 if (count > 0) {
3051 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
3052 dwc->ev_buf->lpos = 0; /* (dwc->ev_buf->lpos + count) %
3053 dwc->ev_buf->length; */
3054 dev_err(dwc->dev, "skip: %d event, dwc->ev_buf->lpos: 0x%x\n",
3055 count, dwc->ev_buf->lpos);
3056 }
3057 ret = dwc3_gadget_run_stop(dwc, 0, false);
3058 if (cpu_is_asr1901() || cpu_is_asr1906())
3059 spin_unlock_irqrestore(&dwc->lock, flags);
3060 dwc3_controller_reset(dwc);
3061 if (cpu_is_asr1901() || cpu_is_asr1906())
3062 spin_lock_irqsave(&dwc->lock, flags);
3063
3064#if 0 //upstream already done by ASR code
3065 /*
3066 * In the Synopsys DWC_usb31 1.90a programming guide section
3067 * 4.1.9, it specifies that for a reconnect after a
3068 * device-initiated disconnect requires a core soft reset
3069 * (DCTL.CSftRst) before enabling the run/stop bit.
3070 */
3071 spin_unlock_irqrestore(&dwc->lock, flags);
3072 dwc3_core_soft_reset(dwc);
3073 spin_lock_irqsave(&dwc->lock, flags);
3074#endif
3075 __dwc3_gadget_start(dwc);
3076 dwc->ev_buf->count = 0;
3077 dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
3078 dwc->ev_buf->lpos = 0;
3079 memset(dwc->ev_buf->buf, 0x0, DWC3_EVENT_BUFFERS_SIZE);
3080 memset(dwc->ev_buf->cache, 0x0, DWC3_EVENT_BUFFERS_SIZE);
3081 } else if (dwc->gadget_driver && dwc->softconnect && dwc->vbus_active) {
3082 if (work_pending(&dwc->usb_restart_work.work))
3083 cancel_delayed_work(&dwc->usb_restart_work);
3084 ret = dwc3_gadget_run_stop(dwc, 1, false);
3085 spin_unlock_irqrestore(&dwc->lock, flags);
3086 enable_irq(dwc->irq_gadget);
3087 dwc3_charger_type_confirm(dwc);
3088 goto out;
3089 }
3090
3091 spin_unlock_irqrestore(&dwc->lock, flags);
3092 enable_irq(dwc->irq_gadget);
3093
3094#if 0 /* ASR private */
3095 pm_runtime_put(dwc->dev);
3096#endif
3097
3098out:
3099 mutex_unlock(&usb_con_mutex);
3100 return ret;
3101}
3102#else
3103static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
3104{
3105 struct dwc3 *dwc = gadget_to_dwc(g);
3106 unsigned long flags;
3107 int ret = 0;
3108
3109 is_on = !!is_on;
3110
3111 pr_emerg("dwc3 pullup(%d) otg state: %d - %s\n",
3112 is_on, dwc->otg_state, usb_otg_state_string(dwc->otg_state));
3113
3114 if (dwc->otg_state != OTG_STATE_B_IDLE
3115 && dwc->otg_state != OTG_STATE_B_PERIPHERAL) {
3116 dwc->softconnect = (is_on != 0);
3117 pr_info("pullup exit for host mode\n");
3118 return 0;
3119 }
3120
3121 mutex_lock(&usb_con_mutex);
3122
3123 if (dwc->softconnect == is_on) {
3124 dev_info(dwc->dev, "dwc3 already pulled up\n");
3125 goto out;
3126 }
3127
3128 dwc->softconnect = (is_on != 0);
3129
3130 if (dwc->charger_type == DCP_CHARGER) {
3131 dev_info(dwc->dev, "dwc3 pullup out on DCP_CHARGER\n");
3132 goto out;
3133 }
3134
3135 dev_info(dwc->dev, "%s: softconnect %d, vbus_active %d, pre_chrgr: %d\n",
3136 __func__, dwc->softconnect, dwc->vbus_active, dwc->prev_charger_type);
3137
3138 /*
3139 * Per databook, when we want to stop the gadget, if a control transfer
3140 * is still in process, complete it and get the core into setup phase.
3141 */
3142
3143 if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
3144 dev_info(dwc->dev, "waiting dwc->ep0state %d\n", dwc->ep0state);
3145 reinit_completion(&dwc->ep0_in_setup);
3146
3147 ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
3148 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
3149 if (ret == 0)
3150 dev_warn(dwc->dev, "timed out waiting for SETUP phase: %d\n", dwc->ep0state);
3151 }
3152
3153 /*
3154 * Avoid issuing a runtime resume if the device is already in the
3155 * suspended state during gadget disconnect. DWC3 gadget was already
3156 * halted/stopped during runtime suspend.
3157 */
3158
3159#if 0 /* ASR private */
3160 if (!is_on) {
3161 pm_runtime_barrier(dwc->dev);
3162 if (pm_runtime_suspended(dwc->dev))
3163 return 0;
3164 }
3165#endif
3166
3167 /*
3168 * Check the return value for successful resume, or error. For a
3169 * successful resume, the DWC3 runtime PM resume routine will handle
3170 * the run stop sequence, so avoid duplicate operations here.
3171 */
3172#if 0 /* ASR private */
3173 ret = pm_runtime_get_sync(dwc->dev);
3174 if (!ret || ret < 0) {
3175 pm_runtime_put(dwc->dev);
3176 if (ret < 0)
3177 pm_runtime_set_suspended(dwc->dev);
3178 return ret;
3179 }
3180#endif
3181 /*
3182 * Synchronize and disable any further event handling while controller
3183 * is being enabled/disabled.
3184 */
3185 disable_irq(dwc->irq_gadget);
3186
3187 spin_lock_irqsave(&dwc->lock, flags);
3188 if (dwc->gadget_driver && (!dwc->softconnect) && dwc->vbus_active) {
3189 u32 count;
3190#ifdef CONFIG_DWC3_HWSULOG
3191 hwsulog_on = false;
3192#endif
3193 dwc->connected = false;
3194 /*
3195		 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3196 * Section 4.1.8 Table 4-7, it states that for a device-initiated
3197 * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
3198 * command for any active transfers" before clearing the RunStop
3199 * bit.
3200 */
3201 dwc3_stop_active_transfers(dwc);
3202 __dwc3_gadget_stop(dwc);
3203 /*
3204		 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3205 * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
3206 * "software needs to acknowledge the events that are generated
3207 * (by writing to GEVNTCOUNTn) while it is waiting for this bit
3208 * to be set to '1'."
3209 */
3210 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3211 count &= DWC3_GEVNTCOUNT_MASK;
3212 if (count > 0) {
3213 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
3214 dwc->ev_buf->lpos = 0; /* (dwc->ev_buf->lpos + count) %
3215 dwc->ev_buf->length; */
3216 dev_err(dwc->dev, "skip: %d event, dwc->ev_buf->lpos: 0x%x\n",
3217 count, dwc->ev_buf->lpos);
3218 }
3219 ret = dwc3_gadget_run_stop(dwc, 0, false);
3220 if (cpu_is_asr1901() || cpu_is_asr1906())
3221 spin_unlock_irqrestore(&dwc->lock, flags);
3222 dwc3_controller_reset(dwc);
3223 if (cpu_is_asr1901() || cpu_is_asr1906())
3224 spin_lock_irqsave(&dwc->lock, flags);
3225 /* __dwc3_gadget_start(dwc); */
3226 dwc->ev_buf->count = 0;
3227 dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
3228 dwc->ev_buf->lpos = 0;
3229 memset(dwc->ev_buf->buf, 0x0, DWC3_EVENT_BUFFERS_SIZE);
3230 memset(dwc->ev_buf->cache, 0x0, DWC3_EVENT_BUFFERS_SIZE);
3231 } else if (dwc->gadget_driver && dwc->softconnect && dwc->vbus_active) {
3232 if (work_pending(&dwc->usb_restart_work.work))
3233 cancel_delayed_work(&dwc->usb_restart_work);
3234
3235#if 0 //upstream already done by ASR code
3236 /*
3237 * In the Synopsys DWC_usb31 1.90a programming guide section
3238 * 4.1.9, it specifies that for a reconnect after a
3239 * device-initiated disconnect requires a core soft reset
3240 * (DCTL.CSftRst) before enabling the run/stop bit.
3241 */
3242 spin_unlock_irqrestore(&dwc->lock, flags);
3243 dwc3_core_soft_reset(dwc);
3244 spin_lock_irqsave(&dwc->lock, flags);
3245#endif
3246 __dwc3_gadget_start(dwc);
3247 ret = dwc3_gadget_run_stop(dwc, 1, false);
3248 spin_unlock_irqrestore(&dwc->lock, flags);
3249 enable_irq(dwc->irq_gadget);
3250 dwc3_charger_type_confirm(dwc);
3251 goto out;
3252 }
3253
3254 spin_unlock_irqrestore(&dwc->lock, flags);
3255 enable_irq(dwc->irq_gadget);
3256
3257#if 0 /* ASR private */
3258 pm_runtime_put(dwc->dev);
3259#endif
3260
3261out:
3262 mutex_unlock(&usb_con_mutex);
3263 return ret;
3264}
3265#endif
3266static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
3267{
3268 u32 reg;
3269
3270 /* Enable all but Start and End of Frame IRQs */
3271 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
3272 DWC3_DEVTEN_EVNTOVERFLOWEN |
3273 DWC3_DEVTEN_CMDCMPLTEN |
3274 DWC3_DEVTEN_ERRTICERREN |
3275 DWC3_DEVTEN_WKUPEVTEN |
3276 DWC3_DEVTEN_CONNECTDONEEN |
3277 DWC3_DEVTEN_USBRSTEN |
3278 DWC3_DEVTEN_DISCONNEVTEN);
3279
3280 if (dwc->revision < DWC3_REVISION_250A)
3281 reg |= DWC3_DEVTEN_ULSTCNGEN;
3282
3283 /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
3284 if (dwc->revision >= DWC3_REVISION_230A)
3285 reg |= DWC3_DEVTEN_EOPFEN;
3286
3287 /* add link event irq for state change handler */
3288 reg |= DWC3_DEVTEN_ULSTCNGEN;
3289
3290 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
3291}
3292
3293static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
3294{
3295 /* mask all interrupts */
3296 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
3297}
3298
3299static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
3300static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
3301
3302/**
3303 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
3304 * @dwc: pointer to our context structure
3305 *
3306 * The following looks complex but it's actually very simple. In order to
3307 * calculate the number of packets we can burst at once on OUT transfers, we
3308 * use the RxFIFO size.
3309 *
3310 * To calculate RxFIFO size we need two numbers:
3311 * MDWIDTH = size, in bits, of the internal memory bus
3312 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
3313 *
3314 * Given these two numbers, the formula is simple:
3315 *
3316 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
3317 *
3318 * 24 bytes is for 3x SETUP packets
3319 * 16 bytes is a clock domain crossing tolerance
3320 *
3321 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
3322 */
3323static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
3324{
3325 u32 ram2_depth;
3326 u32 mdwidth;
3327 u32 nump;
3328 u32 reg;
3329
3330 ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
3331 mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
3332
3333 nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
3334 nump = min_t(u32, nump, 16);
3335
3336 /* update NumP */
3337 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3338 reg &= ~DWC3_DCFG_NUMP_MASK;
3339 reg |= nump << DWC3_DCFG_NUMP_SHIFT;
3340 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3341}
3342
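/*
 * (Re)initialize the device side of the core: program interrupt moderation
 * and the RxFIFO-based NUMP value, select the maximum speed, enable both
 * physical endpoints of EP0, reset the EP0 state machine so SETUP packets can
 * be received again, and unmask the device events.
 */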
3343static int __dwc3_gadget_start(struct dwc3 *dwc)
3344{
3345 struct dwc3_ep *dep;
3346 int ret = 0;
3347 u32 reg;
3348
3349 /*
3350 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
3351 * the core supports IMOD, disable it.
3352 */
3353 if (dwc->imod_interval) {
3354 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
3355 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
3356 } else if (dwc3_has_imod(dwc)) {
3357 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
3358 }
3359
3360 /*
3361 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
3362 * field instead of letting dwc3 itself calculate that automatically.
3363 *
3364 * This way, we maximize the chances that we'll be able to get several
3365 * bursts of data without going through any sort of endpoint throttling.
3366 */
3367 reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
3368 if (dwc3_is_usb31(dwc))
3369 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
3370 else
3371 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
3372
3373 dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
3374
3375 dwc3_gadget_setup_nump(dwc);
3376 dwc3_gadget_set_speed_nolock(&dwc->gadget, dwc->maximum_speed);
3377
3378 /* Start with SuperSpeed Default */
3379 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
3380
3381 dep = dwc->eps[0];
3382 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
3383 if (ret) {
3384 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3385 goto err0;
3386 }
3387
3388 dep = dwc->eps[1];
3389 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
3390 if (ret) {
3391 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3392 goto err1;
3393 }
3394
3395 if (dwc->ep0state != EP0_SETUP_PHASE)
3396 pr_info("dwc->ep0state: %d\n", dwc->ep0state);
3397 /* begin to receive SETUP packets */
3398 dwc->ep0state = EP0_SETUP_PHASE;
3399 dwc->link_state = DWC3_LINK_STATE_SS_DIS;
3400 dwc->delayed_status = false;
3401 dwc->three_stage_setup = 0;
3402 dwc->setup_packet_pending = 0;
3403 dwc->ep0_bounced = 0;
3404 dwc3_ep0_out_start(dwc);
3405
3406 dwc3_gadget_enable_irq(dwc);
3407
3408 return 0;
3409
3410err1:
3411 __dwc3_gadget_ep_disable(dwc->eps[0]);
3412
3413err0:
3414 return ret;
3415}
3416
3417#ifndef CONFIG_USB_DWC3_ASR_OTG
3418static int dwc3_gadget_start(struct usb_gadget *g,
3419 struct usb_gadget_driver *driver)
3420{
3421 struct dwc3 *dwc = gadget_to_dwc(g);
3422 unsigned long flags;
3423 int ret = 0;
3424 int irq;
3425#ifdef CONFIG_CPU_ASR1901
3426 struct irq_desc *desc;
3427#endif
3428
3429 irq = dwc->irq_gadget;
3430 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
3431 IRQF_SHARED, "dwc3", dwc->ev_buf);
3432 if (ret) {
3433 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
3434 irq, ret);
3435 goto err0;
3436 }
3437
3438#ifdef CONFIG_CPU_ASR1901
3439 desc = irq_to_desc(irq);
3440 if (desc && nr_cpu_ids > 1) {
3441 dev_err(dwc->dev, "set dwc3 irq thread on cpu-1 by default\n");
3442 sched_setaffinity(desc->action->thread->pid, cpumask_of(1));
3443 }
3444#endif
3445
3446 if (cpu_is_asr1901() || cpu_is_asr1906())
3447 usb_phy_set_suspend(dwc->usb2_phy, 0);
3448 spin_lock_irqsave(&dwc->lock, flags);
3449 if (dwc->gadget_driver) {
3450 dev_err(dwc->dev, "%s is already bound to %s\n",
3451 dwc->gadget.name,
3452 dwc->gadget_driver->driver.name);
3453 ret = -EBUSY;
3454 goto err1;
3455 }
3456
3457 dwc->gadget_driver = driver;
3458
3459 if (!(cpu_is_asr1901() || cpu_is_asr1906()))
3460 usb_phy_set_suspend(dwc->usb2_phy, 0);
3461 __dwc3_gadget_start(dwc);
3462
3463 spin_unlock_irqrestore(&dwc->lock, flags);
3464
3465 if (dwc->qwork)
3466 queue_work(dwc->qwork, &dwc->vbus_work);
3467
3468 return 0;
3469
3470err1:
3471 spin_unlock_irqrestore(&dwc->lock, flags);
3472 free_irq(irq, dwc);
3473
3474err0:
3475 return ret;
3476}
3477#else
3478static int dwc3_gadget_start(struct usb_gadget *g,
3479 struct usb_gadget_driver *driver)
3480{
3481 struct dwc3 *dwc = gadget_to_dwc(g);
3482 unsigned long flags;
3483 int ret = 0;
3484 int irq;
3485#ifdef CONFIG_CPU_ASR1901
3486 struct irq_desc *desc;
3487 struct irqaction *action;
3488#endif
3489
3490 irq = dwc->irq_gadget;
3491 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
3492 IRQF_SHARED, "dwc3", dwc->ev_buf);
3493 if (ret) {
3494 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
3495 irq, ret);
3496 goto err0;
3497 }
3498
3499#ifdef CONFIG_CPU_ASR1901
3500 desc = irq_to_desc(irq);
3501 if (desc && nr_cpu_ids > 1) {
3502 for (action = desc->action; action != NULL; action = action->next) {
3503 if (action->handler == dwc3_interrupt) {
3504 dev_err(dwc->dev, "set dwc3 irq thread on cpu-1 by default\n");
3505 sched_setaffinity(action->thread->pid, cpumask_of(1));
3506 }
3507 }
3508 }
3509#endif
3510
3511 if (cpu_is_asr1901() || cpu_is_asr1906())
3512 usb_phy_set_suspend(dwc->usb2_phy, 0);
3513 spin_lock_irqsave(&dwc->lock, flags);
3514 if (dwc->gadget_driver) {
3515 dev_err(dwc->dev, "%s is already bound to %s\n",
3516 dwc->gadget.name,
3517 dwc->gadget_driver->driver.name);
3518 ret = -EBUSY;
3519 goto err1;
3520 }
3521
3522 dwc->gadget_driver = driver;
3523/*
3524 if (!(cpu_is_asr1901() || cpu_is_asr1906()))
3525 usb_phy_set_suspend(dwc->usb2_phy, 0);
3526 __dwc3_gadget_start(dwc);
3527*/
3528 spin_unlock_irqrestore(&dwc->lock, flags);
3529 return 0;
3530
3531err1:
3532 spin_unlock_irqrestore(&dwc->lock, flags);
3533 free_irq(irq, dwc);
3534
3535err0:
3536 return ret;
3537}
3538#endif
3539
3540static void __dwc3_gadget_stop(struct dwc3 *dwc)
3541{
3542 dwc3_gadget_disable_irq(dwc);
3543 __dwc3_gadget_ep_disable(dwc->eps[0]);
3544 __dwc3_gadget_ep_disable(dwc->eps[1]);
3545}
3546
3547static int dwc3_gadget_stop(struct usb_gadget *g)
3548{
3549 struct dwc3 *dwc = gadget_to_dwc(g);
3550 unsigned long flags;
3551
3552 spin_lock_irqsave(&dwc->lock, flags);
3553 dwc->gadget_driver = NULL;
3554 spin_unlock_irqrestore(&dwc->lock, flags);
3555
3556 free_irq(dwc->irq_gadget, dwc->ev_buf);
3557
3558 return 0;
3559}
3560
3561static void dwc3_gadget_config_params(struct usb_gadget *g,
3562 struct usb_dcd_config_params *params)
3563{
3564 struct dwc3 *dwc = gadget_to_dwc(g);
3565
3566 params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
3567 params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
3568
3569 /* Recommended BESL */
3570 if (!dwc->dis_enblslpm_quirk) {
3571 /*
3572 * If the recommended BESL baseline is 0 or if the BESL deep is
3573 * less than 2, Microsoft's Windows 10 host usb stack will issue
3574 * a usb reset immediately after it receives the extended BOS
3575 * descriptor and the enumeration will fail. To maintain
3576 * compatibility with the Windows' usb stack, let's set the
3577 * recommended BESL baseline to 1 and clamp the BESL deep to be
3578 * within 2 to 15.
3579 */
3580 params->besl_baseline = 1;
3581 if (dwc->is_utmi_l1_suspend)
3582 params->besl_deep =
3583 clamp_t(u8, dwc->hird_threshold, 2, 15);
3584 }
3585
3586 /* U1 Device exit Latency */
3587 if (dwc->dis_u1_entry_quirk)
3588 params->bU1devExitLat = 0;
3589 else
3590 params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
3591
3592 /* U2 Device exit Latency */
3593 if (dwc->dis_u2_entry_quirk)
3594 params->bU2DevExitLat = 0;
3595 else
3596 params->bU2DevExitLat =
3597 cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
3598}
3599
3600static void dwc3_gadget_set_speed_nolock(struct usb_gadget *g,
3601 enum usb_device_speed speed)
3602{
3603 struct dwc3 *dwc = gadget_to_dwc(g);
3604 u32 reg;
3605
3606 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3607 reg &= ~(DWC3_DCFG_SPEED_MASK);
3608
3609 /*
3610	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
3611 * which would cause metastability state on Run/Stop
3612 * bit if we try to force the IP to USB2-only mode.
3613 *
3614 * Because of that, we cannot configure the IP to any
3615 * speed other than the SuperSpeed
3616 *
3617 * Refers to:
3618 *
3619 * STAR#9000525659: Clock Domain Crossing on DCTL in
3620 * USB 2.0 Mode
3621 */
3622 if (dwc->revision < DWC3_REVISION_220A &&
3623 !dwc->dis_metastability_quirk) {
3624 reg |= DWC3_DCFG_SUPERSPEED;
3625 } else {
3626 switch (speed) {
3627 case USB_SPEED_LOW:
3628 reg |= DWC3_DCFG_LOWSPEED;
3629 break;
3630 case USB_SPEED_FULL:
3631 reg |= DWC3_DCFG_FULLSPEED;
3632 break;
3633 case USB_SPEED_HIGH:
3634 reg |= DWC3_DCFG_HIGHSPEED;
3635 break;
3636 case USB_SPEED_SUPER:
3637 reg |= DWC3_DCFG_SUPERSPEED;
3638 break;
3639 case USB_SPEED_SUPER_PLUS:
3640 if (dwc3_is_usb31(dwc))
3641 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
3642 else
3643 reg |= DWC3_DCFG_SUPERSPEED;
3644 break;
3645 default:
3646 dev_err(dwc->dev, "invalid speed (%d)\n", speed);
3647
3648 if (dwc->revision & DWC3_REVISION_IS_DWC31)
3649 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
3650 else
3651 reg |= DWC3_DCFG_SUPERSPEED;
3652 }
3653 }
3654 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3655}
3656
3657static void dwc3_gadget_set_speed(struct usb_gadget *g,
3658 enum usb_device_speed speed)
3659{
3660 struct dwc3 *dwc = gadget_to_dwc(g);
3661 unsigned long flags;
3662
3663 spin_lock_irqsave(&dwc->lock, flags);
3664 dwc3_gadget_set_speed_nolock(g, speed);
3665 spin_unlock_irqrestore(&dwc->lock, flags);
3666}
3667
3668static int asr_usb_vbus_session(struct usb_gadget *gadget, int is_active);
3669static const struct usb_gadget_ops dwc3_gadget_ops = {
3670 .get_frame = dwc3_gadget_get_frame,
3671 .wakeup = dwc3_gadget_wakeup,
3672 .set_selfpowered = dwc3_gadget_set_selfpowered,
3673 .pullup = dwc3_gadget_pullup,
3674 .udc_start = dwc3_gadget_start,
3675 .udc_stop = dwc3_gadget_stop,
3676 .udc_set_speed = dwc3_gadget_set_speed,
3677 .get_config_params = dwc3_gadget_config_params,
3678 .vbus_session = asr_usb_vbus_session,
3679};
3680
3681/* -------------------------------------------------------------------------- */
3682
3683static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
3684{
3685 struct dwc3 *dwc = dep->dwc;
3686
3687 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
3688 dep->endpoint.maxburst = 1;
3689 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
3690 if (!dep->direction)
3691 dwc->gadget.ep0 = &dep->endpoint;
3692
3693 dep->endpoint.caps.type_control = true;
3694
3695 return 0;
3696}
3697
3698static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
3699{
3700 struct dwc3 *dwc = dep->dwc;
3701 int mdwidth;
3702 int size;
3703
3704 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
3705 /* MDWIDTH is represented in bits, we need it in bytes */
3706 mdwidth /= 8;
3707
3708 size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
3709 if (dwc3_is_usb31(dwc))
3710 size = DWC31_GTXFIFOSIZ_TXFDEF(size);
3711 else
3712 size = DWC3_GTXFIFOSIZ_TXFDEF(size);
3713
3714	/* FIFO Depth is in MDWIDTH bytes. Multiply */
3715 size *= mdwidth;
3716
3717 /*
3718	 * To meet performance requirements, a minimum TxFIFO size of 3x
3719 * MaxPacketSize is recommended for endpoints that support burst and a
3720 * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
3721 * support burst. Use those numbers and we can calculate the max packet
3722 * limit as below.
3723 */
3724 if (dwc->maximum_speed >= USB_SPEED_SUPER)
3725 size /= 3;
3726 else
3727 size /= 2;
3728
3729 usb_ep_set_maxpacket_limit(&dep->endpoint, size);
3730
3731 dep->endpoint.max_streams = 15;
3732 dep->endpoint.ops = &dwc3_gadget_ep_ops;
3733 list_add_tail(&dep->endpoint.ep_list,
3734 &dwc->gadget.ep_list);
3735 dep->endpoint.caps.type_iso = true;
3736 dep->endpoint.caps.type_bulk = true;
3737 dep->endpoint.caps.type_int = true;
3738
3739 return dwc3_alloc_trb_pool(dep);
3740}
3741
3742static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
3743{
3744 struct dwc3 *dwc = dep->dwc;
3745 int mdwidth;
3746 int size;
3747
3748 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
3749
3750 /* MDWIDTH is represented in bits, convert to bytes */
3751 mdwidth /= 8;
3752
3753 /* All OUT endpoints share a single RxFIFO space */
3754 size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
3755 if (dwc3_is_usb31(dwc))
3756 size = DWC31_GRXFIFOSIZ_RXFDEP(size);
3757 else
3758 size = DWC3_GRXFIFOSIZ_RXFDEP(size);
3759
3760	/* FIFO depth is in MDWIDTH bytes */
3761 size *= mdwidth;
3762
3763 /*
3764	 * To meet performance requirements, a minimum recommended RxFIFO size
3765	 * is defined as follows:
3766 * RxFIFO size >= (3 x MaxPacketSize) +
3767 * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
3768 *
3769 * Then calculate the max packet limit as below.
3770 */
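	/*
	 * Worked example (illustrative only, values assumed): with
	 * MDWIDTH = 8 bytes and GRXFIFOSIZ reporting RXFDEP = 260 entries,
	 * the RxFIFO is 260 * 8 = 2080 bytes. Subtracting the 3 * 8 bytes of
	 * setup packet space and the 16-byte clock-crossing margin leaves
	 * 2040 bytes, so the computation below sets the max packet limit to
	 * 2040 / 3 = 680 bytes.
	 */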
3771 size -= (3 * 8) + 16;
3772 if (size < 0)
3773 size = 0;
3774 else
3775 size /= 3;
3776
3777 usb_ep_set_maxpacket_limit(&dep->endpoint, size);
3778 dep->endpoint.max_streams = 15;
3779 dep->endpoint.ops = &dwc3_gadget_ep_ops;
3780 list_add_tail(&dep->endpoint.ep_list,
3781 &dwc->gadget.ep_list);
3782 dep->endpoint.caps.type_iso = true;
3783 dep->endpoint.caps.type_bulk = true;
3784 dep->endpoint.caps.type_int = true;
3785
3786 return dwc3_alloc_trb_pool(dep);
3787}
3788
3789static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
3790{
3791 struct dwc3_ep *dep;
3792 bool direction = epnum & 1;
3793 int ret;
3794 u8 num = epnum >> 1;
3795
3796 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
3797 if (!dep)
3798 return -ENOMEM;
3799
3800 dep->dwc = dwc;
3801 dep->number = epnum;
3802 dep->direction = direction;
3803 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
3804 dwc->eps[epnum] = dep;
3805 dep->combo_num = 0;
3806 dep->start_cmd_status = 0;
3807
3808 snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
3809 direction ? "in" : "out");
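	/*
	 * For example (illustrative only): physical endpoint number 5 maps
	 * to num = 5 >> 1 = 2 with direction = 5 & 1 = 1 (IN), so it is
	 * registered as "ep2in", while physical endpoint 4 becomes "ep2out".
	 */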
3810
3811 dep->endpoint.name = dep->name;
3812
3813	if (dep->number <= 1) {
3814 dep->endpoint.desc = &dwc3_gadget_ep0_desc;
3815 dep->endpoint.comp_desc = NULL;
3816 }
3817
3818 if (num == 0)
3819 ret = dwc3_gadget_init_control_endpoint(dep);
3820 else if (direction)
3821 ret = dwc3_gadget_init_in_endpoint(dep);
3822 else
3823 ret = dwc3_gadget_init_out_endpoint(dep);
3824
3825 if (ret)
3826 return ret;
3827
3828 dep->endpoint.caps.dir_in = direction;
3829 dep->endpoint.caps.dir_out = !direction;
3830
3831 INIT_LIST_HEAD(&dep->pending_list);
3832 INIT_LIST_HEAD(&dep->started_list);
3833 INIT_LIST_HEAD(&dep->cancelled_list);
3834
3835 if (dep->number < 16)
3836 dwc3_debugfs_create_endpoint_dir(dep);
3837
3838 return 0;
3839}
3840
3841static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
3842{
3843 u8 epnum;
3844
3845 INIT_LIST_HEAD(&dwc->gadget.ep_list);
3846
3847 for (epnum = 0; epnum < total; epnum++) {
3848 int ret;
3849
3850 ret = dwc3_gadget_init_endpoint(dwc, epnum);
3851 if (ret)
3852 return ret;
3853 }
3854
3855 return 0;
3856}
3857
3858static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
3859{
3860 struct dwc3_ep *dep;
3861 u8 epnum;
3862
3863 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
3864 dep = dwc->eps[epnum];
3865 if (!dep)
3866 continue;
3867 /*
3868 * Physical endpoints 0 and 1 are special; they form the
3869 * bi-directional USB endpoint 0.
3870 *
3871 * For those two physical endpoints, we don't allocate a TRB
3872	 * pool, nor do we add them to the endpoints list. Because of that,
3873	 * we must not perform those two operations on them here; otherwise
3874	 * we would end up with all sorts of bugs when removing dwc3.ko.
3875 */
3876 if (epnum != 0 && epnum != 1) {
3877 dwc3_free_trb_pool(dep);
3878 list_del(&dep->endpoint.ep_list);
3879 }
3880
3881 debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
3882 kfree(dep);
3883 }
3884}
3885
3886/* -------------------------------------------------------------------------- */
3887
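/*
 * Descriptive note (added for clarity): the reclaim helpers below return
 * 1 when TRB reclaiming for the current request should stop (HWO still
 * owned by hardware, a short packet on a non-chained TRB, a missed
 * isochronous interval, or an IOC/LST TRB), and 0 when the caller should
 * keep walking the remaining TRBs of the request.
 */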
3888static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
3889 struct dwc3_request *req, struct dwc3_trb *trb,
3890 const struct dwc3_event_depevt *event, int status, int chain)
3891{
3892 unsigned int count;
3893
3894 dwc3_ep_inc_deq(dep);
3895
3896 trace_dwc3_complete_trb(dep, trb);
3897 req->num_trbs--;
3898
3899 /*
3900 * If we're in the middle of series of chained TRBs and we
3901 * receive a short transfer along the way, DWC3 will skip
3902	 * through all TRBs including the last TRB in the chain (the one
3903	 * where the CHN bit is zero). DWC3 will also avoid clearing the HWO
3904	 * bit, so SW has to do it manually.
3905 *
3906 * We're going to do that here to avoid problems of HW trying
3907 * to use bogus TRBs for transfers.
3908 */
3909 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
3910 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
3911
3912 /*
3913 * For isochronous transfers, the first TRB in a service interval must
3914 * have the Isoc-First type. Track and report its interval frame number.
3915 */
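	/*
	 * Worked example (numbers assumed): with dep->interval = 8 and a
	 * SID/SOFN field of 291 (0x123), the reported request.frame_number
	 * below becomes 291 & ~7 = 288, i.e. the start of that service
	 * interval.
	 */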
3916 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3917 (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
3918 unsigned int frame_number;
3919
3920 frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
3921 frame_number &= ~(dep->interval - 1);
3922 req->request.frame_number = frame_number;
3923 }
3924
3925 /*
3926	 * If we're dealing with an unaligned-size OUT transfer, we will be
3927	 * left with one TRB pending in the ring. We need to manually clear
3928	 * the HWO bit from that TRB.
3929 */
3930
3931 if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
3932 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
3933 return 1;
3934 }
3935
3936 count = trb->size & DWC3_TRB_SIZE_MASK;
3937 req->remaining += count;
3938
3939 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
3940 return 1;
3941
3942 if (event->status & DEPEVT_STATUS_SHORT && !chain)
3943 return 1;
3944
3945 if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) &&
3946 DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC)
3947 return 1;
3948
3949 if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
3950 (trb->ctrl & DWC3_TRB_CTRL_LST))
3951 return 1;
3952
3953 return 0;
3954}
3955
3956static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
3957 struct dwc3_request *req, const struct dwc3_event_depevt *event,
3958 int status)
3959{
3960 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
3961 struct scatterlist *sg = req->sg;
3962 struct scatterlist *s;
3963 unsigned int num_queued = req->num_queued_sgs;
3964 unsigned int i;
3965 int ret = 0;
3966
3967 for_each_sg(sg, s, num_queued, i) {
3968 trb = &dep->trb_pool[dep->trb_dequeue];
3969
3970 req->sg = sg_next(s);
3971 req->num_queued_sgs--;
3972
3973 ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
3974 trb, event, status, true);
3975 if (ret)
3976 break;
3977 }
3978
3979 return ret;
3980}
3981
3982static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
3983 struct dwc3_request *req, const struct dwc3_event_depevt *event,
3984 int status)
3985{
3986 struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
3987
3988 return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
3989 event, status, false);
3990}
3991
3992static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
3993{
3994 return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
3995}
3996
3997static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
3998 const struct dwc3_event_depevt *event,
3999 struct dwc3_request *req, int status)
4000{
4001 int request_status;
4002 int ret;
4003
4004 if (req->request.num_mapped_sgs)
4005 ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
4006 status);
4007 else
4008 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
4009 status);
4010
4011 req->request.actual = req->request.length - req->remaining;
4012
4013 if (!dwc3_gadget_ep_request_completed(req))
4014 goto out;
4015
4016 if (req->needs_extra_trb) {
4017 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
4018
4019 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
4020 status);
4021
4022 /* Reclaim MPS padding TRB for ZLP */
4023 if (!req->direction && req->request.zero && req->request.length &&
4024 !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
4025 (IS_ALIGNED(req->request.length, maxp)))
4026 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
4027
4028 req->needs_extra_trb = false;
4029 }
4030
4031 /*
4032 * The event status only reflects the status of the TRB with IOC set.
4033 * For the requests that don't set interrupt on completion, the driver
4034 * needs to check and return the status of the completed TRBs associated
4035 * with the request. Use the status of the last TRB of the request.
4036 */
4037 if (req->request.no_interrupt) {
4038 struct dwc3_trb *trb;
4039
4040 trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
4041 switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
4042 case DWC3_TRBSTS_MISSED_ISOC:
4043 /* Isoc endpoint only */
4044 request_status = -EXDEV;
4045 break;
4046 case DWC3_TRB_STS_XFER_IN_PROG:
4047 /* Applicable when End Transfer with ForceRM=0 */
4048 case DWC3_TRBSTS_SETUP_PENDING:
4049 /* Control endpoint only */
4050 case DWC3_TRBSTS_OK:
4051 default:
4052 request_status = 0;
4053 break;
4054 }
4055 } else {
4056 request_status = status;
4057 }
4058
4059 dwc3_gadget_giveback(dep, req, request_status);
4060
4061out:
4062 return ret;
4063}
4064
4065static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
4066 const struct dwc3_event_depevt *event, int status)
4067{
4068 struct dwc3_request *req;
4069 struct dwc3_request *tmp;
4070
4071 list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
4072 int ret;
4073
4074 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
4075 req, status);
4076 if (ret)
4077 break;
4078 }
4079}
4080
4081static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
4082{
4083 struct dwc3_request *req;
4084
4085#ifdef CONFIG_CPU_ASR18XX
4086 if (dep->number == 2)
4087 return false;
4088
4089 if ((dep->number == 3) && (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) && (dwc3_calc_trbs_left(dep) < 16))
4090 return false;
4091#endif
4092
4093 if (!list_empty(&dep->pending_list))
4094 return true;
4095
4096 /*
4097 * We only need to check the first entry of the started list. We can
4098 * assume the completed requests are removed from the started list.
4099 */
4100 req = next_request(&dep->started_list);
4101 if (!req)
4102 return false;
4103
4104 return !dwc3_gadget_ep_request_completed(req);
4105}
4106
4107static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
4108 const struct dwc3_event_depevt *event)
4109{
4110 dep->frame_number = event->parameters;
4111}
4112
4113static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
4114 const struct dwc3_event_depevt *event)
4115{
4116 struct dwc3 *dwc = dep->dwc;
4117 unsigned status = 0;
4118 bool stop = false;
4119
4120 dwc3_gadget_endpoint_frame_from_event(dep, event);
4121
4122 if (event->status & DEPEVT_STATUS_BUSERR)
4123 status = -ECONNRESET;
4124
4125 if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
4126 status = -EXDEV;
4127
4128 if (list_empty(&dep->started_list))
4129 stop = true;
4130 }
4131
4132 dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
4133
4134 if (stop)
4135 dwc3_stop_active_transfer(dep, true, true);
4136 else if (dwc3_gadget_ep_should_continue(dep))
4137 __dwc3_gadget_kick_transfer(dep);
4138
4139 /*
4140 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
4141 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
4142 */
4143 if (dwc->revision < DWC3_REVISION_183A) {
4144 u32 reg;
4145 int i;
4146
4147 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
4148 dep = dwc->eps[i];
4149
4150 if (!(dep->flags & DWC3_EP_ENABLED))
4151 continue;
4152
4153 if (!list_empty(&dep->started_list))
4154 return;
4155 }
4156
4157 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
4158 reg |= dwc->u1u2;
4159 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4160
4161 dwc->u1u2 = 0;
4162 }
4163}
4164
4165static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
4166 const struct dwc3_event_depevt *event)
4167{
4168 dwc3_gadget_endpoint_frame_from_event(dep, event);
4169 (void) __dwc3_gadget_start_isoc(dep);
4170}
4171
4172static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
4173 const struct dwc3_event_depevt *event)
4174{
4175 struct dwc3_ep *dep;
4176 u8 epnum = event->endpoint_number;
4177 u8 cmd;
4178
4179#ifdef CONFIG_DWC3_HWSULOG
4180 if (unlikely(sulog_ep_num && (epnum == (sulog_ep_num * 2 + 1))))
4181 BUG();
4182#endif
4183
4184 dep = dwc->eps[epnum];
4185
4186 if (!(dep->flags & DWC3_EP_ENABLED)) {
4187 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
4188 return;
4189
4190 /* Handle only EPCMDCMPLT when EP disabled */
4191 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
4192 return;
4193 }
4194
4195 if (epnum == 0 || epnum == 1) {
4196 dwc3_ep0_interrupt(dwc, event);
4197 return;
4198 }
4199
4200 switch (event->endpoint_event) {
4201 case DWC3_DEPEVT_XFERINPROGRESS:
4202 dwc3_gadget_endpoint_transfer_in_progress(dep, event);
4203 break;
4204 case DWC3_DEPEVT_XFERNOTREADY:
4205 dwc3_gadget_endpoint_transfer_not_ready(dep, event);
4206 break;
4207 case DWC3_DEPEVT_EPCMDCMPLT:
4208 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
4209
4210 if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
4211 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
4212 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
4213 pr_info("ep%d 0x%x ENDCMD\n", dep->number, dep->flags);
4214
4215 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
4216
4217 dep->flags &= ~DWC3_EP_STALL_IN_PROGRESS;
4218
4219 if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
4220 struct dwc3 *dwc = dep->dwc;
4221
4222 dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
4223 if (dwc3_send_clear_stall_ep_cmd(dep)) {
4224 struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
4225
4226 dev_err(dwc->dev, "failed to clear STALL on %s\n",
4227 dep->name);
4228 if (dwc->delayed_status)
4229 __dwc3_gadget_ep0_set_halt(ep0, 1);
4230 return;
4231 }
4232
4233 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
4234 if (dwc->delayed_status)
4235 dwc3_ep0_send_delayed_status(dwc);
4236 }
4237
4238 if ((dep->flags & DWC3_EP_DELAY_START) &&
4239 !usb_endpoint_xfer_isoc(dep->endpoint.desc))
4240 __dwc3_gadget_kick_transfer(dep);
4241
4242 dep->flags &= ~DWC3_EP_DELAY_START;
4243 }
4244 break;
4245 case DWC3_DEPEVT_STREAMEVT:
4246 case DWC3_DEPEVT_XFERCOMPLETE:
4247 case DWC3_DEPEVT_RXTXFIFOEVT:
4248 break;
4249 }
4250}
4251
4252static void dwc3_disconnect_gadget(struct dwc3 *dwc)
4253{
4254 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
4255 if (irqs_disabled()) {
4256 spin_unlock_irq(&dwc->lock);
4257 dwc->gadget_driver->disconnect(&dwc->gadget);
4258 spin_lock_irq(&dwc->lock);
4259 } else {
4260 spin_unlock(&dwc->lock);
4261 dwc->gadget_driver->disconnect(&dwc->gadget);
4262 spin_lock(&dwc->lock);
4263 }
4264 }
4265}
4266
4267static void dwc3_suspend_gadget(struct dwc3 *dwc)
4268{
4269 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
4270 spin_unlock(&dwc->lock);
4271 dwc->gadget_driver->suspend(&dwc->gadget);
4272 spin_lock(&dwc->lock);
4273 }
4274 if (dwc->link_state != 0 && dwc->link_state != 1)
4275 dev_err(dwc->dev, "%s: st: %d, link_st: %d 0x%x\n",
4276 __func__, dwc->gadget.state, dwc->link_state,
4277 dwc3_readl(dwc->regs, DWC3_DSTS));
4278
4279 if (dwc->allow_suspend && dwc->gadget.state >= USB_STATE_CONFIGURED
4280 && dwc->link_state == DWC3_LINK_STATE_U3)
4281 dwc3_release_wakeup_event_timeout(DWC3_WAKEUP_TIMEOUT_SEC);
4282}
4283
4284static void dwc3_resume_gadget(struct dwc3 *dwc)
4285{
4286 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
4287 spin_unlock(&dwc->lock);
4288 dwc->gadget_driver->resume(&dwc->gadget);
4289 spin_lock(&dwc->lock);
4290 }
4291 dev_err(dwc->dev, "%s: st: %d, link_st: %d 0x%x\n",
4292 __func__, dwc->gadget.state, dwc->link_state,
4293 dwc3_readl(dwc->regs, DWC3_DSTS));
4294
4295 if (dwc->allow_suspend)
4296 dwc3_acquire_wakeup_event();
4297}
4298
4299#if 0 /* ASR private */
4300static void dwc3_reset_gadget(struct dwc3 *dwc)
4301{
4302 if (!dwc->gadget_driver)
4303 return;
4304
4305 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
4306 spin_unlock(&dwc->lock);
4307 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
4308 spin_lock(&dwc->lock);
4309 }
4310}
4311#endif
4312
4313static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
4314 bool interrupt)
4315{
4316 struct dwc3 *dwc = dep->dwc;
4317 struct dwc3_gadget_ep_cmd_params params;
4318 u32 cmd;
4319 int ret;
4320
4321 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
4322 (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
4323 return;
4324
4325 /*
4326 * NOTICE: We are violating what the Databook says about the
4327 * EndTransfer command. Ideally we would _always_ wait for the
4328 * EndTransfer Command Completion IRQ, but that's causing too
4329 * much trouble synchronizing between us and gadget driver.
4330 *
4331 * We have discussed this with the IP Provider and it was
4332 * suggested to giveback all requests here, but give HW some
4333 * extra time to synchronize with the interconnect. We're using
4334 * an arbitrary 100us delay for that.
4335 *
4336 * Note also that a similar handling was tested by Synopsys
4337 * (thanks a lot Paul) and nothing bad has come out of it.
4338 * In short, what we're doing is:
4339 *
4340 * - Issue EndTransfer WITH CMDIOC bit set
4341 * - Wait 100us
4342 *
4343 * As of IP version 3.10a of the DWC_usb3 IP, the controller
4344 * supports a mode to work around the above limitation. The
4345 * software can poll the CMDACT bit in the DEPCMD register
4346	 * after issuing an EndTransfer command. This mode is enabled
4347 * by writing GUCTL2[14]. This polling is already done in the
4348 * dwc3_send_gadget_ep_cmd() function so if the mode is
4349 * enabled, the EndTransfer command will have completed upon
4350 * returning from this function and we don't need to delay for
4351 * 100us.
4352 *
4353 * This mode is NOT available on the DWC_usb31 IP.
4354 */
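	/*
	 * Concrete example of the command word built below (illustrative
	 * only): a forced, interrupt-driven EndTransfer on resource index 2
	 * is issued as DWC3_DEPCMD_ENDTRANSFER | DWC3_DEPCMD_HIPRI_FORCERM |
	 * DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_PARAM(2).
	 */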
4355
4356 cmd = DWC3_DEPCMD_ENDTRANSFER;
4357 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
4358 cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
4359 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
4360 memset(&params, 0, sizeof(params));
4361 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
4362 WARN_ON_ONCE(ret);
4363 if (ret) {
4364 dev_err(dwc->dev, "cmd: 0x%x failed->%d, 0x%x 0x%x 0x%x\n",
4365 cmd, ret, dwc3_readl(dwc->regs, DWC3_DCFG),
4366 dwc3_readl(dwc->regs, DWC3_DCTL),
4367 dwc3_readl(dwc->regs, DWC3_DSTS));
4368 }
4369 dep->resource_index = 0;
4370
4371 if (!interrupt)
4372 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
4373 else
4374 dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
4375
4376 if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
4377 udelay(100);
4378}
4379
4380static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
4381{
4382 u32 epnum;
4383
4384 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
4385 struct dwc3_ep *dep;
4386 int ret;
4387
4388 dep = dwc->eps[epnum];
4389 if (!dep)
4390 continue;
4391
4392 if (!(dep->flags & DWC3_EP_STALL))
4393 continue;
4394
4395 dep->flags &= ~DWC3_EP_STALL;
4396
4397 ret = dwc3_send_clear_stall_ep_cmd(dep);
4398 WARN_ON_ONCE(ret);
4399 }
4400}
4401
4402static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
4403{
4404 int reg;
4405
4406 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
4407 reg &= ~DWC3_DCTL_INITU1ENA;
4408 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4409
4410 reg &= ~DWC3_DCTL_INITU2ENA;
4411 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4412
4413 dwc3_disconnect_gadget(dwc);
4414
4415 dwc->gadget.speed = USB_SPEED_UNKNOWN;
4416 dwc->setup_packet_pending = false;
4417 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
4418
4419 dwc->connected = false;
4420}
4421
4422static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
4423{
4424 u32 reg;
4425 u32 vbus = 0;
4426 int ret;
4427
4428 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
4429 if (ret) {
4430 vbus = usb_phy_get_vbus(dwc->usb2_phy);
4431 }
4432
4433 /*
4434 * Ideally, dwc3_reset_gadget() would trigger the function
4435 * drivers to stop any active transfers through ep disable.
4436 * However, for functions which defer ep disable, such as mass
4437 * storage, we will need to rely on the call to stop active
4438	 * transfers here, and avoid allowing request queuing.
4439 */
4440 dwc->connected = false;
4441
4442 /*
4443 * WORKAROUND: DWC3 revisions <1.88a have an issue which
4444 * would cause a missing Disconnect Event if there's a
4445 * pending Setup Packet in the FIFO.
4446 *
4447 * There's no suggested workaround on the official Bug
4448 * report, which states that "unless the driver/application
4449 * is doing any special handling of a disconnect event,
4450 * there is no functional issue".
4451 *
4452 * Unfortunately, it turns out that we _do_ some special
4453 * handling of a disconnect event, namely complete all
4454 * pending transfers, notify gadget driver of the
4455 * disconnection, and so on.
4456 *
4457 * Our suggested workaround is to follow the Disconnect
4458 * Event steps here, instead, based on a setup_packet_pending
4459 * flag. Such flag gets set whenever we have a SETUP_PENDING
4460 * status for EP0 TRBs and gets cleared on XferComplete for the
4461 * same endpoint.
4462 *
4463 * Refers to:
4464 *
4465 * STAR#9000466709: RTL: Device : Disconnect event not
4466 * generated if setup packet pending in FIFO
4467 */
4468 if (dwc->revision < DWC3_REVISION_188A) {
4469 if (dwc->setup_packet_pending)
4470 dwc3_gadget_disconnect_interrupt(dwc);
4471 }
4472
4473#if 0 /* ASR private */
4474 dwc3_reset_gadget(dwc);
4475#endif
4476 /*
4477	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a,
4478 * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
4479 * needs to ensure that it sends "a DEPENDXFER command for any active
4480 * transfers."
4481 */
4482 dwc3_stop_active_transfers(dwc);
4483 dwc->connected = true;
4484
4485 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
4486 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
4487 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4488 dwc->test_mode = false;
4489 dwc3_clear_stall_all_ep(dwc);
4490
4491 /* Reset device address to zero */
4492 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
4493 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
4494 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
4495
4496 if ((dwc->gadget.state >= USB_STATE_CONFIGURED) && vbus) {
4497 dwc->usb_do_restart = 1;
4498 } else {
4499		pr_info("st:%d, vbus:%d\n", dwc->gadget.state, vbus);
4500 }
4501
4502 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
4503
4504 /* ASR private */
4505 /* report disconnect; the driver is already quiesced */
4506 if (dwc->gadget_driver && dwc->gadget_driver->disconnect && dwc->gadget.speed != USB_SPEED_UNKNOWN) {
4507 if (irqs_disabled()) {
4508 spin_unlock_irq(&dwc->lock);
4509 dwc->gadget_driver->disconnect(&dwc->gadget);
4510 spin_lock_irq(&dwc->lock);
4511 } else {
4512 spin_unlock(&dwc->lock);
4513 dwc->gadget_driver->disconnect(&dwc->gadget);
4514 spin_lock(&dwc->lock);
4515 }
4516 }
4517
4518#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
4519 usb_os_detect_reset_state();
4520#endif
4521 dwc->bus_reset_received = true;
4522 dev_err(dwc->dev, "reset irq\n");
4523}
4524
4525static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
4526{
4527 struct dwc3_ep *dep;
4528 int ret;
4529 u32 reg;
4530 u8 speed;
4531
4532 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
4533 speed = reg & DWC3_DSTS_CONNECTSPD;
4534 dwc->speed = speed;
4535
4536 /*
4537 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
4538 * each time on Connect Done.
4539 *
4540 * Currently we always use the reset value. If any platform
4541 * wants to set this to a different value, we need to add a
4542 * setting and update GCTL.RAMCLKSEL here.
4543 */
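	/*
	 * A minimal sketch of what such an override could look like (kept
	 * disabled; assumes the DWC3_GCTL_RAMCLKSEL()/DWC3_GCTL_CLK_*
	 * helpers from core.h and is not required on current platforms):
	 */
#if 0
	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
	reg &= ~DWC3_GCTL_RAMCLKSEL(DWC3_GCTL_CLK_MASK);
	reg |= DWC3_GCTL_RAMCLKSEL(DWC3_GCTL_CLK_BUS);
	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
#endif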
4544
4545 switch (speed) {
4546 case DWC3_DSTS_SUPERSPEED_PLUS:
4547 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
4548 dwc->gadget.ep0->maxpacket = 512;
4549 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
4550 if (dwc->allow_suspend) {
4551 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
4552 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
4553 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
4554 }
4555 break;
4556 case DWC3_DSTS_SUPERSPEED:
4557 /*
4558 * WORKAROUND: DWC3 revisions <1.90a have an issue which
4559 * would cause a missing USB3 Reset event.
4560 *
4561 * In such situations, we should force a USB3 Reset
4562 * event by calling our dwc3_gadget_reset_interrupt()
4563 * routine.
4564 *
4565 * Refers to:
4566 *
4567 * STAR#9000483510: RTL: SS : USB3 reset event may
4568 * not be generated always when the link enters poll
4569 */
4570 if (dwc->revision < DWC3_REVISION_190A)
4571 dwc3_gadget_reset_interrupt(dwc);
4572
4573 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
4574 dwc->gadget.ep0->maxpacket = 512;
4575 dwc->gadget.speed = USB_SPEED_SUPER;
4576 if (dwc->allow_suspend) {
4577 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
4578 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
4579 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
4580 }
4581 break;
4582 case DWC3_DSTS_HIGHSPEED:
4583 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
4584 dwc->gadget.ep0->maxpacket = 64;
4585 dwc->gadget.speed = USB_SPEED_HIGH;
4586 break;
4587 case DWC3_DSTS_FULLSPEED:
4588 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
4589 dwc->gadget.ep0->maxpacket = 64;
4590 dwc->gadget.speed = USB_SPEED_FULL;
4591 break;
4592 case DWC3_DSTS_LOWSPEED:
4593 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
4594 dwc->gadget.ep0->maxpacket = 8;
4595 dwc->gadget.speed = USB_SPEED_LOW;
4596 break;
4597 }
4598
4599 dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
4600
4601 /* Enable USB2 LPM Capability */
4602
4603 if ((dwc->revision > DWC3_REVISION_194A) &&
4604 (speed != DWC3_DSTS_SUPERSPEED) &&
4605 (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
4606 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
4607 reg |= DWC3_DCFG_LPM_CAP;
4608 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
4609
4610 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
4611 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
4612
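	/*
	 * For example (values assumed): with hird_threshold = 4 and
	 * is_utmi_l1_suspend = 1, the 5-bit HIRD field programmed just
	 * below via DWC3_DCTL_HIRD_THRES() is 4 | (1 << 4) = 0x14.
	 */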
4613 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
4614 (dwc->is_utmi_l1_suspend << 4));
4615
4616 /*
4617	 * When the dwc3 revision is >= 2.40a, the LPM Erratum is enabled,
4618	 * and DCFG.LPMCap is set, the core responds with an ACK when the
4619	 * BESL value in the LPM token is less than or equal to the LPM
4620	 * NYET threshold.
4621 */
4622 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
4623 && dwc->has_lpm_erratum,
4624 "LPM Erratum not available on dwc3 revisions < 2.40a\n");
4625
4626 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
4627 reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
4628
4629 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4630 } else {
4631 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
4632 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
4633 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4634 }
4635
4636 dep = dwc->eps[0];
4637 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
4638 if (ret) {
4639 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
4640 return;
4641 }
4642
4643 dep = dwc->eps[1];
4644 ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
4645 if (ret) {
4646 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
4647 return;
4648 }
4649
4650 /*
4651 * Configure PHY via GUSB3PIPECTLn if required.
4652 *
4653 * Update GTXFIFOSIZn
4654 *
4655 * In both cases reset values should be sufficient.
4656 */
4657}
4658
4659static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
4660{
4661 /*
4662 * TODO take core out of low power mode when that's
4663 * implemented.
4664 */
4665
4666 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
4667 spin_unlock(&dwc->lock);
4668 dwc->gadget_driver->resume(&dwc->gadget);
4669 spin_lock(&dwc->lock);
4670 }
4671 dev_err(dwc->dev, "%s: st: %d, link_st: %d 0x%x\n",
4672 __func__, dwc->gadget.state, dwc->link_state,
4673 dwc3_readl(dwc->regs, DWC3_DSTS));
4674
4675 if (dwc->allow_suspend)
4676 dwc3_acquire_wakeup_event();
4677}
4678
4679static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
4680 unsigned int evtinfo)
4681{
4682 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
4683 unsigned int pwropt;
4684
4685 /*
4686	 * WORKAROUND: DWC3 revisions < 2.50a have an issue when configured
4687	 * without Hibernation mode enabled, which shows up when the device
4688	 * detects a host-initiated U3 exit.
4689 *
4690 * In that case, device will generate a Link State Change Interrupt
4691 * from U3 to RESUME which is only necessary if Hibernation is
4692 * configured in.
4693 *
4694 * There are no functional changes due to such spurious event and we
4695 * just need to ignore it.
4696 *
4697 * Refers to:
4698 *
4699 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
4700 * operational mode
4701 */
4702 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
4703 if ((dwc->revision < DWC3_REVISION_250A) &&
4704 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
4705 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
4706 (next == DWC3_LINK_STATE_RESUME)) {
4707 return;
4708 }
4709 }
4710
4711 /*
4712	 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
4713	 * on the link partner, the USB session might do multiple entries into
4714	 * and exits from low power states before a transfer takes place.
4715 *
4716 * Due to this problem, we might experience lower throughput. The
4717 * suggested workaround is to disable DCTL[12:9] bits if we're
4718 * transitioning from U1/U2 to U0 and enable those bits again
4719 * after a transfer completes and there are no pending transfers
4720 * on any of the enabled endpoints.
4721 *
4722 * This is the first half of that workaround.
4723 *
4724 * Refers to:
4725 *
4726 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
4727 * core send LGO_Ux entering U0
4728 */
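	/*
	 * In other words (descriptive note): on a U1/U2 -> U0 transition the
	 * currently enabled INITU1ENA/ACCEPTU1ENA/INITU2ENA/ACCEPTU2ENA bits
	 * are stashed in dwc->u1u2 and cleared from DCTL here; they are
	 * OR'ed back into DCTL from
	 * dwc3_gadget_endpoint_transfer_in_progress() once every enabled
	 * endpoint's started_list has drained.
	 */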
4729 if (dwc->revision < DWC3_REVISION_183A) {
4730 if (next == DWC3_LINK_STATE_U0) {
4731 u32 u1u2;
4732 u32 reg;
4733
4734 switch (dwc->link_state) {
4735 case DWC3_LINK_STATE_U1:
4736 case DWC3_LINK_STATE_U2:
4737 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
4738 u1u2 = reg & (DWC3_DCTL_INITU2ENA
4739 | DWC3_DCTL_ACCEPTU2ENA
4740 | DWC3_DCTL_INITU1ENA
4741 | DWC3_DCTL_ACCEPTU1ENA);
4742
4743 if (!dwc->u1u2)
4744 dwc->u1u2 = reg & u1u2;
4745
4746 reg &= ~u1u2;
4747
4748 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
4749 break;
4750 default:
4751 /* do nothing */
4752 break;
4753 }
4754 }
4755 }
4756
4757 switch (next) {
4758 case DWC3_LINK_STATE_U1:
4759 if (dwc->speed == USB_SPEED_SUPER)
4760 dwc3_suspend_gadget(dwc);
4761 break;
4762 case DWC3_LINK_STATE_U2:
4763 case DWC3_LINK_STATE_U3:
4764 dwc3_suspend_gadget(dwc);
4765 break;
4766 case DWC3_LINK_STATE_RESUME:
4767 dwc3_resume_gadget(dwc);
4768 break;
4769	/* RX_DET in HS means early suspend */
4770 case DWC3_LINK_STATE_RX_DET:
4771 if ((evtinfo & (0x1 << 4)) == 0)
4772 dwc->suspend_received = true;
4773 break;
4774 default:
4775 /* do nothing */
4776 break;
4777 }
4778
4779 dwc->link_state = next;
4780
4781 if (dwc->allow_suspend && dwc->link_state != DWC3_LINK_STATE_U3)
4782 dwc3_acquire_wakeup_event();
4783}
4784
4785static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
4786 unsigned int evtinfo)
4787{
4788 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
4789
4790 if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
4791 dwc3_suspend_gadget(dwc);
4792
4793 dev_err(dwc->dev, "%s: st: %d, link_st: %d 0x%x\n",
4794 __func__, dwc->gadget.state, dwc->link_state,
4795 dwc3_readl(dwc->regs, DWC3_DSTS));
4796 dwc->link_state = next;
4797 if (dwc->allow_suspend && dwc->gadget.state >= USB_STATE_CONFIGURED
4798 && dwc->link_state == DWC3_LINK_STATE_U3)
4799 dwc3_release_wakeup_event_timeout(DWC3_WAKEUP_TIMEOUT_SEC);
4800}
4801
4802static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
4803 unsigned int evtinfo)
4804{
4805 unsigned int is_ss = evtinfo & BIT(4);
4806
4807 /*
4808	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
4809	 * has a known issue which can cause USB CV TD.9.23 to fail
4810 * randomly.
4811 *
4812 * Because of this issue, core could generate bogus hibernation
4813 * events which SW needs to ignore.
4814 *
4815 * Refers to:
4816 *
4817 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
4818 * Device Fallback from SuperSpeed
4819 */
4820 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
4821 return;
4822
4823 /* enter hibernation here */
4824}
4825
4826static void dwc3_gadget_interrupt(struct dwc3 *dwc,
4827 const struct dwc3_event_devt *event)
4828{
4829 switch (event->type) {
4830 case DWC3_DEVICE_EVENT_DISCONNECT:
4831 dwc3_gadget_disconnect_interrupt(dwc);
4832 break;
4833 case DWC3_DEVICE_EVENT_RESET:
4834 dwc3_gadget_reset_interrupt(dwc);
4835 break;
4836 case DWC3_DEVICE_EVENT_CONNECT_DONE:
4837 dwc3_gadget_conndone_interrupt(dwc);
4838 break;
4839 case DWC3_DEVICE_EVENT_WAKEUP:
4840 dwc3_gadget_wakeup_interrupt(dwc);
4841 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
4842 asr_udc_notify_resume_event(dwc, 0);
4843 break;
4844 case DWC3_DEVICE_EVENT_HIBER_REQ:
4845 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
4846 "unexpected hibernation event\n"))
4847 break;
4848
4849 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
4850 break;
4851 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
4852 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
4853 break;
4854 case DWC3_DEVICE_EVENT_EOPF:
4855 dwc->suspend_received = true;
4856 /* It changed to be suspend event for version 2.30a and above */
4857 if (dwc->revision >= DWC3_REVISION_230A) {
4858 /*
4859	 * Ignore the suspend event until the gadget enters the
4860	 * USB_STATE_CONFIGURED state.
4861 */
4862 if (dwc->gadget.state >= USB_STATE_CONFIGURED)
4863 dwc3_gadget_suspend_interrupt(dwc,
4864 event->event_info);
4865 }
4866 break;
4867 case DWC3_DEVICE_EVENT_SOF:
4868 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
4869 case DWC3_DEVICE_EVENT_CMD_CMPL:
4870 case DWC3_DEVICE_EVENT_OVERFLOW:
4871 break;
4872 default:
4873 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
4874 }
4875}
4876
4877static void dwc3_process_event_entry(struct dwc3 *dwc,
4878 const union dwc3_event *event)
4879{
4880 trace_dwc3_event(event->raw, dwc);
4881
4882 if (!event->type.is_devspec) {
4883 dwc3_endpoint_interrupt(dwc, &event->depevt);
4884 } else if (event->type.type == DWC3_EVENT_TYPE_DEV) {
4885#ifdef CONFIG_CPU_ASR1901
4886 if (0x110301 != event->raw && 0x120301 != event->raw) {
4887 if (0x100301 == event->raw) {
4888 dev_err_ratelimited(dwc->dev, "dev event: 0x%x ep0state: %d\n", event->raw, dwc->ep0state);
4889 usb_phy_dump_cfg(dwc->usb2_phy);
4890 } else {
4891 dev_err(dwc->dev, "dev event: 0x%x\n", event->raw);
4892 }
4893 }
4894#else
4895 if (0x100301 != event->raw && 0x110301 != event->raw
4896 && 0x120301 != event->raw) {
4897 dev_err(dwc->dev, "dev event: 0x%x\n", event->raw);
4898 }
4899#endif
4900 dwc3_gadget_interrupt(dwc, &event->devt);
4901 } else {
4902 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
4903 }
4904}
4905
4906static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
4907{
4908 struct dwc3 *dwc = evt->dwc;
4909 irqreturn_t ret = IRQ_NONE;
4910 int left;
4911	u32 reg;
4912	u32 count;	/* used by both the OS-detect and usb_do_restart paths */
4915
4916 left = evt->count;
4917
4918 if (!(evt->flags & DWC3_EVENT_PENDING))
4919 return IRQ_NONE;
4920
4921#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
4922 if (unlikely(!dwc->active)) {
4923 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
4924 reg &= DWC3_GEVNTCOUNT_MASK;
4925 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
4926 /* Unmask interrupt */
4927 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4928 reg &= ~DWC3_GEVNTSIZ_INTMASK;
4929 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
4930 evt->lpos = 0;
4931 evt->count = 0;
4932 evt->flags &= ~DWC3_EVENT_PENDING;
4933 return IRQ_HANDLED;
4934 }
4935#endif
4936
4937 while (left > 0 && dwc->active) {
4938 union dwc3_event event;
4939
4940 event.raw = *(u32 *) (evt->cache + evt->lpos);
4941
4942 dwc3_process_event_entry(dwc, &event);
4943
4944 /*
4945	 * FIXME: we wrap around correctly to the next entry as
4946	 * almost all entries are 4 bytes in size. There is one
4947	 * entry type which is 12 bytes: a regular entry followed
4948	 * by 8 bytes of data. At the moment it is not clear how
4949	 * such an entry is laid out when it lands right at the
4950	 * ring boundary, so worry about that once we try to
4951	 * handle it.
4952 */
4953 evt->lpos = (evt->lpos + 4) % evt->length;
4954 left -= 4;
4955
4956#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
4957 if (unlikely(os_detect_is_done())) {
4958 dev_info(dwc->dev, "%s: break on os detect done\n", __func__);
4959 left = 0;
4960 break;
4961 }
4962#endif
4963 }
4964
4965#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
4966 if (unlikely(os_detect_is_done())) {
4967 dev_info(dwc->dev, "%s: handle os detect done\n", __func__);
4968 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
4969 count &= DWC3_GEVNTCOUNT_MASK;
4970 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
4971 dwc3_gadget_run_stop(dwc, 0, false);
4972 /* Unmask interrupt */
4973 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4974 reg &= ~DWC3_GEVNTSIZ_INTMASK;
4975 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
4976 evt->count = 0;
4977 evt->flags &= ~DWC3_EVENT_PENDING;
4978 evt->lpos = 0;
4979 memset(evt->buf, 0x0, DWC3_EVENT_BUFFERS_SIZE);
4980 os_detect_clear_done();
4981 return IRQ_HANDLED;
4982 }
4983#endif
4984 if (unlikely(!dwc->active)) {
4985 dev_info(dwc->dev, "dwc3 is inactive\n");
4986 reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
4987 reg &= DWC3_GEVNTCOUNT_MASK;
4988 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
4989 evt->lpos = 0;
4990 }
4991
4992 evt->count = 0;
4993 evt->flags &= ~DWC3_EVENT_PENDING;
4994 ret = IRQ_HANDLED;
4995
4996 /* Unmask interrupt */
4997 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4998 reg &= ~DWC3_GEVNTSIZ_INTMASK;
4999 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
5000
5001 if (dwc->imod_interval) {
5002 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
5003 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
5004 }
5005
5006 if (dwc->usb_do_restart) {
5007 pr_info("!!!!!dwc3 usb restart\n");
5008 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
5009 count &= DWC3_GEVNTCOUNT_MASK;
5010 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
5011 dwc3_gadget_run_stop(dwc, 0, false);
5012
5013 if (work_pending(&dwc->usb_restart_work.work))
5014 cancel_delayed_work(&dwc->usb_restart_work);
5015
5016#ifdef CONFIG_DWC3_HWSULOG
5017 hwsulog_set_clear_stop_flag(true);
5018#endif
5019 schedule_delayed_work(&dwc->usb_restart_work, 0);
5020 }
5021
5022 return ret;
5023}
5024
5025static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
5026{
5027 struct dwc3_event_buffer *evt = _evt;
5028 struct dwc3 *dwc = evt->dwc;
5029 unsigned long flags;
5030 irqreturn_t ret = IRQ_NONE;
5031
5032	// ASR private: skip local_bh_disable()/local_bh_enable(); ASR has no such issue
5033#ifndef CONFIG_ASR_TOE
5034 //local_bh_disable();
5035#endif
5036 spin_lock_irqsave(&dwc->lock, flags);
5037 ret = dwc3_process_event_buf(evt);
5038 spin_unlock_irqrestore(&dwc->lock, flags);
5039#ifndef CONFIG_ASR_TOE
5040 //local_bh_enable();
5041#endif
5042
5043 return ret;
5044}
5045
5046static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
5047{
5048 struct dwc3 *dwc = evt->dwc;
5049 u32 amount;
5050 u32 count;
5051 u32 reg;
5052
5053#if 0 /* ASR private */
5054 if (pm_runtime_suspended(dwc->dev)) {
5055 dwc->pending_events = true;
5056 /*
5057 * Trigger runtime resume. The get() function will be balanced
5058 * after processing the pending events in dwc3_process_pending
5059 * events().
5060 */
5061 pm_runtime_get(dwc->dev);
5062 disable_irq_nosync(dwc->irq_gadget);
5063 return IRQ_HANDLED;
5064 }
5065#endif
5066
5067 /*
5068 * With PCIe legacy interrupt, test shows that top-half irq handler can
5069 * be called again after HW interrupt deassertion. Check if bottom-half
5070 * irq event handler completes before caching new event to prevent
5071 * losing events.
5072 */
5073 if (evt->flags & DWC3_EVENT_PENDING)
5074 return IRQ_HANDLED;
5075
5076 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
5077 count &= DWC3_GEVNTCOUNT_MASK;
5078 if (!count)
5079 return IRQ_NONE;
5080
5081 evt->count = count;
5082 evt->flags |= DWC3_EVENT_PENDING;
5083
5084 /* Mask interrupt */
5085 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
5086 reg |= DWC3_GEVNTSIZ_INTMASK;
5087 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
5088
5089 amount = min(count, evt->length - evt->lpos);
5090 memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
5091
5092 if (amount < count)
5093 memcpy(evt->cache, evt->buf, count - amount);
5094
5095 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
5096
5097 return IRQ_WAKE_THREAD;
5098}
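/*
 * Worked example for the wrap-around copy above (numbers assumed): with
 * evt->length = 4096, evt->lpos = 4088 and count = 16, the first memcpy()
 * grabs the 8 bytes up to the end of the ring and the second memcpy()
 * copies the remaining 8 bytes from the start of the buffer into the
 * start of the cache.
 */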
5099
5100static irqreturn_t dwc3_interrupt(int irq, void *_evt)
5101{
5102 struct dwc3_event_buffer *evt = _evt;
5103
5104 return dwc3_check_event_buf(evt);
5105}
5106
5107#ifdef CONFIG_USB_DWC3_ASR_OTG
5108static int asr_usb_vbus_session(struct usb_gadget *gadget, int is_active)
5109{
5110 struct dwc3 *dwc;
5111 unsigned long flags;
5112 int retval = 0;
5113 static unsigned int vbus = 0;
5114 static bool first_vbus = true;
5115
5116 dwc = container_of(gadget, struct dwc3, gadget);
5117 dwc->vbus_active = (is_active != 0);
5118 dev_info(dwc->dev, "%s: softconnect %d, vbus_active %d, pre_chrgr: %d\n",
5119 __func__, dwc->softconnect, dwc->vbus_active, dwc->prev_charger_type);
5120
5121 if (work_pending(&dwc->usb_restart_work.work)) {
5122 dev_info(dwc->dev, "cancel restart work...");
5123 cancel_delayed_work_sync(&dwc->usb_restart_work);
5124 dev_info(dwc->dev, "done\n");
5125 }
5126
5127 if (!first_vbus) {
5128 if (vbus == dwc->vbus_active) {
5129 dev_info(dwc->dev, "!!!skip vbus event %d -> %d\n", vbus, dwc->vbus_active);
5130 if (vbus)
5131 pm_stay_awake(dwc->dev);
5132 return 0;
5133 }
5134 }
5135 if (first_vbus)
5136 first_vbus = false;
5137 vbus = dwc->vbus_active;
5138
5139 pr_emerg("vbus session otg state: %d - %s\n",
5140 dwc->otg_state, usb_otg_state_string(dwc->otg_state));
5141
5142 if (dwc->otg_state != OTG_STATE_B_IDLE
5143 && dwc->otg_state != OTG_STATE_B_PERIPHERAL) {
5144 pr_info("vbus exit for host mode\n");
5145 return 0;
5146 }
5147
5148 mutex_lock(&usb_con_mutex);
5149 dwc->prev_charger_type = dwc->charger_type;
5150 usb_phy_set_suspend(dwc->usb2_phy, 0);
5151 if (dwc->vbus_active) {
5152 pm_stay_awake(dwc->dev);
5153 pm_qos_update_request(&dwc->qos_idle, dwc->lpm_qos);
5154 dwc->charger_type = DEFAULT_CHARGER;
5155 } else {
5156 pm_wakeup_event(dwc->dev, 5000);
5157 pm_qos_update_request_timeout(&dwc->qos_idle,
5158 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE, (5000 * 1000));
5159 dwc->charger_type = NULL_CHARGER;
5160 }
5161
5162 if (work_pending(&dwc->delayed_charger_work.work))
5163 cancel_delayed_work(&dwc->delayed_charger_work);
5164
5165 if (dwc->charger_type == NULL_CHARGER)
5166 schedule_delayed_work(&dwc->delayed_charger_work, 0);
5167
5168 if (dwc->charger_type == DEFAULT_CHARGER) {
5169 int enum_delay = ENUMERATION_DELAY;
5170 dev_info(dwc->dev, "1st stage charger type: %s\n",
5171 charger_type(dwc->charger_type));
5172 call_charger_notifier(dwc);
5173 schedule_delayed_work(&dwc->delayed_charger_work, enum_delay);
5174 }
5175
5176 spin_lock_irqsave(&dwc->lock, flags);
5177 if (dwc->gadget_driver && dwc->softconnect && dwc->vbus_active) {
5178 if (work_pending(&dwc->usb_restart_work.work))
5179 cancel_delayed_work(&dwc->usb_restart_work);
5180 __dwc3_gadget_start(dwc);
5181 dwc3_gadget_run_stop(dwc, 1, false);
5182 spin_unlock_irqrestore(&dwc->lock, flags);
5183 dwc3_charger_type_confirm(dwc);
5184 spin_lock_irqsave(&dwc->lock, flags);
5185 } else if (dwc->gadget_driver && dwc->softconnect) {
5186 if (dwc->prev_charger_type != DCP_CHARGER) {
5187 dwc3_stop_active_transfers(dwc);
5188 __dwc3_gadget_stop(dwc);
5189 dwc3_gadget_run_stop(dwc, 0, false);
5190 if (cpu_is_asr1901() || cpu_is_asr1906())
5191 spin_unlock_irqrestore(&dwc->lock, flags);
5192 dwc3_controller_reset(dwc);
5193 if (cpu_is_asr1901() || cpu_is_asr1906())
5194 spin_lock_irqsave(&dwc->lock, flags);
5195 /* __dwc3_gadget_start(dwc); */
5196 }
5197#ifdef CONFIG_DWC3_HWSULOG
5198 hwsulog_on = false;
5199#endif
5200 dwc->ev_buf->count = 0;
5201 dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
5202 dwc->ev_buf->lpos = 0;
5203 memset(dwc->ev_buf->buf, 0x0, DWC3_EVENT_BUFFERS_SIZE);
5204 memset(dwc->ev_buf->cache, 0x0, DWC3_EVENT_BUFFERS_SIZE);
5205 }
5206 spin_unlock_irqrestore(&dwc->lock, flags);
5207
5208#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
5209 if ((!dwc->vbus_active) && dwc->gadget_driver && dwc->softconnect
5210 && (dwc->prev_charger_type == SDP_CHARGER
5211 || dwc->prev_charger_type == CDP_CHARGER)) {
5212 mutex_unlock(&usb_con_mutex);
5213 cancel_reconfigure_work();
5214 /* restore os type to default state */
5215 android_dev_enable(0);
5216 usb_os_restore();
5217 android_dev_enable(1);
5218 mutex_lock(&usb_con_mutex);
5219 dwc->prev_charger_type = NULL_CHARGER;
5220 }
5221#endif
5222
5223 spin_lock_irqsave(&dwc->lock, flags);
5224 if (!dwc->vbus_active) {
5225 dwc->prev_charger_type = NULL_CHARGER;
5226 usb_phy_set_suspend(dwc->usb2_phy, 1);
5227 }
5228 spin_unlock_irqrestore(&dwc->lock, flags);
5229 mutex_unlock(&usb_con_mutex);
5230 return retval;
5231}
5232#else
5233static int asr_usb_vbus_session(struct usb_gadget *gadget, int is_active)
5234{
5235 struct dwc3 *dwc;
5236 unsigned long flags;
5237 int retval = 0;
5238
5239 dwc = container_of(gadget, struct dwc3, gadget);
5240 dwc->vbus_active = (is_active != 0);
5241 dev_info(dwc->dev, "%s: softconnect %d, vbus_active %d, pre_chrgr: %d\n",
5242 __func__, dwc->softconnect, dwc->vbus_active, dwc->prev_charger_type);
5243
5244 if (work_pending(&dwc->usb_restart_work.work)) {
5245 dev_info(dwc->dev, "cancel restart work...");
5246 cancel_delayed_work_sync(&dwc->usb_restart_work);
5247 dev_info(dwc->dev, "done\n");
5248 }
5249
5250 mutex_lock(&usb_con_mutex);
5251 dwc->prev_charger_type = dwc->charger_type;
5252 usb_phy_set_suspend(dwc->usb2_phy, 0);
5253 if (dwc->vbus_active) {
5254 pm_stay_awake(dwc->dev);
5255 pm_qos_update_request(&dwc->qos_idle, dwc->lpm_qos);
5256 dwc->charger_type = DEFAULT_CHARGER;
5257 } else {
5258 pm_wakeup_event(dwc->dev, 5000);
5259 pm_qos_update_request_timeout(&dwc->qos_idle,
5260 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE, (5000 * 1000));
5261 dwc->charger_type = NULL_CHARGER;
5262 }
5263
5264 if (work_pending(&dwc->delayed_charger_work.work))
5265 cancel_delayed_work(&dwc->delayed_charger_work);
5266
5267 if (dwc->charger_type == NULL_CHARGER)
5268 schedule_delayed_work(&dwc->delayed_charger_work, 0);
5269
5270 if (dwc->charger_type == DEFAULT_CHARGER) {
5271 int enum_delay = ENUMERATION_DELAY;
5272 dev_info(dwc->dev, "1st stage charger type: %s\n",
5273 charger_type(dwc->charger_type));
5274 call_charger_notifier(dwc);
5275 schedule_delayed_work(&dwc->delayed_charger_work, enum_delay);
5276 }
5277
5278 spin_lock_irqsave(&dwc->lock, flags);
5279 if (dwc->gadget_driver && dwc->softconnect && dwc->vbus_active) {
5280 if (work_pending(&dwc->usb_restart_work.work))
5281 cancel_delayed_work(&dwc->usb_restart_work);
5282 dwc3_gadget_run_stop(dwc, 1, false);
5283 spin_unlock_irqrestore(&dwc->lock, flags);
5284 dwc3_charger_type_confirm(dwc);
5285 spin_lock_irqsave(&dwc->lock, flags);
5286 } else if (dwc->gadget_driver && dwc->softconnect) {
5287 if (dwc->prev_charger_type != DCP_CHARGER) {
5288 dwc3_stop_active_transfers(dwc);
5289 __dwc3_gadget_stop(dwc);
5290 dwc3_gadget_run_stop(dwc, 0, false);
5291 if (cpu_is_asr1901() || cpu_is_asr1906())
5292 spin_unlock_irqrestore(&dwc->lock, flags);
5293 dwc3_controller_reset(dwc);
5294 if (cpu_is_asr1901() || cpu_is_asr1906())
5295 spin_lock_irqsave(&dwc->lock, flags);
5296 __dwc3_gadget_start(dwc);
5297 }
5298
5299#ifdef CONFIG_DWC3_HWSULOG
5300 hwsulog_on = false;
5301#endif
5302 dwc->ev_buf->count = 0;
5303 dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
5304 dwc->ev_buf->lpos = 0;
5305 memset(dwc->ev_buf->buf, 0x0, DWC3_EVENT_BUFFERS_SIZE);
5306 memset(dwc->ev_buf->cache, 0x0, DWC3_EVENT_BUFFERS_SIZE);
5307 }
5308 spin_unlock_irqrestore(&dwc->lock, flags);
5309
5310#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
5311 if ((!dwc->vbus_active) && dwc->gadget_driver && dwc->softconnect
5312 && (dwc->prev_charger_type == SDP_CHARGER
5313 || dwc->prev_charger_type == CDP_CHARGER)) {
5314 mutex_unlock(&usb_con_mutex);
5315 cancel_reconfigure_work();
5316 /* restore os type to default state */
5317 android_dev_enable(0);
5318 usb_os_restore();
5319 android_dev_enable(1);
5320 mutex_lock(&usb_con_mutex);
5321 dwc->prev_charger_type = NULL_CHARGER;
5322 }
5323#endif
5324
5325 spin_lock_irqsave(&dwc->lock, flags);
5326 if (!dwc->vbus_active) {
5327 dwc->prev_charger_type = NULL_CHARGER;
5328 usb_phy_set_suspend(dwc->usb2_phy, 1);
5329 }
5330 spin_unlock_irqrestore(&dwc->lock, flags);
5331 mutex_unlock(&usb_con_mutex);
5332 return retval;
5333}
5334#endif
5335
5336static int asr_usb_vbus_notifier_call(struct notifier_block *nb,
5337 unsigned long val, void *v)
5338{
5339 struct dwc3 *dwc = container_of(nb, struct dwc3, notifier);
5340	/* Polling VBUS and initializing the PHY may take too much time */
5341 if (dwc->qwork && val == EVENT_VBUS)
5342 queue_work(dwc->qwork, &dwc->vbus_work);
5343
5344 return 0;
5345}
5346
5347static void asr_usb_vbus_work(struct work_struct *work)
5348{
5349 struct dwc3 *dwc;
5350 unsigned int vbus = 0;
5351 int ret;
5352 static bool first_vbus = true;
5353
5354 dwc = container_of(work, struct dwc3, vbus_work);
5355 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
5356 if (ret) {
5357 vbus = usb_phy_get_vbus(dwc->usb2_phy);
5358 }
5359
5360 if (!first_vbus) {
5361 if (vbus == dwc->vbus_active) {
5362 dev_info(dwc->dev, "!!!skip vbus event %d -> %d\n", vbus, dwc->vbus_active);
5363 if (vbus)
5364 pm_stay_awake(dwc->dev);
5365 return;
5366 }
5367 }
5368
5369 if (first_vbus)
5370 first_vbus = false;
5371 dev_info(dwc->dev, "vbus is %s.\n", vbus ? "on" : "off");
5372 asr_usb_vbus_session(&dwc->gadget, vbus);
5373}

5374static int dwc3_gadget_get_irq(struct dwc3 *dwc)
5375{
5376 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
5377 int irq;
5378
5379 irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
5380 if (irq > 0)
5381 goto out;
5382
5383 if (irq == -EPROBE_DEFER)
5384 goto out;
5385
5386 irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
5387 if (irq > 0)
5388 goto out;
5389
5390 if (irq == -EPROBE_DEFER)
5391 goto out;
5392
5393 irq = platform_get_irq(dwc3_pdev, 0);
5394 if (irq > 0)
5395 goto out;
5396
5397 if (!irq)
5398 irq = -EINVAL;
5399
5400out:
5401 return irq;
5402}
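/*
 * Descriptive note: the lookup above prefers a dedicated "peripheral"
 * interrupt, falls back to a combined "dwc_usb3" interrupt, and finally
 * to the first unnamed platform interrupt; -EPROBE_DEFER is propagated
 * untouched so probing can be retried later.
 */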
5403
5404#ifdef CONFIG_USB_DWC3_ASR_OTG
5405extern int usb_otg_set_peripheral(struct usb_gadget *gadget);
5406extern int usb_otg_set_phy(struct usb_phy *usb2phy, struct usb_phy *usb3phy);
5407extern int usb_otg_set_controller(struct dwc3 *dwc);
5408#endif
5409
5410/**
5411 * dwc3_gadget_init - initializes gadget related registers
5412 * @dwc: pointer to our controller context structure
5413 *
5414 * Returns 0 on success otherwise negative errno.
5415 */
5416int dwc3_gadget_init(struct dwc3 *dwc)
5417{
5418 int ret;
5419 int irq;
5420
5421 irq = dwc3_gadget_get_irq(dwc);
5422 if (irq < 0) {
5423 ret = irq;
5424 goto err0;
5425 }
5426
5427 dwc->irq_gadget = irq;
5428
5429 dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
5430 sizeof(*dwc->ep0_trb) * 2,
5431 &dwc->ep0_trb_addr, GFP_KERNEL);
5432 if (!dwc->ep0_trb) {
5433 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
5434 ret = -ENOMEM;
5435 goto err0;
5436 }
5437
5438 dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
5439 if (!dwc->setup_buf) {
5440 ret = -ENOMEM;
5441 goto err1;
5442 }
5443
5444 dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
5445 &dwc->bounce_addr, GFP_KERNEL);
5446 if (!dwc->bounce) {
5447 ret = -ENOMEM;
5448 goto err2;
5449 }
5450
5451 dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
5452 &dwc->ctrl_req_addr, GFP_KERNEL);
5453 if (!dwc->ctrl_req) {
5454 ret = -ENOMEM;
5455 goto err3;
5456 }
5457
5458 init_completion(&dwc->ep0_in_setup);
5459
5460 dwc->gadget.ops = &dwc3_gadget_ops;
5461 dwc->gadget.speed = USB_SPEED_UNKNOWN;
5462 dwc->gadget.sg_supported = true;
5463 dwc->gadget.name = "dwc3-gadget";
5464 dwc->gadget.lpm_capable = true;
5465
5466 /*
5467 * FIXME We might be setting max_speed to <SUPER, however versions
5468 * <2.20a of dwc3 have an issue with metastability (documented
5469 * elsewhere in this driver) which tells us we can't set max speed to
5470 * anything lower than SUPER.
5471 *
5472 * Because gadget.max_speed is only used by composite.c and function
5473 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
5474 * to happen so we avoid sending SuperSpeed Capability descriptor
5475 * together with our BOS descriptor as that could confuse host into
5476 * thinking we can handle super speed.
5477 *
5478 * Note that, in fact, we won't even support GetBOS requests when speed
5479 * is less than super speed because we don't have means, yet, to tell
5480 * composite.c that we are USB 2.0 + LPM ECN.
5481 */
5482 if (dwc->revision < DWC3_REVISION_220A &&
5483 !dwc->dis_metastability_quirk)
5484 dev_info(dwc->dev, "changing max_speed on rev %08x\n",
5485 dwc->revision);
5486
5487 if (cpu_is_asr1828() || cpu_is_asr1903())
5488 dwc->gadget.max_speed = USB_SPEED_SUPER;
5489 else
5490 dwc->gadget.max_speed = dwc->maximum_speed;
5491
5492 /*
5493 * REVISIT: Here we should clear all pending IRQs to be
5494 * sure we're starting from a well known location.
5495 */
5496
5497 ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
5498 if (ret)
5499 goto err4;
5500
5501 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
5502 if (ret) {
5503 dev_err(dwc->dev, "failed to register udc\n");
5504 goto err5;
5505 }
5506
5507 dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
5508 dwc->qwork = create_singlethread_workqueue("asr_vbus_queue");
5509 if (!dwc->qwork) {
5510 dev_err(dwc->dev, "cannot create workqueue\n");
5511 ret = -ENOMEM;
5512 goto err5;
5513 }
5514 INIT_WORK(&dwc->vbus_work, asr_usb_vbus_work);
5515 dwc->notifier.notifier_call = asr_usb_vbus_notifier_call;
5516 pxa_usb_register_notifier(PXA_USB_DEV_OTG, &dwc->notifier);
5517
5518 INIT_DELAYED_WORK(&dwc->delayed_charger_work, do_delayed_charger_work);
5519 dwc->charger_type = NULL_CHARGER;
5520
5521 INIT_DELAYED_WORK(&dwc->usb_restart_work, dwc3_restart_work);
5522
5523 device_init_wakeup(dwc->dev, 1);
5524
5525#ifdef CONFIG_DWC3_HWSULOG
5526 dwc->hwsulog_regs = dwc->regs - DWC3_GLOBALS_REGS_START + SULOG_BASE_OFFSET;
5527 if (cpu_is_asr1903() || cpu_is_asr1828() || cpu_is_asr1901() || cpu_is_asr1906())
5528 register_hwsulog_udc_func(asr_udc_hwsulog_callback);
5529#endif
5530
5531#ifdef CONFIG_USB_DWC3_ASR_OTG
5532 usb_otg_set_peripheral(&dwc->gadget);
5533 usb_otg_set_phy(dwc->usb2_phy, dwc->usb3_phy);
5534 usb_otg_set_controller(dwc);
5535#endif
5536 dwc->phys_mem_end = PAGE_SIZE * max_pfn;
5537 dev_info(dwc->dev, "phys_mem_end: 0x%lx\n", dwc->phys_mem_end);
5538
5539 the_controller = dwc;
5540
5541 return 0;
5542
5543err5:
5544 dwc3_gadget_free_endpoints(dwc);
5545
5546err4:
5547 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), dwc->ctrl_req,
5548 dwc->ctrl_req_addr);
5549
5550err3:
5551 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
5552 dwc->bounce_addr);
5553
5554err2:
5555 kfree(dwc->setup_buf);
5556
5557err1:
5558 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
5559 dwc->ep0_trb, dwc->ep0_trb_addr);
5560
5561err0:
5562 return ret;
5563}
5564
5565/* -------------------------------------------------------------------------- */
5566
5567void dwc3_gadget_exit(struct dwc3 *dwc)
5568{
5569 usb_del_gadget_udc(&dwc->gadget);
5570 dwc3_gadget_free_endpoints(dwc);
5571 dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
5572 dwc->bounce_addr);
5573 kfree(dwc->setup_buf);
5574 dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
5575 dwc->ep0_trb, dwc->ep0_trb_addr);
5576}
5577
5578int dwc3_gadget_suspend(struct dwc3 *dwc)
5579{
5580 if (!dwc->gadget_driver)
5581 return 0;
5582
5583 dwc3_gadget_run_stop(dwc, false, false);
5584 dwc3_disconnect_gadget(dwc);
5585 __dwc3_gadget_stop(dwc);
5586
5587 return 0;
5588}
5589
5590int dwc3_gadget_resume(struct dwc3 *dwc)
5591{
5592 int ret;
5593
5594 if (!dwc->gadget_driver || !dwc->softconnect)
5595 return 0;
5596
5597 ret = __dwc3_gadget_start(dwc);
5598 if (ret < 0)
5599 goto err0;
5600
5601 ret = dwc3_gadget_run_stop(dwc, true, false);
5602 if (ret < 0)
5603 goto err1;
5604
5605 return 0;
5606
5607err1:
5608 __dwc3_gadget_stop(dwc);
5609
5610err0:
5611 return ret;
5612}