// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 * Ben Dooks <ben@simtec.co.uk>
 * http://armlinux.simtec.co.uk/
 *
 * S3C USB2.0 High-speed / OtG driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/usb/composite.h>
#include <linux/platform_data/mv_usb.h>
#include <linux/usb/mv_usb2_phy.h>
#include <linux/pm_qos.h>
#include <linux/power_supply.h>
#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
#include <linux/usb/composite.h>
#endif
#include <linux/memblock.h>

#include "core.h"
#include "hw.h"

#define ENUMERATION_DELAY	(2 * HZ)
#define DWC2_WAKEUP_TIMEOUT_SEC	(5)

#ifdef CONFIG_USB_G_ANDROID
extern void android_dev_enable(uint8_t enabled);
#endif
static BLOCKING_NOTIFIER_HEAD(dwc2_notifier_list);
static struct dwc2_hsotg *the_controller;
static DEFINE_MUTEX(usb_con_mutex);
static ATOMIC_NOTIFIER_HEAD(asr_udc_resume_list);

void dwc2_release_wakeup_event(void)
{
	pm_relax(the_controller->dev);
}

void dwc2_release_wakeup_event_timeout(u32 sec)
{
	pm_wakeup_event(the_controller->dev, (sec * 1000));
}

void dwc2_acquire_wakeup_event(void)
{
	pm_stay_awake(the_controller->dev);
}

void dwc2_release_pm_qos(void)
{
	pm_qos_update_request(&the_controller->qos_idle,
			      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
}

void dwc2_release_pm_qos_timeout(u32 sec)
{
	pm_qos_update_request_timeout(&the_controller->qos_idle,
			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE, sec * (1000 * 1000));
}

void dwc2_acquire_pm_qos(void)
{
	pm_qos_update_request(&the_controller->qos_idle, the_controller->lpm_qos);
}

int asr_udc_register_resume_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&asr_udc_resume_list, nb);
}

int asr_udc_unregister_resume_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&asr_udc_resume_list, nb);
}

static void __maybe_unused asr_udc_notify_resume_event(struct dwc2_hsotg *hsotg, int event)
{
	atomic_notifier_call_chain(&asr_udc_resume_list, event, NULL);
}

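/*
 * Example client usage (a hypothetical sketch, not part of this
 * driver): the chain is atomic, so the callback runs in atomic
 * context and must not sleep.
 *
 *	static int my_resume_cb(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_resume_nb = {
 *		.notifier_call = my_resume_cb,
 *	};
 *	asr_udc_register_resume_notifier(&my_resume_nb);
 */
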
static const char *charger_type(unsigned int type)
{
	switch (type) {
	case NULL_CHARGER:		return "NULL_CHARGER";
	case DEFAULT_CHARGER:		return "DEFAULT_CHARGER";
	case DCP_CHARGER:		return "DCP_CHARGER";
	case CDP_CHARGER:		return "CDP_CHARGER";
	case SDP_CHARGER:		return "SDP_CHARGER";
	default:			return "NONE_STANDARD_CHARGER";
	}
}

int udc_get_charger_type(void)
{
	struct dwc2_hsotg *hsotg = the_controller;

	if (!hsotg)
		return POWER_SUPPLY_TYPE_UNKNOWN;

	switch (hsotg->charger_type) {
	case SDP_CHARGER:
		return POWER_SUPPLY_TYPE_USB;
	case DCP_CHARGER:
		return POWER_SUPPLY_TYPE_USB_DCP;
	case DEFAULT_CHARGER:
	case CDP_CHARGER:
	case NONE_STANDARD_CHARGER:
	default:
		return POWER_SUPPLY_TYPE_UNKNOWN;
	}
}

static void call_charger_notifier(struct dwc2_hsotg *hsotg)
{
	blocking_notifier_call_chain(&dwc2_notifier_list,
				     hsotg->charger_type, NULL);
}

/* For any user that cares about USB UDC events, for example the charger */
int mv_udc_register_client(struct notifier_block *nb)
{
	struct dwc2_hsotg *hsotg = the_controller;
	int ret = 0;

	if (!hsotg)
		return -ENODEV;

	ret = blocking_notifier_chain_register(&dwc2_notifier_list, nb);
	if (ret)
		return ret;

	if (hsotg->charger_type)
		call_charger_notifier(hsotg);

	return 0;
}
EXPORT_SYMBOL(mv_udc_register_client);

int mv_udc_unregister_client(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&dwc2_notifier_list, nb);
}
EXPORT_SYMBOL(mv_udc_unregister_client);

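/*
 * Example charger client (a hypothetical sketch): the notifier is
 * called with the charger type as the event value, and a client that
 * registers late is still told the current type. This chain is
 * blocking, so callbacks may sleep.
 *
 *	static int my_charger_cb(struct notifier_block *nb,
 *				 unsigned long type, void *data)
 *	{
 *		if (type == DCP_CHARGER)
 *			;	// e.g. raise the input current limit
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_charger_nb = {
 *		.notifier_call = my_charger_cb,
 *	};
 *	mv_udc_register_client(&my_charger_nb);
 */
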
static void do_delayed_charger_work(struct work_struct *work)
{
	struct dwc2_hsotg *hsotg = NULL;

	hsotg = container_of(work, struct dwc2_hsotg, delayed_charger_work.work);

	/* if we still see DEFAULT_CHARGER, downgrade it to non-standard */
	if (hsotg->charger_type == DEFAULT_CHARGER)
		hsotg->charger_type = NONE_STANDARD_CHARGER;

	dev_info(hsotg->dev, "final charger type: %s\n",
		 charger_type(hsotg->charger_type));

	call_charger_notifier(hsotg);

	/* SDP or CDP need to transfer data, so hold the wakeup source.
	 * Also hold it for DCP: some chargers have a watchdog or lack an
	 * irq indication, so they must work in polling mode and must not
	 * suspend.
	 */
	if ((hsotg->charger_type == SDP_CHARGER) ||
	    (hsotg->charger_type == NONE_STANDARD_CHARGER) ||
	    (hsotg->charger_type == CDP_CHARGER) ||
	    (hsotg->charger_type == DCP_CHARGER)) {
		pm_stay_awake(hsotg->dev);
		pm_qos_update_request(&hsotg->qos_idle, hsotg->lpm_qos);
	} else {
		dev_info(hsotg->dev, "rls pm lock\n");
		pm_qos_update_request(&hsotg->qos_idle,
				      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
		/*
		 * leave some delay for the charger driver to do its work;
		 * for nz3 we need some extra time to restore the OS type,
		 * so use a longer timeout here
		 */
		pm_wakeup_event(hsotg->dev, 5000);
	}
}

void dwc2_report_sdp_charger(struct dwc2_hsotg *hsotg)
{
	if (work_pending(&hsotg->delayed_charger_work.work))
		cancel_delayed_work(&hsotg->delayed_charger_work);
	hsotg->charger_type = SDP_CHARGER;
	schedule_delayed_work(&hsotg->delayed_charger_work, 0);
}

static void dwc2_ep0_complete_profiling(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *hs_ep, u32 length, u8 dir_in)
{
	if (dir_in)
		hsotg->ep0_rw_complete[hsotg->ep0_rw_complete_idx % NR_EP0_PROFILING] = (1 << 15) | ((u16)length);
	else
		hsotg->ep0_rw_complete[hsotg->ep0_rw_complete_idx % NR_EP0_PROFILING] = (0 << 15) | ((u16)length);

	hsotg->ep0_rw_complete_idx++;
}

void dwc2_dump_ep0_profiling_data(void)
{
	int i;
	struct dwc2_hsotg *hsotg = the_controller;

	printk("dwc2 last ep0_rw_idx: %d %d\n", hsotg->ep0_rw_idx - 1, (hsotg->ep0_rw_idx - 1) % NR_EP0_PROFILING);
	for (i = 0; i < NR_EP0_PROFILING; i += 4) {
		printk("%3d: 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
		       hsotg->ep0_rw[i + 0],
		       hsotg->ep0_rw[i + 1],
		       hsotg->ep0_rw[i + 2],
		       hsotg->ep0_rw[i + 3]);
	}

	printk("dwc2 last ep0_rw_complete_idx: %d %d\n", hsotg->ep0_rw_complete_idx - 1, (hsotg->ep0_rw_complete_idx - 1) % NR_EP0_PROFILING);
	for (i = 0; i < NR_EP0_PROFILING; i += 4) {
		printk("%3d: 0x%04x 0x%04x 0x%04x 0x%04x\n", i,
		       hsotg->ep0_rw_complete[i + 0],
		       hsotg->ep0_rw_complete[i + 1],
		       hsotg->ep0_rw_complete[i + 2],
		       hsotg->ep0_rw_complete[i + 3]);
	}

	for (i = 0; i < 8; i++) {
		printk("ep%din, nr_started: %d, nr_enqueue: %d, nr_dequeue: %d\n", i,
		       hsotg->eps_in[i]->nr_started,
		       hsotg->eps_in[i]->nr_enqueue,
		       hsotg->eps_in[i]->nr_dequeue);
		printk("ep%dout, nr_started: %d, nr_enqueue: %d, nr_dequeue: %d\n", i,
		       hsotg->eps_out[i]->nr_started,
		       hsotg->eps_out[i]->nr_enqueue,
		       hsotg->eps_out[i]->nr_dequeue);
	}
}

extern void rndis_dump_data(void);
static void __maybe_unused dwc2_dump_regs(struct dwc2_hsotg *hsotg)
{
	u32 i;

	pr_info("ep0_state: %d\n", hsotg->ep0_state);
#ifdef CONFIG_DWC2_MON_TIMER
	pr_info("mon_timer_started: %d\n", hsotg->mon_timer_started);
#endif
	rndis_dump_data();
	dwc2_dump_ep0_profiling_data();

	for (i = 0x0; i < 0x100; i += 32) {
		pr_info("0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", i,
			dwc2_readl(hsotg, i + 0),
			dwc2_readl(hsotg, i + 4),
			dwc2_readl(hsotg, i + 8),
			dwc2_readl(hsotg, i + 12),
			dwc2_readl(hsotg, i + 16),
			dwc2_readl(hsotg, i + 20),
			dwc2_readl(hsotg, i + 24),
			dwc2_readl(hsotg, i + 28));
	}

	for (i = 0x800; i < 0xA00; i += 32) {
		pr_info("0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", i,
			dwc2_readl(hsotg, i + 0),
			dwc2_readl(hsotg, i + 4),
			dwc2_readl(hsotg, i + 8),
			dwc2_readl(hsotg, i + 12),
			dwc2_readl(hsotg, i + 16),
			dwc2_readl(hsotg, i + 20),
			dwc2_readl(hsotg, i + 24),
			dwc2_readl(hsotg, i + 28));
	}

	for (i = 0xB00; i < 0xC00; i += 32) {
		pr_info("0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n", i,
			dwc2_readl(hsotg, i + 0),
			dwc2_readl(hsotg, i + 4),
			dwc2_readl(hsotg, i + 8),
			dwc2_readl(hsotg, i + 12),
			dwc2_readl(hsotg, i + 16),
			dwc2_readl(hsotg, i + 20),
			dwc2_readl(hsotg, i + 24),
			dwc2_readl(hsotg, i + 28));
	}
	usb_phy_dump_cfg(hsotg->uphy);
}

static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg);
static void dwc2_charger_type_confirm(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret, timeout = 95;	/* 950 ms, should get charger type in 1s */
	unsigned int vbus = 0;

	if (hsotg->no_acchg_det) {
		pr_info("don't detect ac charger type\n");
		return;
	}

	while (timeout--) {
		if (hsotg->bus_reset_received || hsotg->suspend_received) {
			pr_err("udc_charger: reset/suspend = %d/%d\n",
			       hsotg->bus_reset_received, hsotg->suspend_received);
			return;
		}

		ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
		if (ret) {
			vbus = usb_phy_get_vbus(hsotg->uphy);
			if (vbus == 0) {
				pr_err("%s: usb plugout\n", __func__);
				return;
			}
		} else if (vbus == 0) {
			pr_err("%s: usb plugout\n", __func__);
			return;
		}
		msleep(10);
	}

	spin_lock_irqsave(&hsotg->lock, flags);
	dwc2_hsotg_core_disconnect(hsotg);
	dwc2_hsotg_disconnect(hsotg);
	hsotg->enabled = 0;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	hsotg->charger_type = DCP_CHARGER;
	pr_err("%s: suspend usb phy\n", __func__);
	usb_phy_set_suspend(hsotg->uphy, 1);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (work_pending(&hsotg->delayed_charger_work.work))
		cancel_delayed_work(&hsotg->delayed_charger_work);
	schedule_delayed_work(&hsotg->delayed_charger_work, 0);
}

void dwc2_suspend_gadget(struct dwc2_hsotg *hsotg)
{
	dev_err(hsotg->dev, "usb-suspd: st: %d, lx_state: %d 0x%x\n",
		hsotg->gadget.state, hsotg->lx_state,
		dwc2_readl(hsotg, DSTS));
	if (hsotg->allow_suspend && hsotg->gadget.state >= USB_STATE_CONFIGURED
	    && (dwc2_readl(hsotg, DSTS) & DSTS_SUSPSTS)) {
		dev_err(hsotg->dev, "%s: rls wakelock\n", __func__);
		dwc2_release_wakeup_event_timeout(DWC2_WAKEUP_TIMEOUT_SEC);
		hsotg->lx_state = DWC2_L2;
	}
}

void dwc2_resume_gadget(struct dwc2_hsotg *hsotg)
{
	dev_err(hsotg->dev, "usb-resume: st: %d, lx_state: %d 0x%x\n",
		hsotg->gadget.state, hsotg->lx_state,
		dwc2_readl(hsotg, DSTS));

	if (hsotg->allow_suspend && (!(dwc2_readl(hsotg, DSTS) & DSTS_SUSPSTS))) {
		hsotg->lx_state = DWC2_L0;
		dev_err(hsotg->dev, "%s: get wakelock\n", __func__);
		dwc2_acquire_wakeup_event();
	}
}

/* conversion functions */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct dwc2_hsotg_req, req);
}

static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct dwc2_hsotg_ep, ep);
}

static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct dwc2_hsotg, gadget);
}

static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
}

static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
}

static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
						u32 ep_index, u32 dir_in)
{
	if (dir_in)
		return hsotg->eps_in[ep_index];
	else
		return hsotg->eps_out[ep_index];
}

/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * g_using_dma is set depending on dts flag.
 */
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma;
}

/*
 * using_desc_dma - return the descriptor DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using descriptor DMA.
 */
static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma_desc;
}

/**
 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 * @hs_ep: The endpoint
 *
 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 */
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u16 limit = DSTS_SOFFN_LIMIT;

	if (hsotg->gadget.speed != USB_SPEED_HIGH)
		limit >>= 3;

	hs_ep->target_frame += hs_ep->interval;
	if (hs_ep->target_frame > limit) {
		hs_ep->frame_overrun = true;
		hs_ep->target_frame &= limit;
	} else {
		hs_ep->frame_overrun = false;
	}
}

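/*
 * Worked example (illustrative numbers, assuming the usual
 * DSTS_SOFFN_LIMIT of 0x3fff): at high speed, target_frame = 0x3ffe
 * with interval = 4 gives 0x4002, which exceeds the limit, so
 * frame_overrun is set and target_frame wraps to
 * 0x4002 & 0x3fff = 0x0002. At full speed the limit is shifted right
 * by 3 (frames rather than microframes), giving 0x7ff.
 */
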
/**
 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
 * by one.
 * @hs_ep: The endpoint.
 *
 * This function is used in the service interval based scheduling flow to
 * calculate the descriptor frame number field value. In service interval
 * mode the frame number in the descriptor should point to the last (u)frame
 * in the interval.
 *
 */
static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u16 limit = DSTS_SOFFN_LIMIT;

	if (hsotg->gadget.speed != USB_SPEED_HIGH)
		limit >>= 3;

	if (hs_ep->target_frame)
		hs_ep->target_frame -= 1;
	else
		hs_ep->target_frame = limit;
}

/**
 * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to enable
 */
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk | ints;

	if (new_gsintmsk != gsintmsk) {
		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
	}
}

/**
 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
 */
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk & ~ints;

	if (new_gsintmsk != gsintmsk)
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}

/**
 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 * @hsotg: The device state
 * @ep: The endpoint index
 * @dir_in: True if direction is in.
 * @en: The enable value, true to enable
 *
 * Set or clear the mask for an individual endpoint's interrupt
 * request.
 */
static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
				  unsigned int ep, unsigned int dir_in,
				  unsigned int en)
{
	unsigned long flags;
	u32 bit = 1 << ep;
	u32 daint;

	if (!dir_in)
		bit <<= 16;

	local_irq_save(flags);
	daint = dwc2_readl(hsotg, DAINTMSK);
	if (en)
		daint |= bit;
	else
		daint &= ~bit;
	dwc2_writel(hsotg, daint, DAINTMSK);
	local_irq_restore(flags);
}

/**
 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
	if (hsotg->hw_params.en_multiple_tx_fifo)
		/* In dedicated FIFO mode we need count of IN EPs */
		return hsotg->hw_params.num_dev_in_eps;
	else
		/* In shared FIFO mode we need count of Periodic IN EPs */
		return hsotg->hw_params.num_dev_perio_in_ep;
}

/**
 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 * device mode TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
	int addr;
	int tx_addr_max;
	u32 np_tx_fifo_size;

	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
				hsotg->params.g_np_tx_fifo_size);

	/* Get Endpoint Info Control block size in DWORDs. */
	tx_addr_max = hsotg->hw_params.total_fifo_size;

	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
	if (tx_addr_max <= addr)
		return 0;

	return tx_addr_max - addr;
}

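/*
 * Worked example (illustrative values only): with
 * total_fifo_size = 500 words, g_rx_fifo_size = 256 and an effective
 * np_tx_fifo_size of 128, the depth left for the dedicated TX FIFOs
 * is 500 - (256 + 128) = 116 words. If RX + NPTX already exhaust the
 * total, 0 is returned.
 */
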
/**
 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 */
static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
{
	u32 gintsts2;
	u32 gintmsk2;

	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);

	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
		dev_info(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
	}
}

/**
 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 * TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
	int tx_fifo_count;
	int tx_fifo_depth;

	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);

	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);

	if (!tx_fifo_count)
		return tx_fifo_depth;
	else
		return tx_fifo_depth / tx_fifo_count;
}

/**
 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 * @hsotg: The device instance.
 */
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
	unsigned int ep;
	unsigned int addr;
	int timeout;

	u32 val;
	u32 *txfsz = hsotg->params.g_tx_fifo_size;

	/* Reset fifo map if not correctly cleared during previous session */
	WARN_ON(hsotg->fifo_map);
	hsotg->fifo_map = 0;

	/* set RX/NPTX FIFO sizes */
	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT) |
		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
		    GNPTXFSIZ);

	/*
	 * arrange all the rest of the TX FIFOs, as some versions of this
	 * block have overlapping default addresses. This also ensures
	 * that if the settings have been changed, then they are set to
	 * known values.
	 */

	/* start at the end of the GNPTXFSIZ, rounded up */
	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;

	/*
	 * Configure fifos sizes from provided configuration and assign
	 * them to endpoints dynamically according to maxpacket size value of
	 * given endpoint.
	 */
	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
		if (!txfsz[ep])
			continue;
		val = addr;
		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
			  "insufficient fifo memory");
		addr += txfsz[ep];

		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
	}

	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
		    GDFIFOCFG);
	/*
	 * according to p428 of the design guide, we need to ensure that
	 * all fifos are flushed before continuing
	 */

	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
		    GRSTCTL_RXFFLSH, GRSTCTL);

	/* wait until the fifos are both flushed */
	timeout = 100;
	while (1) {
		val = dwc2_readl(hsotg, GRSTCTL);

		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
			break;

		if (--timeout == 0) {
			dev_err(hsotg->dev,
				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
				__func__, val);
			break;
		}

		udelay(1);
	}

	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}

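/*
 * Resulting layout (a sketch with illustrative sizes): the FIFO RAM
 * is allocated linearly - the RX FIFO at address 0, the non-periodic
 * TX FIFO directly after it, then each enabled DPTXFSIZN(ep) FIFO
 * packed back to back. GDFIFOCFG.EPINFOBASE is finally pointed at the
 * first free word, so the endpoint info block sits above all FIFOs.
 */
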
/**
 * dwc2_hsotg_ep_alloc_request - allocate USB request structure
 * @ep: USB endpoint to allocate request for.
 * @flags: Allocation flags
 *
 * Allocate a new USB request structure appropriate for the specified endpoint
 */
static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
						       gfp_t flags)
{
	struct dwc2_hsotg_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer.
 */
static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
{
	return hs_ep->periodic;
}

/**
 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint for the request
 * @hs_req: The request being processed.
 *
 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
 */
static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	struct usb_request *req = &hs_req->req;

	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
}

/*
 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 * for Control endpoint
 * @hsotg: The device state.
 *
 * This function allocates 4 descriptor chains for EP 0: 2 for the
 * Setup stage and one each for the IN and OUT data/status transactions.
 */
static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
{
	hsotg->setup_desc[0] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[0],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[0])
		goto fail;

	hsotg->setup_desc[1] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[1],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[1])
		goto fail;

	hsotg->ctrl_in_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_in_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_in_desc)
		goto fail;

	hsotg->ctrl_out_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_out_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_out_desc)
		goto fail;

	return 0;

fail:
	return -ENOMEM;
}

/**
 * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO
 */
static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;
	int max_transfer;

	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/*
		 * work out how much data was loaded so we can calculate
		 * how much data is left in the fifo.
		 */

		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

		/*
		 * if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		can_write = dwc2_readl(hsotg,
				       DTXFSTS(hs_ep->fifo_index));

		can_write &= 0xffff;
		can_write *= 4;
	} else {
		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
			dev_info(hsotg->dev,
				 "%s: no queue slots available (0x%08x)\n",
				 __func__, gnptxsts);

			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
			return -ENOSPC;
		}

		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
		__func__, gnptxsts, can_write, to_write, max_transfer);

	/*
	 * limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512 && !periodic)
		can_write = 512;

	/*
	 * limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it.
	 */
	if (to_write > max_transfer) {
		to_write = max_transfer;

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					    GINTSTS_NPTXFEMP);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % max_transfer;

		/*
		 * Round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/*
		 * enable correct FIFO interrupt to alert us when there
		 * is more room left.
		 */

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					    GINTSTS_NPTXFEMP);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}

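/*
 * Worked example for the dedicated-FIFO branch above (illustrative
 * value): DTXFSTS reports free space in 32-bit words, so a reading of
 * 0x100 means 256 words, and can_write = (0x100 & 0xffff) * 4 = 1024
 * bytes.
 */
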
/**
 * get_ep_limit - get the maximum data length for this endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * so that transfers that are too long can be split.
 */
static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
{
	int index = hs_ep->index;
	unsigned int maxsize;
	unsigned int maxpkt;

	if (index != 0) {
		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
	} else {
		maxsize = 64 + 64;
		if (hs_ep->dir_in)
			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
		else
			maxpkt = 2;
	}

	/* we made the constant loading easier above by using +1 */
	maxpkt--;
	maxsize--;

	/*
	 * constrain by packet count if maxpkts*pktsize is greater
	 * than the length register size.
	 */

	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
		maxsize = maxpkt * hs_ep->ep.maxpacket;

	return maxsize;
}

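/*
 * Worked example (assuming the usual field limits of
 * DXEPTSIZ_XFERSIZE_LIMIT = 0x7ffff and DXEPTSIZ_PKTCNT_LIMIT =
 * 0x3ff): for a bulk EP with maxpacket = 512, 0x3ff * 512 = 523776
 * bytes is below the 524287-byte transfer size field, so the packet
 * count is the binding limit. For EP0 OUT, maxsize = 127 and
 * maxpkt = 1, so the result is a single 64-byte packet
 * (1 * 64 < 127).
 */
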
/**
 * dwc2_hsotg_read_frameno - read current frame number
 * @hsotg: The device instance
 *
 * Return the current frame number
 */
static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
	u32 dsts;

	dsts = dwc2_readl(hsotg, DSTS);
	dsts &= DSTS_SOFFN_MASK;
	dsts >>= DSTS_SOFFN_SHIFT;

	return dsts;
}

/**
 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
 * DMA descriptor chain prepared for specific endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * depending on its descriptor chain capacity so that transfers that
 * are too long can be split.
 */
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
	int is_isoc = hs_ep->isochronous;
	unsigned int maxsize;
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;

	if (is_isoc)
		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
					   MAX_DMA_DESC_NUM_HS_ISOC;
	else
		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;

	/* Interrupt OUT EP with mps not multiple of 4 */
	if (hs_ep->index)
		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
			maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;

	return maxsize;
}

/*
 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 * @hs_ep: The endpoint
 * @mask: RX/TX bytes mask to be defined
 *
 * Returns maximum data payload for one descriptor after analyzing endpoint
 * characteristics.
 * DMA descriptor transfer bytes limit depends on EP type:
 * Control out - MPS,
 * Isochronous - descriptor rx/tx bytes bitfield limit,
 * Control In/Bulk/Interrupt - multiple of mps, so that a single packet is
 * never concatenated across descriptors.
 * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
 * to a single descriptor.
 *
 * Selects corresponding mask for RX/TX bytes as well.
 */
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;
	u32 desc_size = 0;

	if (!hs_ep->index && !dir_in) {
		desc_size = mps;
		*mask = DEV_DMA_NBYTES_MASK;
	} else if (hs_ep->isochronous) {
		if (dir_in) {
			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
		} else {
			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
		}
	} else {
		desc_size = DEV_DMA_NBYTES_LIMIT;
		*mask = DEV_DMA_NBYTES_MASK;

		/* Round down desc_size to be mps multiple */
		desc_size -= desc_size % mps;
	}

	/* Interrupt OUT EP with mps not multiple of 4 */
	if (hs_ep->index)
		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
			desc_size = mps;
			*mask = DEV_DMA_NBYTES_MASK;
		}

	return desc_size;
}

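/*
 * Example outcomes (a sketch assuming DEV_DMA_NBYTES_LIMIT = 0xffff):
 * a bulk EP with mps = 512 gets 0xffff rounded down to a multiple of
 * 512, i.e. 65024 bytes per descriptor; EP0 OUT gets exactly one mps
 * (64 bytes); an interrupt OUT EP with mps = 10 (not a multiple of 4)
 * is forced to one packet, i.e. 10 bytes, per descriptor.
 */
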
static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
						   struct dwc2_dma_desc **desc,
						   dma_addr_t dma_buff,
						   unsigned int len,
						   bool true_last)
{
	int dir_in = hs_ep->dir_in;
	u32 mps = hs_ep->ep.maxpacket;
	u32 maxsize = 0;
	u32 offset = 0;
	u32 mask = 0;
	int i;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	hs_ep->desc_count = (len / maxsize) +
				((len % maxsize) ? 1 : 0);
	if (len == 0)
		hs_ep->desc_count = 1;

	for (i = 0; i < hs_ep->desc_count; ++i) {
		(*desc)->status = 0;
		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
				     << DEV_DMA_BUFF_STS_SHIFT);

		if (len > maxsize) {
			if (!hs_ep->index && !dir_in)
				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);

			(*desc)->status |=
				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
			(*desc)->buf = dma_buff + offset;

			len -= maxsize;
			offset += maxsize;
		} else {
			if (true_last)
				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);

			if (dir_in)
				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
					((hs_ep->send_zlp && true_last) ?
					DEV_DMA_SHORT : 0);

			(*desc)->status |=
				len << DEV_DMA_NBYTES_SHIFT & mask;
			(*desc)->buf = dma_buff + offset;
		}

		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
				     << DEV_DMA_BUFF_STS_SHIFT);
		(*desc)++;
	}
}

/*
 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 * @hs_ep: The endpoint
 * @dma_buff: DMA address of the buffer to transfer
 * @len: Length of the transfer
 *
 * This function will iterate over descriptor chain and fill its entries
 * with corresponding information based on transfer data.
 */
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
						 dma_addr_t dma_buff,
						 unsigned int len)
{
	struct usb_request *ureq = NULL;
	struct dwc2_dma_desc *desc = hs_ep->desc_list;
	struct scatterlist *sg;
	int i;
	u8 desc_count = 0;

	if (hs_ep->req)
		ureq = &hs_ep->req->req;

	/* non-DMA sg buffer */
	if (!ureq || !ureq->num_sgs) {
		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
			dma_buff, len, true);
		return;
	}

	/* DMA sg buffer */
	for_each_sg(ureq->sg, sg, ureq->num_mapped_sgs, i) {
		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
			(i == (ureq->num_mapped_sgs - 1)));
		desc_count += hs_ep->desc_count;
	}

	hs_ep->desc_count = desc_count;
}

/*
 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 * @hs_ep: The isochronous endpoint.
 * @dma_buff: usb requests dma buffer.
 * @len: usb request transfer length.
 *
 * Fills the next free descriptor with the data of the arrived usb request,
 * frame info, sets the Last and IOC bits, and increments next_desc. If the
 * filled descriptor is not the first one, removes the L bit from the
 * previous descriptor status.
 */
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
				      dma_addr_t dma_buff, unsigned int len)
{
	struct dwc2_dma_desc *desc;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 index;
	u32 maxsize = 0;
	u32 mask = 0;
	u8 pid = 0;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	index = hs_ep->next_desc;
	desc = &hs_ep->desc_list[index];

	/* Check if descriptor chain full */
	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
	    DEV_DMA_BUFF_STS_HREADY) {
		dev_info(hsotg->dev, "%s: desc chain full\n", __func__);
		return 1;
	}

	/* Clear L bit of previous desc if more than one entries in the chain */
	if (hs_ep->next_desc)
		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;

	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);

	desc->status = 0;
	desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);

	desc->buf = dma_buff;
	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));

	if (hs_ep->dir_in) {
		if (len)
			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
		else
			pid = 1;
		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
				 DEV_DMA_ISOC_PID_MASK) |
				((len % hs_ep->ep.maxpacket) ?
				 DEV_DMA_SHORT : 0) |
				((hs_ep->target_frame <<
				  DEV_DMA_ISOC_FRNUM_SHIFT) &
				 DEV_DMA_ISOC_FRNUM_MASK);
	}

	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);

	/* Increment frame number by interval for IN */
	if (hs_ep->dir_in)
		dwc2_gadget_incr_frame_num(hs_ep);

	/* Update index of last configured entry in the chain */
	hs_ep->next_desc++;
	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
		hs_ep->next_desc = 0;

	return 0;
}

/*
 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 * @hs_ep: The isochronous endpoint.
 *
 * Prepare descriptor chain for isochronous endpoints. Afterwards
 * write DMA address to HW and enable the endpoint.
 */
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req, *treq;
	int index = hs_ep->index;
	int ret;
	int i;
	u32 dma_reg;
	u32 depctl;
	u32 ctrl;
	struct dwc2_dma_desc *desc;

	if (list_empty(&hs_ep->queue)) {
		hs_ep->target_frame = TARGET_FRAME_INITIAL;
		dev_info(hsotg->dev, "%s: No requests in queue\n", __func__);
		return;
	}

	/* Initialize descriptor chain by Host Busy status */
	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
		desc = &hs_ep->desc_list[i];
		desc->status = 0;
		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
				 << DEV_DMA_BUFF_STS_SHIFT);
	}

	hs_ep->next_desc = 0;
	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
		dma_addr_t dma_addr = hs_req->req.dma;

		if (hs_req->req.num_sgs) {
			WARN_ON(hs_req->req.num_sgs > 1);
			dma_addr = sg_dma_address(hs_req->req.sg);
		}
		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
						 hs_req->req.length);
		if (ret)
			break;
	}

	hs_ep->compl_desc = 0;
	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);

	/* write descriptor chain address to control register */
	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);

	ctrl = dwc2_readl(hsotg, depctl);
	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, depctl);
}

static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
					struct dwc2_hsotg_ep *hs_ep,
					struct dwc2_hsotg_req *hs_req,
					int result);
static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep);

1395 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
1396 * @hsotg: The controller state.
1397 * @hs_ep: The endpoint to process a request for
1398 * @hs_req: The request to start.
1399 * @continuing: True if we are doing more for the current request.
1400 *
1401 * Start the given request running by setting the endpoint registers
1402 * appropriately, and writing any data to the FIFOs.
1403 */
1404static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
1405 struct dwc2_hsotg_ep *hs_ep,
1406 struct dwc2_hsotg_req *hs_req,
1407 bool continuing)
1408{
1409 struct usb_request *ureq = &hs_req->req;
1410 int index = hs_ep->index;
1411 int dir_in = hs_ep->dir_in;
1412 u32 epctrl_reg;
1413 u32 epsize_reg;
1414 u32 epsize;
1415 u32 ctrl;
1416 unsigned int length, pure_length;
1417 unsigned int packets;
1418 unsigned int maxreq;
1419 unsigned int dma_reg;
1420
1421 if (index != 0) {
1422 if (hs_ep->req && !continuing) {
1423 dev_err(hsotg->dev, "%s: active request\n", __func__);
1424 WARN_ON(1);
1425 return;
1426 } else if (hs_ep->req != hs_req && continuing) {
1427 dev_err(hsotg->dev,
1428 "%s: continue different req\n", __func__);
1429 WARN_ON(1);
1430 return;
1431 }
1432 }
1433
1434 dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
1435 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
1436 epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
1437
1438 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
1439 __func__, dwc2_readl(hsotg, epctrl_reg), index,
1440 hs_ep->dir_in ? "in" : "out");
1441
1442 /* If endpoint is stalled, we will restart request later */
1443 ctrl = dwc2_readl(hsotg, epctrl_reg);
1444
1445 if (index && ctrl & DXEPCTL_STALL) {
1446 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
1447 return;
1448 }
1449
1450 length = ureq->length - ureq->actual;
1451 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
1452 ureq->length, ureq->actual);
1453
1454 /* sanity check */
1455 if (unlikely(length && ((ureq->dma > hsotg->phys_mem_end)
1456 || ((ureq->dma + length) > hsotg->phys_mem_end)))) {
1457 pr_emerg("dwc2 dma: 0x%x, len: 0x%x, hsotg->phys_mem_end: 0x%lx\n",
1458 ureq->dma, length, hsotg->phys_mem_end);
1459 BUG();
1460 }
1461
1462 if (!using_desc_dma(hsotg))
1463 maxreq = get_ep_limit(hs_ep);
1464 else
1465 maxreq = dwc2_gadget_get_chain_limit(hs_ep);
1466
1467 if (length > maxreq) {
1468 int round = maxreq % hs_ep->ep.maxpacket;
1469
1470 dev_info(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
1471 __func__, length, maxreq, round);
1472
1473 /* round down to multiple of packets */
1474 if (round)
1475 maxreq -= round;
1476
1477 length = maxreq;
1478 }
1479
1480 if (length)
1481 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
1482 else
1483 packets = 1; /* send one packet if length is zero. */
1484
1485 if (dir_in && index != 0)
1486 if (hs_ep->isochronous)
1487 epsize = DXEPTSIZ_MC(packets);
1488 else
1489 epsize = DXEPTSIZ_MC(1);
1490 else
1491 epsize = 0;
1492
1493 /*
1494 * zero length packet should be programmed on its own and should not
1495 * be counted in DIEPTSIZ.PktCnt with other packets.
1496 */
1497 if (dir_in && ureq->zero && !continuing) {
1498 /* Test if zlp is actually required. */
1499 if ((ureq->length >= hs_ep->ep.maxpacket) &&
1500 !(ureq->length % hs_ep->ep.maxpacket))
1501 hs_ep->send_zlp = 1;
1502 }
1503
1504 epsize |= DXEPTSIZ_PKTCNT(packets);
1505 epsize |= DXEPTSIZ_XFERSIZE(length);
1506 pure_length = length;
1507
1508 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
1509 __func__, packets, length, ureq->length, epsize, epsize_reg);
1510
1511 /* store the request as the current one we're doing */
1512 hs_ep->req = hs_req;
1513
1514 if (using_desc_dma(hsotg)) {
1515 u32 offset = 0;
1516 u32 mps = hs_ep->ep.maxpacket;
1517
1518 /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
1519 if (!dir_in) {
1520 if (!index)
1521 length = mps;
1522 else if (length % mps)
1523 length += (mps - (length % mps));
1524 }
1525
1526 if (continuing)
1527 offset = ureq->actual;
1528
1529 /* Fill DDMA chain entries */
1530 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
1531 length);
1532
1533 /* write descriptor chain address to control register */
1534 dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1535
1536 dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
1537 __func__, (u32)hs_ep->desc_list_dma, dma_reg);
1538 } else {
1539 /* write size / packets */
1540 dwc2_writel(hsotg, epsize, epsize_reg);
1541
1542 if (using_dma(hsotg) && !continuing && (length != 0)) {
1543 /*
1544 * write DMA address to control register, buffer
1545 * already synced by dwc2_hsotg_ep_queue().
1546 */
1547
1548 dwc2_writel(hsotg, ureq->dma, dma_reg);
1549
1550 dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
1551 __func__, &ureq->dma, dma_reg);
1552 }
1553 }
1554
1555 if (hs_ep->isochronous) {
1556 if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
1557 if (hs_ep->interval == 1) {
1558 if (hs_ep->target_frame & 0x1)
1559 ctrl |= DXEPCTL_SETODDFR;
1560 else
1561 ctrl |= DXEPCTL_SETEVENFR;
1562 }
1563 ctrl |= DXEPCTL_CNAK;
1564 } else {
1565 hs_req->req.frame_number = hs_ep->target_frame;
1566 hs_req->req.actual = 0;
1567 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1568 return;
1569 }
1570 }
1571
1572 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
1573
1574 dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
1575
1576 /* For Setup request do not clear NAK */
1577 if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
1578 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
1579
1580 if (index)
1581 ctrl |= DXEPCTL_CNAK;
1582
1583 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
1584 dwc2_writel(hsotg, ctrl, epctrl_reg);
1585
1586 /* profiling for rndis command error issue */
1587 if (index == 0) {
1588 if (hs_ep->dir_in) {
1589 hsotg->ep0_rw[hsotg->ep0_rw_idx % NR_EP0_PROFILING] = (1 << 15) | pure_length;
1590 } else {
1591 hsotg->ep0_rw[hsotg->ep0_rw_idx % NR_EP0_PROFILING] = (0 << 15) | pure_length;
1592 }
1593 hsotg->ep0_rw_idx++;
1594 }
1595
1596 if (index != 0) {
1597 if (dir_in)
1598 hsotg->eps_in[index]->nr_started++;
1599 else
1600 hsotg->eps_out[index]->nr_started++;
1601 }
1602
1603 /*
1604 * set these, it seems that DMA support increments past the end
1605 * of the packet buffer so we need to calculate the length from
1606 * this information.
1607 */
1608 hs_ep->size_loaded = length;
1609 hs_ep->last_load = ureq->actual;
1610
1611 if (dir_in && !using_dma(hsotg)) {
1612 /* set these anyway, we may need them for non-periodic in */
1613 hs_ep->fifo_load = 0;
1614
1615 dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1616 }
1617
1618 /*
1619 * Note, trying to clear the NAK here causes problems with transmit
1620 * on the S3C6400 ending up with the TXFIFO becoming full.
1621 */
1622
1623 /* check ep is enabled */
1624 if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA)) {
1625 dev_dbg(hsotg->dev,
1626 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
1627 index, dwc2_readl(hsotg, epctrl_reg));
1628 }
1629 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
1630 __func__, dwc2_readl(hsotg, epctrl_reg));
1631
1632 /* enable ep interrupts */
1633 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
1634}
1635
/**
 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly setup for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
 */
static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      struct usb_request *req)
{
	int ret;

	hs_ep->map_dir = hs_ep->dir_in;
	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
	if (ret)
		goto dma_error;

	return 0;

dma_error:
	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
		__func__, req->buf, req->length);

	return -EIO;
}

static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
						 struct dwc2_hsotg_ep *hs_ep,
						 struct dwc2_hsotg_req *hs_req)
{
	void *req_buf = hs_req->req.buf;

	/* If dma is not being used or buffer is aligned */
	if (!using_dma(hsotg) || !((long)req_buf & 3))
		return 0;

	WARN_ON(hs_req->saved_req_buf);

	hsotg->nr_unaligned_pkts++;
	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
		hs_ep->ep.name, req_buf, hs_req->req.length);

	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
	if (!hs_req->req.buf) {
		hs_req->req.buf = req_buf;
		dev_err(hsotg->dev,
			"%s: unable to allocate memory for bounce buffer\n",
			__func__);
		return -ENOMEM;
	}

	/* Save actual buffer */
	hs_req->saved_req_buf = req_buf;

	if (hs_ep->dir_in)
		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
	return 0;
}

static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
					 struct dwc2_hsotg_ep *hs_ep,
					 struct dwc2_hsotg_req *hs_req)
{
	/* If dma is not being used or buffer was aligned */
	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
		return;

	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);

	/* Copy data from bounce buffer on successful out transfer */
	if (!hs_ep->dir_in && !hs_req->req.status)
		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
		       hs_req->req.actual);

	/* Free bounce buffer */
	kfree(hs_req->req.buf);

	hs_req->req.buf = hs_req->saved_req_buf;
	hs_req->saved_req_buf = NULL;
}

/**
 * dwc2_gadget_target_frame_elapsed - Checks target frame
 * @hs_ep: The driver endpoint to check
 *
 * Returns true if the targeted frame has elapsed; in that case the
 * corresponding transfer must be dropped.
 */
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 target_frame = hs_ep->target_frame;
	u32 current_frame = hsotg->frame_number;
	bool frame_overrun = hs_ep->frame_overrun;
	u16 limit = DSTS_SOFFN_LIMIT;

	if (hsotg->gadget.speed != USB_SPEED_HIGH)
		limit >>= 3;

	if (!frame_overrun && current_frame >= target_frame)
		return true;

	if (frame_overrun && current_frame >= target_frame &&
	    ((current_frame - target_frame) < limit / 2))
		return true;

	return false;
}

/*
 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
 * @hsotg: The driver state
 * @hs_ep: the ep descriptor chain is for
 *
 * Called to update EP0 structure's pointers depending on the stage of
 * the control transfer.
 */
static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
					  struct dwc2_hsotg_ep *hs_ep)
{
	switch (hsotg->ep0_state) {
	case DWC2_EP0_SETUP:
	case DWC2_EP0_STATUS_OUT:
		hs_ep->desc_list = hsotg->setup_desc[0];
		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
		break;
	case DWC2_EP0_DATA_IN:
	case DWC2_EP0_STATUS_IN:
		hs_ep->desc_list = hsotg->ctrl_in_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
		break;
	case DWC2_EP0_DATA_OUT:
		hs_ep->desc_list = hsotg->ctrl_out_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
		break;
	default:
		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
			hsotg->ep0_state);
		return -EINVAL;
	}

	return 0;
}

1788static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
1789 gfp_t gfp_flags)
1790{
1791 struct dwc2_hsotg_req *hs_req = our_req(req);
1792 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1793 struct dwc2_hsotg *hs = hs_ep->parent;
1794 bool first;
1795 int ret;
1796 u32 maxsize = 0;
1797 u32 mask = 0;
1798
1799 if (unlikely(!hs->vbus_active)) {
1800 dev_err_ratelimited(hs->dev, "dwc2 already disconnected\n");
1801 return -ESHUTDOWN;
1802 }
1803
1804#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
1805 if (os_detect_is_done()) {
1806 dev_err(hs->dev,
1807 "os detection is done, skip ep:%s req: %d\n", ep->name, req->length);
1808 return -ESHUTDOWN;
1809 }
1810#endif
1811
1812 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
1813 ep->name, req, req->length, req->buf, req->no_interrupt,
1814 req->zero, req->short_not_ok);
1815
1816 if (hs->lx_state == DWC2_L1) {
1817 dwc2_wakeup_from_lpm_l1(hs, true);
1818 }
1819
1820 /* Prevent new request submission when controller is suspended */
1821 if (hs->lx_state != DWC2_L0) {
1822 dev_info_ratelimited(hs->dev, "%s: submit request only in active state: %d ep%02x\n",
1823 __func__, hs->lx_state, ep->address);
1824 return -EAGAIN;
1825 }
1826
1827 /* initialise status of the request */
1828 INIT_LIST_HEAD(&hs_req->queue);
1829 req->actual = 0;
1830 req->status = -EINPROGRESS;
1831
1832 /* Don't queue ISOC request if length greater than mps*mc */
1833 if (hs_ep->isochronous &&
1834 req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
1835 dev_err(hs->dev, "req length > maxpacket*mc\n");
1836 return -EINVAL;
1837 }
1838
1839 /* In DDMA mode for ISOC's don't queue request if length greater
1840 * than descriptor limits.
1841 */
1842 if (using_desc_dma(hs) && hs_ep->isochronous) {
1843 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
1844 if (hs_ep->dir_in && req->length > maxsize) {
1845 dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
1846 req->length, maxsize);
1847 return -EINVAL;
1848 }
1849
1850 if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
1851 dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
1852 req->length, hs_ep->ep.maxpacket);
1853 return -EINVAL;
1854 }
1855 }
1856
1857 ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1858 if (ret)
1859 return ret;
1860
1861 /* if we're using DMA, sync the buffers as necessary */
1862 if (using_dma(hs)) {
1863 ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
1864 if (ret) {
1865 dev_err(hs->dev, "dwc2_hsotg_map_dma failed %d\n", ret);
1866 return ret;
1867 }
1868 }
1869 /* If using descriptor DMA configure EP0 descriptor chain pointers */
1870 if (using_desc_dma(hs) && !hs_ep->index) {
1871 ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
1872 if (ret) {
1873 dev_err(hs->dev, "dwc2_gadget_set_ep0_desc_chain failed %d\n", ret);
1874 return ret;
1875 }
1876 }
1877
1878 first = list_empty(&hs_ep->queue);
1879 list_add_tail(&hs_req->queue, &hs_ep->queue);
1880
1881 if (hs_ep->index != 0) {
1882 if (hs_ep->dir_in)
1883 hs->eps_in[hs_ep->index]->nr_enqueue++;
1884 else
1885 hs->eps_out[hs_ep->index]->nr_enqueue++;
1886 }
1887
1888 /*
1889 * Handle DDMA isochronous transfers separately - just add new entry
1890 * to the descriptor chain.
1891 * Transfer will be started once SW gets either one of NAK or
1892 * OutTknEpDis interrupts.
1893 */
1894 if (using_desc_dma(hs) && hs_ep->isochronous) {
1895 if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
1896 dma_addr_t dma_addr = hs_req->req.dma;
1897
1898 if (hs_req->req.num_sgs) {
1899 WARN_ON(hs_req->req.num_sgs > 1);
1900 dma_addr = sg_dma_address(hs_req->req.sg);
1901 }
1902 dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
1903 hs_req->req.length);
1904 }
1905 return 0;
1906 }
1907
1908 /* Change EP direction if status phase request is after data out */
1909 if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
1910 hs->ep0_state == DWC2_EP0_DATA_OUT)
1911 hs_ep->dir_in = 1;
1912
1913 if (first) {
1914 if (!hs_ep->isochronous) {
1915 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1916 return 0;
1917 }
1918
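		/*
		 * The first ISOC request may target a frame that has already
		 * passed; walk target_frame forward by the EP interval until
		 * it is ahead of the current (micro)frame so the transfer is
		 * not started in an elapsed frame.
		 */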
1919 /* Update current frame number value. */
1920 hs->frame_number = dwc2_hsotg_read_frameno(hs);
1921 while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
1922 dwc2_gadget_incr_frame_num(hs_ep);
1923 /* Update current frame number value once more as it
1924 * changes here.
1925 */
1926 hs->frame_number = dwc2_hsotg_read_frameno(hs);
1927 }
1928
1929 if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
1930 dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1931 }
1932 return 0;
1933}
1934
1935static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
1936 gfp_t gfp_flags)
1937{
1938 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1939 struct dwc2_hsotg *hs = hs_ep->parent;
1940 unsigned long flags = 0;
1941 int ret = 0;
1942
1943 spin_lock_irqsave(&hs->lock, flags);
1944 ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
1945 spin_unlock_irqrestore(&hs->lock, flags);
1946
1947 return ret;
1948}
1949
1950static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
1951 struct usb_request *req)
1952{
1953 struct dwc2_hsotg_req *hs_req = our_req(req);
1954
1955 kfree(hs_req);
1956}
1957
1958/**
1959 * dwc2_hsotg_complete_oursetup - setup completion callback
1960 * @ep: The endpoint the request was on.
1961 * @req: The request completed.
1962 *
1963 * Called on completion of any requests the driver itself
1964 * submitted that need cleaning up.
1965 */
1966static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
1967 struct usb_request *req)
1968{
1969 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1970 struct dwc2_hsotg *hsotg = hs_ep->parent;
1971
1972 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
1973 dwc2_hsotg_ep_free_request(ep, req);
1974}
1975
1976/**
1977 * ep_from_windex - convert control wIndex value to endpoint
1978 * @hsotg: The driver state.
1979 * @windex: The control request wIndex field (in host order).
1980 *
 * Convert the given wIndex into a pointer to a driver endpoint
 * structure, or return NULL if it is not a valid endpoint.
1983 */
1984static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
1985 u32 windex)
1986{
1987 int dir = (windex & USB_DIR_IN) ? 1 : 0;
1988 int idx = windex & 0x7F;
1989
1990 if (windex >= 0x100)
1991 return NULL;
1992
1993 if (idx > hsotg->num_of_eps)
1994 return NULL;
1995
1996 return index_to_ep(hsotg, idx, dir);
1997}
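/*
 * For example, a wIndex of 0x0081 has USB_DIR_IN set with endpoint number 1,
 * so it resolves to eps_in[1], while 0x0001 resolves to eps_out[1]; any
 * value of 0x100 or above is rejected outright.
 */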
1998
1999/**
2000 * dwc2_hsotg_set_test_mode - Enable usb Test Modes
2001 * @hsotg: The driver state.
 * @testmode: requested usb test mode
 *
 * Enable the USB test mode requested by the host.
2004 */
2005int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
2006{
2007 int dctl = dwc2_readl(hsotg, DCTL);
2008
2009 dctl &= ~DCTL_TSTCTL_MASK;
2010 switch (testmode) {
2011 case TEST_J:
2012 case TEST_K:
2013 case TEST_SE0_NAK:
2014 case TEST_PACKET:
2015 case TEST_FORCE_EN:
2016 dctl |= testmode << DCTL_TSTCTL_SHIFT;
2017 break;
2018 default:
2019 return -EINVAL;
2020 }
2021 dwc2_writel(hsotg, dctl, DCTL);
2022 return 0;
2023}
2024
2025/**
2026 * dwc2_hsotg_send_reply - send reply to control request
2027 * @hsotg: The device state
2028 * @ep: Endpoint 0
2029 * @buff: Buffer for request
2030 * @length: Length of reply.
2031 *
2032 * Create a request and queue it on the given endpoint. This is useful as
2033 * an internal method of sending replies to certain control requests, etc.
2034 */
2035static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
2036 struct dwc2_hsotg_ep *ep,
2037 void *buff,
2038 int length)
2039{
2040 struct usb_request *req;
2041 int ret;
2042
2043 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
2044
2045 req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
2046 hsotg->ep0_reply = req;
2047 if (!req) {
2048 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
2049 return -ENOMEM;
2050 }
2051
2052 req->buf = hsotg->ep0_buff;
2053 req->length = length;
2054 /*
2055 * zero flag is for sending zlp in DATA IN stage. It has no impact on
2056 * STATUS stage.
2057 */
2058 req->zero = 0;
2059 req->complete = dwc2_hsotg_complete_oursetup;
2060
2061 if (length)
2062 memcpy(req->buf, buff, length);
2063
2064 ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
2065 if (ret) {
2066 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
2067 return ret;
2068 }
2069
2070 return 0;
2071}
2072
2073/**
2074 * dwc2_hsotg_process_req_status - process request GET_STATUS
2075 * @hsotg: The device state
2076 * @ctrl: USB control request
2077 */
2078static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
2079 struct usb_ctrlrequest *ctrl)
2080{
2081 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
2082 struct dwc2_hsotg_ep *ep;
2083 __le16 reply;
2084 u16 status;
2085 int ret;
2086
2087 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
2088
2089 if (!ep0->dir_in) {
2090 dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
2091 return -EINVAL;
2092 }
2093
2094 switch (ctrl->bRequestType & USB_RECIP_MASK) {
2095 case USB_RECIP_DEVICE:
2096 status = 1 << USB_DEVICE_SELF_POWERED;
2097 status |= hsotg->remote_wakeup_allowed <<
2098 USB_DEVICE_REMOTE_WAKEUP;
2099 reply = cpu_to_le16(status);
2100 break;
2101
2102 case USB_RECIP_INTERFACE:
2103 /* currently, the data result should be zero */
2104 reply = cpu_to_le16(0);
2105 break;
2106
2107 case USB_RECIP_ENDPOINT:
2108 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
2109 if (!ep)
2110 return -ENOENT;
2111
2112 reply = cpu_to_le16(ep->halted ? 1 : 0);
2113 break;
2114
2115 default:
2116 return 0;
2117 }
2118
2119 if (le16_to_cpu(ctrl->wLength) != 2)
2120 return -EINVAL;
2121
2122 ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
2123 if (ret) {
2124 dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
2125 return ret;
2126 }
2127
2128 return 1;
2129}
2130
2131static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
2132
2133/**
2134 * get_ep_head - return the first request on the endpoint
2135 * @hs_ep: The controller endpoint to get
2136 *
2137 * Get the first request on the endpoint.
2138 */
2139static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
2140{
2141 return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
2142 queue);
2143}
2144
2145/**
2146 * dwc2_gadget_start_next_request - Starts next request from ep queue
2147 * @hs_ep: Endpoint structure
2148 *
 * If the queue is empty and the EP is ISOC-OUT, unmask OUTTKNEPDIS, which
 * is masked in its handler; it needs to be unmasked here to allow
 * resynchronization.
2152 */
2153static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
2154{
2155 struct dwc2_hsotg *hsotg = hs_ep->parent;
2156 int dir_in = hs_ep->dir_in;
2157 struct dwc2_hsotg_req *hs_req;
2158
2159 if (!list_empty(&hs_ep->queue)) {
2160 hs_req = get_ep_head(hs_ep);
2161 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
2162 return;
2163 }
2164 if (!hs_ep->isochronous)
2165 return;
2166
2167 if (dir_in) {
2168 dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
2169 __func__);
2170 } else {
2171 dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
2172 __func__);
2173 }
2174}
2175
2176/**
2177 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
2178 * @hsotg: The device state
2179 * @ctrl: USB control request
2180 */
2181static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
2182 struct usb_ctrlrequest *ctrl)
2183{
2184 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
2185 struct dwc2_hsotg_req *hs_req;
2186 bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
2187 struct dwc2_hsotg_ep *ep;
2188 int ret;
2189 bool halted;
2190 u32 recip;
2191 u32 wValue;
2192 u32 wIndex;
2193
2194 dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
2195 __func__, set ? "SET" : "CLEAR");
2196
2197 wValue = le16_to_cpu(ctrl->wValue);
2198 wIndex = le16_to_cpu(ctrl->wIndex);
2199 recip = ctrl->bRequestType & USB_RECIP_MASK;
2200
2201 switch (recip) {
2202 case USB_RECIP_DEVICE:
2203 switch (wValue) {
2204 case USB_DEVICE_REMOTE_WAKEUP:
2205 if (set)
2206 hsotg->remote_wakeup_allowed = 1;
2207 else
2208 hsotg->remote_wakeup_allowed = 0;
2209 break;
2210
2211 case USB_DEVICE_TEST_MODE:
2212 if ((wIndex & 0xff) != 0)
2213 return -EINVAL;
2214 if (!set)
2215 return -EINVAL;
2216
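			/*
			 * Per USB 2.0 ch. 9.4.9 the test selector is carried
			 * in the high byte of wIndex. The mode is only
			 * latched here and is applied once the status stage
			 * completes, in dwc2_hsotg_complete_in().
			 */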
2217 hsotg->test_mode = wIndex >> 8;
2218 break;
2219 default:
2220 return -ENOENT;
2221 }
2222
2223 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
2224 if (ret) {
2225 dev_err(hsotg->dev,
2226 "%s: failed to send reply\n", __func__);
2227 return ret;
2228 }
2229 break;
2230
2231 case USB_RECIP_ENDPOINT:
2232 ep = ep_from_windex(hsotg, wIndex);
2233 if (!ep) {
2234 dev_info(hsotg->dev, "%s: no endpoint for 0x%04x\n",
2235 __func__, wIndex);
2236 return -ENOENT;
2237 }
2238
2239 switch (wValue) {
2240 case USB_ENDPOINT_HALT:
2241 halted = ep->halted;
2242
2243 dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
2244
2245 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
2246 if (ret) {
2247 dev_err(hsotg->dev,
2248 "%s: failed to send reply\n", __func__);
2249 return ret;
2250 }
2251
2252 /*
2253 * we have to complete all requests for ep if it was
2254 * halted, and the halt was cleared by CLEAR_FEATURE
2255 */
2256
2257 if (!set && halted) {
2258 /*
2259 * If we have request in progress,
2260 * then complete it
2261 */
2262 if (ep->req) {
2263 dev_info(hsotg->dev, "ephlt: handle inpro\n");
2264 hs_req = ep->req;
2265 ep->req = NULL;
2266 list_del_init(&hs_req->queue);
2267 if (hs_req->req.complete) {
2268 if (ep->index == 0)
2269 dwc2_ep0_complete_profiling(hsotg, ep, hs_req->req.actual, ep->dir_in);
2270
2271 spin_unlock(&hsotg->lock);
2272 usb_gadget_giveback_request(
2273 &ep->ep, &hs_req->req);
2274 spin_lock(&hsotg->lock);
2275 }
2276 }
2277
2278 /* If we have pending request, then start it */
2279 if (!ep->req)
2280 dwc2_gadget_start_next_request(ep);
2281 }
2282
2283 break;
2284
2285 default:
2286 return -ENOENT;
2287 }
2288 break;
2289 default:
2290 return -ENOENT;
2291 }
2292 return 1;
2293}
2294
2295static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
2296
2297/**
2298 * dwc2_hsotg_stall_ep0 - stall ep0
2299 * @hsotg: The device state
2300 *
2301 * Set stall for ep0 as response for setup request.
2302 */
2303static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
2304{
2305 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
2306 u32 reg;
2307 u32 ctrl;
2308
2309 dev_info(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
2310 reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
2311
2312 /*
2313 * DxEPCTL_Stall will be cleared by EP once it has
2314 * taken effect, so no need to clear later.
2315 */
2316
2317 ctrl = dwc2_readl(hsotg, reg);
2318 ctrl |= DXEPCTL_STALL;
2319 ctrl |= DXEPCTL_CNAK;
2320 dwc2_writel(hsotg, ctrl, reg);
2321
2322 dev_dbg(hsotg->dev,
2323 "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
2324 ctrl, reg, dwc2_readl(hsotg, reg));
2325
2326 /*
2327 * complete won't be called, so we enqueue
2328 * setup request here
2329 */
2330 dwc2_hsotg_enqueue_setup(hsotg);
2331}
2332
2333static int usb_is_set_configuration(struct usb_ctrlrequest *setup)
2334{
2335 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
2336 if ((setup->bRequest == USB_REQ_SET_CONFIGURATION)
2337 && setup->wValue)
2338 return 1;
2339
2340 return 0;
2341}
2342
2343/**
2344 * dwc2_hsotg_process_control - process a control request
2345 * @hsotg: The device state
2346 * @ctrl: The control request received
2347 *
2348 * The controller has received the SETUP phase of a control request, and
2349 * needs to work out what to do next (and whether to pass it on to the
2350 * gadget driver).
2351 */
2352static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
2353 struct usb_ctrlrequest *ctrl)
2354{
2355 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
2356 int ret = 0;
2357 u32 dcfg;
2358
2359 dev_dbg(hsotg->dev,
2360 "ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
2361 ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
2362 ctrl->wIndex, ctrl->wLength);
2363
2364 if (ctrl->wLength == 0) {
2365 ep0->dir_in = 1;
2366 hsotg->ep0_state = DWC2_EP0_STATUS_IN;
2367 } else if (ctrl->bRequestType & USB_DIR_IN) {
2368 ep0->dir_in = 1;
2369 hsotg->ep0_state = DWC2_EP0_DATA_IN;
2370 } else {
2371 ep0->dir_in = 0;
2372 hsotg->ep0_state = DWC2_EP0_DATA_OUT;
2373 }
2374
2375 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
2376 switch (ctrl->bRequest) {
2377 case USB_REQ_SET_ADDRESS:
2378 hsotg->connected = 1;
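			/*
			 * SET_ADDRESS is handled entirely here: the new
			 * address is latched into DCFG and the status stage
			 * queued directly, so the request is never passed to
			 * the gadget's ->setup() callback.
			 */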
2379 dcfg = dwc2_readl(hsotg, DCFG);
2380 dcfg &= ~DCFG_DEVADDR_MASK;
2381 dcfg |= (le16_to_cpu(ctrl->wValue) <<
2382 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
2383 dwc2_writel(hsotg, dcfg, DCFG);
2384
2385 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
2386
2387 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
2388 dwc2_report_sdp_charger(hsotg);
2389 return;
2390
2391 case USB_REQ_GET_STATUS:
2392 ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
2393 break;
2394
2395 case USB_REQ_CLEAR_FEATURE:
2396 case USB_REQ_SET_FEATURE:
2397 ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
2398 break;
2399 }
2400 }
2401
2402 /* as a fallback, try delivering it to the driver to deal with */
2403
2404 if (ret == 0 && hsotg->driver) {
2405 spin_unlock(&hsotg->lock);
2406 ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
2407 spin_lock(&hsotg->lock);
2408 if (ret < 0)
2409 dev_info(hsotg->dev, "driver->setup() ret %d\n", ret);
2410
2411 if (usb_is_set_configuration(ctrl)) {
2412 dev_info(hsotg->dev, "udc configured\n");
2413 usb_gadget_set_state(&hsotg->gadget, USB_STATE_CONFIGURED);
2414 }
2415 }
2416
2417 hsotg->delayed_status = false;
2418 if (ret == USB_GADGET_DELAYED_STATUS)
2419 hsotg->delayed_status = true;
2420
2421 /*
	 * the request either cannot be handled or is not formatted correctly,
	 * so respond with a STALL for the status stage to indicate failure.
2424 */
2425
2426 if (ret < 0) {
2427#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
2428 if (os_detect_is_done()) {
2429 dev_info(hsotg->dev, "os detect: skip ep0 stall\n");
2430 return;
2431 }
2432#endif
2433 dwc2_hsotg_stall_ep0(hsotg);
2434 }
2435}
2436
2437/**
2438 * dwc2_hsotg_complete_setup - completion of a setup transfer
2439 * @ep: The endpoint the request was on.
2440 * @req: The request completed.
2441 *
2442 * Called on completion of any requests the driver itself submitted for
2443 * EP0 setup packets
2444 */
2445static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
2446 struct usb_request *req)
2447{
2448 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
2449 struct dwc2_hsotg *hsotg = hs_ep->parent;
2450
2451 if (req->status < 0) {
2452 dev_info(hsotg->dev, "%s: failed %d\n", __func__, req->status);
2453 return;
2454 }
2455
2456 spin_lock(&hsotg->lock);
2457 if (req->actual == 0) {
2458 dev_info(hsotg->dev, "setup 0 pkt\n");
2459 dwc2_hsotg_enqueue_setup(hsotg);
2460 } else {
2461#ifdef CONFIG_DWC2_MON_TIMER
2462 if (!hsotg->mon_timer_started) {
2463 pr_debug("st mon-timer\n");
2464 hrtimer_start(&hsotg->usb_mon_timer,
2465 ns_to_ktime(DWC2_MON_TIMER_NS),
2466 HRTIMER_MODE_REL);
2467 hsotg->mon_timer_started = true;
2468 }
2469
2470#endif
2471 dwc2_hsotg_process_control(hsotg, req->buf);
2472 }
2473 spin_unlock(&hsotg->lock);
2474}
2475
2476/**
2477 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
2478 * @hsotg: The device state.
2479 *
 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 * sent from the host.
2482 */
2483static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
2484{
2485 struct usb_request *req = hsotg->ctrl_req;
2486 struct dwc2_hsotg_req *hs_req = our_req(req);
2487 int ret;
2488
2489 dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
2490
2491 req->zero = 0;
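	/* a SETUP packet is always exactly 8 bytes on the wire */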
2492 req->length = 8;
2493 req->buf = hsotg->ctrl_buff;
2494 req->complete = dwc2_hsotg_complete_setup;
2495
2496 if (!list_empty(&hs_req->queue)) {
2497 dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
2498 return;
2499 }
2500
2501 hsotg->eps_out[0]->dir_in = 0;
2502 hsotg->eps_out[0]->send_zlp = 0;
2503 hsotg->ep0_state = DWC2_EP0_SETUP;
2504
2505 ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
2506 if (ret < 0) {
2507 dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
2508 /*
2509 * Don't think there's much we can do other than watch the
2510 * driver fail.
2511 */
2512 } else {
2513#ifdef CONFIG_DWC2_MON_TIMER
2514 if (hsotg->mon_timer_started) {
2515 pr_debug("del mon-timer\n");
2516 hrtimer_cancel(&hsotg->usb_mon_timer);
2517 hsotg->mon_timer_started = false;
2518 }
2519#endif
2520 }
2521}
2522
2523static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
2524 struct dwc2_hsotg_ep *hs_ep)
2525{
2526 u32 ctrl;
2527 u8 index = hs_ep->index;
2528 u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
2529 u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
2530
2531 if (hs_ep->dir_in)
2532 dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
2533 index);
2534 else
2535 dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
2536 index);
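	/*
	 * A ZLP is simply a transfer of one packet with a transfer size of
	 * zero: a zero-length descriptor in DDMA, or PKTCNT=1/XFERSIZE=0
	 * programmed below for the non-descriptor case.
	 */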
2537 if (using_desc_dma(hsotg)) {
		/* No specific buffer needed for ep0 ZLP */
2539 dma_addr_t dma = hs_ep->desc_list_dma;
2540
2541 if (!index)
2542 dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
2543
2544 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
2545 } else {
2546 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
2547 DXEPTSIZ_XFERSIZE(0),
2548 epsiz_reg);
2549 }
2550
2551 ctrl = dwc2_readl(hsotg, epctl_reg);
2552 ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */
2553 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
2554 ctrl |= DXEPCTL_USBACTEP;
2555 dwc2_writel(hsotg, ctrl, epctl_reg);
2556
2557 if (index == 0) {
2558 if (hs_ep->dir_in) {
2559 hsotg->ep0_rw[hsotg->ep0_rw_idx % NR_EP0_PROFILING] = (1 << 15) | 0;
2560 } else {
2561 hsotg->ep0_rw[hsotg->ep0_rw_idx % NR_EP0_PROFILING] = (0 << 15) | 0;
2562 }
2563 hsotg->ep0_rw_idx++;
2564 }
2565}
2566
2567/**
2568 * dwc2_hsotg_complete_request - complete a request given to us
2569 * @hsotg: The device state.
2570 * @hs_ep: The endpoint the request was on.
2571 * @hs_req: The request to complete.
2572 * @result: The result code (0 => Ok, otherwise errno)
2573 *
2574 * The given request has finished, so call the necessary completion
2575 * if it has one and then look to see if we can start a new request
2576 * on the endpoint.
2577 *
2578 * Note, expects the ep to already be locked as appropriate.
2579 */
2580static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
2581 struct dwc2_hsotg_ep *hs_ep,
2582 struct dwc2_hsotg_req *hs_req,
2583 int result)
2584{
2585 if (!hs_req) {
2586 dev_info(hsotg->dev, "%s: nothing to complete?\n", __func__);
2587 return;
2588 }
2589
2590 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
2591 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2592
2593 /*
2594 * only replace the status if we've not already set an error
2595 * from a previous transaction
2596 */
2597
2598 if (hs_req->req.status == -EINPROGRESS)
2599 hs_req->req.status = result;
2600
2601 if (using_dma(hsotg))
2602 dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2603
2604 dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2605
2606 hs_ep->req = NULL;
2607 list_del_init(&hs_req->queue);
2608 if (hs_ep->index != 0) {
2609 if (hs_ep->dir_in)
2610 hsotg->eps_in[hs_ep->index]->nr_dequeue++;
2611 else
2612 hsotg->eps_out[hs_ep->index]->nr_dequeue++;
2613 }
2614
2615 /*
2616 * call the complete request with the locks off, just in case the
2617 * request tries to queue more work for this endpoint.
2618 */
2619 if (hs_req->req.complete) {
2620 spin_unlock(&hsotg->lock);
2621 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2622 spin_lock(&hsotg->lock);
2623 }
2624
	/* In DDMA there's no need to proceed to starting the next ISOC request */
2626 if (using_desc_dma(hsotg) && hs_ep->isochronous)
2627 return;
2628
2629 /*
2630 * Look to see if there is anything else to do. Note, the completion
2631 * of the previous request may have caused a new request to be started
2632 * so be careful when doing this.
2633 */
2634
2635 if (!hs_ep->req && result >= 0)
2636 dwc2_gadget_start_next_request(hs_ep);
2637}
2638
2639/*
2640 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2641 * @hs_ep: The endpoint the request was on.
2642 *
 * Get the first request from the ep queue and determine the descriptor on
 * which the completion happened. SW discovers which descriptor is currently
 * in use by HW, adjusts the dma_address, and calculates the index of the
 * completed descriptor based on the value of the DEPDMA register. Update the
 * actual length of the request and give it back to the gadget.
2647 */
2648static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
2649{
2650 struct dwc2_hsotg *hsotg = hs_ep->parent;
2651 struct dwc2_hsotg_req *hs_req;
2652 struct usb_request *ureq;
2653 u32 desc_sts;
2654 u32 mask;
2655
2656 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2657
2658 /* Process only descriptors with buffer status set to DMA done */
2659 while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
2660 DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
2661
2662 hs_req = get_ep_head(hs_ep);
2663 if (!hs_req) {
2664 dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
2665 return;
2666 }
2667 ureq = &hs_req->req;
2668
2669 /* Check completion status */
2670 if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
2671 DEV_DMA_STS_SUCC) {
2672 mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
2673 DEV_DMA_ISOC_RX_NBYTES_MASK;
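			/*
			 * The NBYTES field counts the bytes left
			 * untransferred, so the completed amount is the
			 * request length minus that remainder.
			 */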
2674 ureq->actual = ureq->length - ((desc_sts & mask) >>
2675 DEV_DMA_ISOC_NBYTES_SHIFT);
2676
			/* Adjust the actual length for ISOC OUT if the
			 * length is not a multiple of 4
			 */
2680 if (!hs_ep->dir_in && ureq->length & 0x3)
2681 ureq->actual += 4 - (ureq->length & 0x3);
2682
2683 /* Set actual frame number for completed transfers */
2684 ureq->frame_number =
2685 (desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
2686 DEV_DMA_ISOC_FRNUM_SHIFT;
2687 }
2688
2689 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2690
2691 hs_ep->compl_desc++;
2692 if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
2693 hs_ep->compl_desc = 0;
2694 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2695 }
2696}
2697
2698/*
2699 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
2700 * @hs_ep: The isochronous endpoint.
2701 *
 * If the EP is ISOC OUT, the RX FIFO needs to be flushed to remove the
 * source of the BNA interrupt. Reset target_frame and next_desc to allow
 * ISOCs to restart on a NAK interrupt for the IN direction, or on an
 * OUTTKNEPDIS interrupt for the OUT direction.
2706 */
2707static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
2708{
2709 struct dwc2_hsotg *hsotg = hs_ep->parent;
2710
2711 if (!hs_ep->dir_in)
2712 dwc2_flush_rx_fifo(hsotg);
2713 dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
2714
2715 hs_ep->target_frame = TARGET_FRAME_INITIAL;
2716 hs_ep->next_desc = 0;
2717 hs_ep->compl_desc = 0;
2718}
2719
2720/**
2721 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2722 * @hsotg: The device state.
2723 * @ep_idx: The endpoint index for the data
2724 * @size: The size of data in the fifo, in bytes
2725 *
2726 * The FIFO status shows there is data to read from the FIFO for a given
2727 * endpoint, so sort out whether we need to read the data into a request
2728 * that has been made for that endpoint.
2729 */
2730static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
2731{
2732 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2733 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2734 int to_read;
2735 int max_req;
2736 int read_ptr;
2737
2738 if (!hs_req) {
2739 u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
2740 int ptr;
2741
2742 dev_info(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DXEPCTL=0x%08x)\n",
2744 __func__, size, ep_idx, epctl);
2745
2746 /* dump the data from the FIFO, we've nothing we can do */
2747 for (ptr = 0; ptr < size; ptr += 4)
2748 (void)dwc2_readl(hsotg, EPFIFO(ep_idx));
2749
2750 return;
2751 }
2752
2753 to_read = size;
2754 read_ptr = hs_req->req.actual;
2755 max_req = hs_req->req.length - read_ptr;
2756
2757 dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2758 __func__, to_read, max_req, read_ptr, hs_req->req.length);
2759
2760 if (to_read > max_req) {
		/*
		 * more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't deal with this */
2767 WARN_ON_ONCE(1);
2768 }
2769
2770 hs_ep->total_data += to_read;
2771 hs_req->req.actual += to_read;
2772 to_read = DIV_ROUND_UP(to_read, 4);
2773
2774 /*
2775 * note, we might over-write the buffer end by 3 bytes depending on
2776 * alignment of the data.
2777 */
2778 dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
2779 hs_req->req.buf + read_ptr, to_read);
2780}
2781
2782/**
2783 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2784 * @hsotg: The device instance
2785 * @dir_in: If IN zlp
2786 *
2787 * Generate a zero-length IN packet request for terminating a SETUP
2788 * transaction.
2789 *
2790 * Note, since we don't write any data to the TxFIFO, then it is
2791 * currently believed that we do not need to wait for any space in
2792 * the TxFIFO.
2793 */
2794static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
2795{
2796 /* eps_out[0] is used in both directions */
2797 hsotg->eps_out[0]->dir_in = dir_in;
2798 hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2799
2800 dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2801}
2802
2803/*
 * dwc2_gadget_get_xfersize_ddma - get the transferred byte count from descs
 * @hs_ep: The endpoint on which the transfer went
 *
 * Iterate over the endpoint's descriptor chain and count the bytes remaining
 * in the DMA descriptors after the transfer has completed. Used for non-ISOC
 * EPs.
2809 */
2810static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
2811{
2812 const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
2813 struct dwc2_hsotg *hsotg = hs_ep->parent;
2814 unsigned int bytes_rem = 0;
2815 unsigned int bytes_rem_correction = 0;
2816 struct dwc2_dma_desc *desc = hs_ep->desc_list;
2817 int i;
2818 u32 status;
2819 u32 mps = hs_ep->ep.maxpacket;
2820 int dir_in = hs_ep->dir_in;
2821
2822 if (!desc)
2823 return -EINVAL;
2824
	/*
	 * Interrupt OUT EP with an mps that is not a multiple of 4: the
	 * programmed transfer size is rounded up to a multiple of 4, so
	 * the remainder each descriptor reports includes that padding and
	 * must be corrected.
	 */
2826 if (hs_ep->index)
2827 if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
2828 bytes_rem_correction = 4 - (mps % 4);
2829
2830 for (i = 0; i < hs_ep->desc_count; ++i) {
2831 status = desc->status;
2832 bytes_rem += status & DEV_DMA_NBYTES_MASK;
2833 bytes_rem -= bytes_rem_correction;
2834
2835 if (status & DEV_DMA_STS_MASK)
2836 dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2837 i, status & DEV_DMA_STS_MASK);
2838
2839 if (status & DEV_DMA_L)
2840 break;
2841
2842 desc++;
2843 }
2844
2845 return bytes_rem;
2846}
2847
2848/**
2849 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2850 * @hsotg: The device instance
2851 * @epnum: The endpoint received from
2852 *
2853 * The RXFIFO has delivered an OutDone event, which means that the data
2854 * transfer for an OUT endpoint has been completed, either by a short
2855 * packet or by the finish of a transfer.
2856 */
2857static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
2858{
2859 u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
2860 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2861 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2862 struct usb_request *req = &hs_req->req;
2863 unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2864 int result = 0;
2865
2866 if (!hs_req) {
2867 dev_info(hsotg->dev, "%s: no request active\n", __func__);
2868 return;
2869 }
2870
2871 if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2872 dev_dbg(hsotg->dev, "zlp packet received\n");
2873 if (hs_req) {
2874 dwc2_ep0_complete_profiling(hsotg, hs_ep, 0, 0);
2875 }
2876 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2877 dwc2_hsotg_enqueue_setup(hsotg);
2878 return;
2879 }
2880
2881 if (using_desc_dma(hsotg))
2882 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2883
2884 if (using_dma(hsotg)) {
2885 unsigned int size_done;
2886
2887 /*
2888 * Calculate the size of the transfer by checking how much
2889 * is left in the endpoint size register and then working it
2890 * out from the amount we loaded for the transfer.
2891 *
2892 * We need to do this as DMA pointers are always 32bit aligned
2893 * so may overshoot/undershoot the transfer.
2894 */
2895
2896 size_done = hs_ep->size_loaded - size_left;
2897 size_done += hs_ep->last_load;
2898
2899 req->actual = size_done;
2900 }
2901
	/* if there is more of the request to do, schedule a new transfer */
2903 if (req->actual < req->length && size_left == 0) {
2904 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2905 return;
2906 }
2907
2908 if (req->actual < req->length && req->short_not_ok) {
2909 dev_info(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2910 __func__, req->actual, req->length);
2911
2912 /*
2913 * todo - what should we return here? there's no one else
2914 * even bothering to check the status.
2915 */
2916 }
2917
2918 /* DDMA IN status phase will start from StsPhseRcvd interrupt */
2919 if (!using_desc_dma(hsotg) && epnum == 0 &&
2920 hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2921 /* Move to STATUS IN */
2922 if (!hsotg->delayed_status)
2923 dwc2_hsotg_ep0_zlp(hsotg, true);
2924 }
2925
2926 /* Set actual frame number for completed transfers */
2927 if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2928 req->frame_number = hs_ep->target_frame;
2929 dwc2_gadget_incr_frame_num(hs_ep);
2930 }
2931
2932 if (epnum == 0 && hs_req)
2933 dwc2_ep0_complete_profiling(hsotg, hs_ep, hs_req->req.actual, 0);
2934 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2935}
2936
2937/**
2938 * dwc2_hsotg_handle_rx - RX FIFO has data
2939 * @hsotg: The device instance
2940 *
2941 * The IRQ handler has detected that the RX FIFO has some data in it
2942 * that requires processing, so find out what is in there and do the
2943 * appropriate read.
2944 *
2945 * The RXFIFO is a true FIFO, the packets coming out are still in packet
2946 * chunks, so if you have x packets received on an endpoint you'll get x
2947 * FIFO events delivered, each with a packet's worth of data in it.
2948 *
2949 * When using DMA, we should not be processing events from the RXFIFO
2950 * as the actual data should be sent to the memory directly and we turn
2951 * on the completion interrupts to get notifications of transfer completion.
2952 */
2953static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
2954{
2955 u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
2956 u32 epnum, status, size;
2957
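	/*
	 * Note that GRXSTSP is the read-and-pop form of the status register:
	 * each read consumes one entry from the RxFIFO status queue, so it
	 * must be read exactly once per event.
	 */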
2958 WARN_ON(using_dma(hsotg));
2959
2960 epnum = grxstsr & GRXSTS_EPNUM_MASK;
2961 status = grxstsr & GRXSTS_PKTSTS_MASK;
2962
2963 size = grxstsr & GRXSTS_BYTECNT_MASK;
2964 size >>= GRXSTS_BYTECNT_SHIFT;
2965
2966 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2967 __func__, grxstsr, size, epnum);
2968
2969 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
2970 case GRXSTS_PKTSTS_GLOBALOUTNAK:
2971 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2972 break;
2973
2974 case GRXSTS_PKTSTS_OUTDONE:
2975 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2976 dwc2_hsotg_read_frameno(hsotg));
2977
2978 if (!using_dma(hsotg))
2979 dwc2_hsotg_handle_outdone(hsotg, epnum);
2980 break;
2981
2982 case GRXSTS_PKTSTS_SETUPDONE:
2983 dev_dbg(hsotg->dev,
			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2985 dwc2_hsotg_read_frameno(hsotg),
2986 dwc2_readl(hsotg, DOEPCTL(0)));
2987 /*
2988 * Call dwc2_hsotg_handle_outdone here if it was not called from
2989 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
2990 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
2991 */
2992 if (hsotg->ep0_state == DWC2_EP0_SETUP)
2993 dwc2_hsotg_handle_outdone(hsotg, epnum);
2994 break;
2995
2996 case GRXSTS_PKTSTS_OUTRX:
2997 dwc2_hsotg_rx_data(hsotg, epnum, size);
2998 break;
2999
3000 case GRXSTS_PKTSTS_SETUPRX:
3001 dev_dbg(hsotg->dev,
			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
3003 dwc2_hsotg_read_frameno(hsotg),
3004 dwc2_readl(hsotg, DOEPCTL(0)));
3005
3006 WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
3007
3008 dwc2_hsotg_rx_data(hsotg, epnum, size);
3009 break;
3010
3011 default:
3012 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
3013 __func__, grxstsr);
3014
3015 dwc2_hsotg_dump(hsotg);
3016 break;
3017 }
3018}
3019
3020/**
3021 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
3022 * @mps: The maximum packet size in bytes.
3023 */
3024static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
3025{
3026 switch (mps) {
3027 case 64:
3028 return D0EPCTL_MPS_64;
3029 case 32:
3030 return D0EPCTL_MPS_32;
3031 case 16:
3032 return D0EPCTL_MPS_16;
3033 case 8:
3034 return D0EPCTL_MPS_8;
3035 }
3036
3037 /* bad max packet size, warn and return invalid result */
3038 WARN_ON(1);
3039 return (u32)-1;
3040}
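/*
 * Note that EP0's MPS field is an encoding rather than a byte count
 * (0 = 64 bytes down to 3 = 8 bytes), which is why the caller below treats
 * any encoded value above 3 as invalid for EP0.
 */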
3041
3042/**
3043 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
3044 * @hsotg: The driver state.
3045 * @ep: The index number of the endpoint
3046 * @mps: The maximum packet size in bytes
3047 * @mc: The multicount value
3048 * @dir_in: True if direction is in.
3049 *
3050 * Configure the maximum packet size for the given endpoint, updating
3051 * the hardware control registers to reflect this.
3052 */
3053static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
3054 unsigned int ep, unsigned int mps,
3055 unsigned int mc, unsigned int dir_in)
3056{
3057 struct dwc2_hsotg_ep *hs_ep;
3058 u32 reg;
3059
3060 hs_ep = index_to_ep(hsotg, ep, dir_in);
3061 if (!hs_ep)
3062 return;
3063
3064 if (ep == 0) {
3065 u32 mps_bytes = mps;
3066
3067 /* EP0 is a special case */
3068 mps = dwc2_hsotg_ep0_mps(mps_bytes);
3069 if (mps > 3)
3070 goto bad_mps;
3071 hs_ep->ep.maxpacket = mps_bytes;
3072 hs_ep->mc = 1;
3073 } else {
3074 if (mps > 1024)
3075 goto bad_mps;
3076 hs_ep->mc = mc;
3077 if (mc > 3)
3078 goto bad_mps;
3079 hs_ep->ep.maxpacket = mps;
3080 }
3081
3082 if (dir_in) {
3083 reg = dwc2_readl(hsotg, DIEPCTL(ep));
3084 reg &= ~DXEPCTL_MPS_MASK;
3085 reg |= mps;
3086 dwc2_writel(hsotg, reg, DIEPCTL(ep));
3087 } else {
3088 reg = dwc2_readl(hsotg, DOEPCTL(ep));
3089 reg &= ~DXEPCTL_MPS_MASK;
3090 reg |= mps;
3091 dwc2_writel(hsotg, reg, DOEPCTL(ep));
3092 }
3093
3094 return;
3095
3096bad_mps:
3097 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
3098}
3099
3100/**
3101 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
3102 * @hsotg: The driver state
3103 * @idx: The index for the endpoint (0..15)
3104 */
3105static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
3106{
3107 dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
3108 GRSTCTL);
3109
3110 /* wait until the fifo is flushed */
3111 if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
3112 dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
3113 __func__);
3114}
3115
3116/**
3117 * dwc2_hsotg_trytx - check to see if anything needs transmitting
3118 * @hsotg: The driver state
3119 * @hs_ep: The driver endpoint to check.
3120 *
3121 * Check to see if there is a request that has data to send, and if so
3122 * make an attempt to write data into the FIFO.
3123 */
3124static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
3125 struct dwc2_hsotg_ep *hs_ep)
3126{
3127 struct dwc2_hsotg_req *hs_req = hs_ep->req;
3128
3129 if (!hs_ep->dir_in || !hs_req) {
		/*
		 * if no request is enqueued, disable the interrupts
		 * for this endpoint, except for ep0
		 */
3134 if (hs_ep->index != 0)
3135 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
3136 hs_ep->dir_in, 0);
3137 return 0;
3138 }
3139
3140 if (hs_req->req.actual < hs_req->req.length) {
3141 dev_info(hsotg->dev, "trying to write more for ep%d\n",
3142 hs_ep->index);
3143 return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
3144 }
3145
3146 return 0;
3147}
3148
3149/**
3150 * dwc2_hsotg_complete_in - complete IN transfer
3151 * @hsotg: The device state.
3152 * @hs_ep: The endpoint that has just completed.
3153 *
3154 * An IN transfer has been completed, update the transfer's state and then
3155 * call the relevant completion routines.
3156 */
3157static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
3158 struct dwc2_hsotg_ep *hs_ep)
3159{
3160 struct dwc2_hsotg_req *hs_req = hs_ep->req;
3161 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
3162 int size_left, size_done;
3163
3164 if (!hs_req) {
3165 if (hs_ep->index == 0)
3166 dwc2_ep0_complete_profiling(hsotg, hs_ep, 0, 1);
3167 dev_dbg(hsotg->dev, "XferCompl but no req\n");
3168 return;
3169 }
3170
3171 /* Finish ZLP handling for IN EP0 transactions */
3172 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
3173 dev_dbg(hsotg->dev, "zlp packet sent\n");
		/*
		 * While sending the ZLP for DWC2_EP0_STATUS_IN, the EP
		 * direction was changed to IN. Change it back to complete the
		 * OUT transfer request.
		 */
3178 hs_ep->dir_in = 0;
3179
3180 dwc2_ep0_complete_profiling(hsotg, hs_ep, 0, 1);
3181 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
3182 if (hsotg->test_mode) {
3183 int ret;
3184
3185 ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
3186 if (ret < 0) {
3187 dev_info(hsotg->dev, "Invalid Test #%d\n",
3188 hsotg->test_mode);
3189 dwc2_hsotg_stall_ep0(hsotg);
3190 return;
3191 }
3192 }
3193 dwc2_hsotg_enqueue_setup(hsotg);
3194 return;
3195 }
3196
3197 /*
3198 * Calculate the size of the transfer by checking how much is left
3199 * in the endpoint size register and then working it out from
3200 * the amount we loaded for the transfer.
3201 *
3202 * We do this even for DMA, as the transfer may have incremented
3203 * past the end of the buffer (DMA transfers are always 32bit
3204 * aligned).
3205 */
3206 if (using_desc_dma(hsotg)) {
3207 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
3208 if (size_left < 0)
3209 dev_err(hsotg->dev, "error parsing DDMA results %d\n",
3210 size_left);
3211 } else {
3212 size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
3213 }
3214
3215 size_done = hs_ep->size_loaded - size_left;
3216 size_done += hs_ep->last_load;
3217
3218 if (hs_req->req.actual != size_done)
3219 dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
3220 __func__, hs_req->req.actual, size_done);
3221
3222 hs_req->req.actual = size_done;
3223 dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
3224 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
3225
3226 if (!size_left && hs_req->req.actual < hs_req->req.length) {
3227 dev_info(hsotg->dev, "%s trying more for req...\n", __func__);
3228 dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
3229 return;
3230 }
3231
	/* ZLP for all endpoints in non-DDMA mode; for ep0 only in the DATA IN stage */
3233 if (hs_ep->send_zlp) {
3234 hs_ep->send_zlp = 0;
3235 if (!using_desc_dma(hsotg)) {
3236 dwc2_hsotg_program_zlp(hsotg, hs_ep);
3237 /* transfer will be completed on next complete interrupt */
3238 return;
3239 }
3240 }
3241
3242 if (hs_ep->index == 0)
3243 dwc2_ep0_complete_profiling(hsotg, hs_ep, hs_req->req.actual, 1);
3244
3245 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
3246 /* Move to STATUS OUT */
3247 dwc2_hsotg_ep0_zlp(hsotg, false);
3248 return;
3249 }
3250
3251 /* Set actual frame number for completed transfers */
3252 if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
3253 hs_req->req.frame_number = hs_ep->target_frame;
3254 dwc2_gadget_incr_frame_num(hs_ep);
3255 }
3256
3257 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
3258}
3259
3260/**
3261 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
3262 * @hsotg: The device state.
3263 * @idx: Index of ep.
3264 * @dir_in: Endpoint direction 1-in 0-out.
3265 *
 * Read the interrupts for the endpoint with the given index and direction,
 * masking epint_reg with the corresponding mask.
3268 */
3269static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
3270 unsigned int idx, int dir_in)
3271{
3272 u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
3273 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
3274 u32 ints;
3275 u32 mask;
3276 u32 diepempmsk;
3277
3278 mask = dwc2_readl(hsotg, epmsk_reg);
3279 diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
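	/*
	 * TxFIFO-empty interrupts are enabled per-EP through DIEPEMPMSK
	 * rather than DIEPMSK, so fold that bit into the mask by hand; the
	 * SETUP_RCVD bit is always left unmasked.
	 */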
3280 mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
3281 mask |= DXEPINT_SETUP_RCVD;
3282
3283 ints = dwc2_readl(hsotg, epint_reg);
3284 ints &= mask;
3285 return ints;
3286}
3287
3288/**
3289 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
3290 * @hs_ep: The endpoint on which interrupt is asserted.
3291 *
3292 * This interrupt indicates that the endpoint has been disabled per the
3293 * application's request.
3294 *
 * For IN endpoints it flushes the TxFIFO; in the case of BULK it clears the
 * global non-periodic IN NAK (via DCTL_CGNPINNAK), and in the case of ISOC
 * it completes the current request.
 *
 * For ISOC-OUT endpoints it completes expired requests; if a request remains
 * queued, it is started.
3300 */
3301static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
3302{
3303 struct dwc2_hsotg *hsotg = hs_ep->parent;
3304 struct dwc2_hsotg_req *hs_req;
3305 unsigned char idx = hs_ep->index;
3306 int dir_in = hs_ep->dir_in;
3307 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
3308 int dctl = dwc2_readl(hsotg, DCTL);
3309
3310 dev_info(hsotg->dev, "%s: EPDisbld\n", __func__);
3311
3312 if (dir_in) {
3313 int epctl = dwc2_readl(hsotg, epctl_reg);
3314
3315 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
3316
3317 if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
3318 int dctl = dwc2_readl(hsotg, DCTL);
3319
3320 dctl |= DCTL_CGNPINNAK;
3321 dwc2_writel(hsotg, dctl, DCTL);
3322 }
3323 } else {
3324
3325 if (dctl & DCTL_GOUTNAKSTS) {
3326 dctl |= DCTL_CGOUTNAK;
3327 dwc2_writel(hsotg, dctl, DCTL);
3328 }
3329 }
3330
3331 if (!hs_ep->isochronous)
3332 return;
3333
3334 if (list_empty(&hs_ep->queue)) {
3335 dev_info(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
3336 __func__, hs_ep);
3337 return;
3338 }
3339
3340 do {
3341 hs_req = get_ep_head(hs_ep);
3342 if (hs_req) {
3343 hs_req->req.frame_number = hs_ep->target_frame;
3344 hs_req->req.actual = 0;
3345 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
3346 -ENODATA);
3347 }
3348 dwc2_gadget_incr_frame_num(hs_ep);
3349 /* Update current frame number value. */
3350 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3351 } while (dwc2_gadget_target_frame_elapsed(hs_ep));
3352}
3353
3354/**
3355 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
3356 * @ep: The endpoint on which interrupt is asserted.
3357 *
 * This is the starting point for an ISOC-OUT transfer; synchronization is
 * done with the first OUT token received from the host while the
 * corresponding EP is disabled.
 *
 * The device does not know the initial frame in which the OUT token will
 * arrive. For this, HW generates OUTTKNEPDIS - an OUT token was received
 * while the EP was disabled. Upon getting this interrupt, SW starts
 * calculating the frame for the next transfer.
3364 */
3365static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
3366{
3367 struct dwc2_hsotg *hsotg = ep->parent;
3368 struct dwc2_hsotg_req *hs_req;
3369 int dir_in = ep->dir_in;
3370
3371 if (dir_in || !ep->isochronous)
3372 return;
3373
3374 if (using_desc_dma(hsotg)) {
3375 if (ep->target_frame == TARGET_FRAME_INITIAL) {
3376 /* Start first ISO Out */
3377 ep->target_frame = hsotg->frame_number;
3378 dwc2_gadget_start_isoc_ddma(ep);
3379 }
3380 return;
3381 }
3382
3383 if (ep->target_frame == TARGET_FRAME_INITIAL) {
3384 u32 ctrl;
3385
3386 ep->target_frame = hsotg->frame_number;
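		/*
		 * For intervals greater than one frame the core must be told
		 * the parity of the starting frame: SETODDFR/SETEVENFR prime
		 * the Even/Odd (micro)frame bit in DOEPCTL so the first
		 * transfer is attempted on a frame of matching parity.
		 */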
3387 if (ep->interval > 1) {
3388 ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
3389 if (ep->target_frame & 0x1)
3390 ctrl |= DXEPCTL_SETODDFR;
3391 else
3392 ctrl |= DXEPCTL_SETEVENFR;
3393
3394 dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
3395 }
3396 }
3397
3398 while (dwc2_gadget_target_frame_elapsed(ep)) {
3399 hs_req = get_ep_head(ep);
3400 if (hs_req) {
3401 hs_req->req.frame_number = ep->target_frame;
3402 hs_req->req.actual = 0;
3403 dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
3404 }
3405
3406 dwc2_gadget_incr_frame_num(ep);
3407 /* Update current frame number value. */
3408 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3409 }
3410
3411 if (!ep->req)
3412 dwc2_gadget_start_next_request(ep);
3413
3414}
3415
3416static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
3417 struct dwc2_hsotg_ep *hs_ep);
3418
3419/**
3420 * dwc2_gadget_handle_nak - handle NAK interrupt
3421 * @hs_ep: The endpoint on which interrupt is asserted.
3422 *
 * This is the starting point for an ISOC-IN transfer; synchronization is
 * done with the first IN token received from the host while the
 * corresponding EP is disabled.
 *
 * The device does not know when the first IN token will arrive from the
 * host. On its arrival, HW generates two interrupts: 'IN token received
 * while FIFO empty' and 'NAK'. A NAK interrupt for ISOC-IN means the token
 * arrived and a ZLP was sent in response as there was no data in the FIFO.
 * SW uses this interrupt to obtain the frame in which the token arrived and
 * then, based on the interval, calculates the frame for the next transfer.
3432 */
3433static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
3434{
3435 struct dwc2_hsotg *hsotg = hs_ep->parent;
3436 struct dwc2_hsotg_req *hs_req;
3437 int dir_in = hs_ep->dir_in;
3438 u32 ctrl;
3439
3440 if (!dir_in || !hs_ep->isochronous)
3441 return;
3442
3443 if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
3444
3445 if (using_desc_dma(hsotg)) {
3446 hs_ep->target_frame = hsotg->frame_number;
3447 dwc2_gadget_incr_frame_num(hs_ep);
3448
3449 /* In service interval mode target_frame must
3450 * be set to last (u)frame of the service interval.
3451 */
3452 if (hsotg->params.service_interval) {
3453 /* Set target_frame to the first (u)frame of
3454 * the service interval
3455 */
3456 hs_ep->target_frame &= ~hs_ep->interval + 1;
3457
3458 /* Set target_frame to the last (u)frame of
3459 * the service interval
3460 */
3461 dwc2_gadget_incr_frame_num(hs_ep);
3462 dwc2_gadget_dec_frame_num_by_one(hs_ep);
3463 }
3464
3465 dwc2_gadget_start_isoc_ddma(hs_ep);
3466 return;
3467 }
3468
3469 hs_ep->target_frame = hsotg->frame_number;
3470 if (hs_ep->interval > 1) {
3471 u32 ctrl = dwc2_readl(hsotg,
3472 DIEPCTL(hs_ep->index));
3473 if (hs_ep->target_frame & 0x1)
3474 ctrl |= DXEPCTL_SETODDFR;
3475 else
3476 ctrl |= DXEPCTL_SETEVENFR;
3477
3478 dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
3479 }
3480 }
3481
3482 if (using_desc_dma(hsotg))
3483 return;
3484
3485 ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
3486 if (ctrl & DXEPCTL_EPENA)
3487 dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
3488 else
3489 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
3490
3491 while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
3492 hs_req = get_ep_head(hs_ep);
3493 if (hs_req) {
3494 hs_req->req.frame_number = hs_ep->target_frame;
3495 hs_req->req.actual = 0;
3496 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3497 }
3498
3499 dwc2_gadget_incr_frame_num(hs_ep);
3500 /* Update current frame number value. */
3501 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3502 }
3503
3504 if (!hs_ep->req)
3505 dwc2_gadget_start_next_request(hs_ep);
3506}
3507
3508/**
3509 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
3510 * @hsotg: The driver state
3511 * @idx: The index for the endpoint (0..15)
3512 * @dir_in: Set if this is an IN endpoint
3513 *
3514 * Process and clear any interrupt pending for an individual endpoint
3515 */
3516static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
3517 int dir_in)
3518{
3519 struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
3520 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
3521 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
3522 u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
3523 u32 ints;
3524 u32 ctrl;
3525
3526 ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
3527 ctrl = dwc2_readl(hsotg, epctl_reg);
3528
3529 /* Clear endpoint interrupts */
3530 dwc2_writel(hsotg, ints, epint_reg);
3531
3532 if (!hs_ep) {
3533 dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
3534 __func__, idx, dir_in ? "in" : "out");
3535 return;
3536 }
3537
3538 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
3539 __func__, idx, dir_in ? "in" : "out", ints);
3540
3541 /* Don't process XferCompl interrupt if it is a setup packet */
3542 if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
3543 ints &= ~DXEPINT_XFERCOMPL;
3544
	/*
	 * In DDMA, don't process the XferCompl interrupt if EP0 is still in
	 * the SETUP stage and XferCompl was generated without a SETUP phase
	 * done interrupt. SW should parse the received setup packet only
	 * after the host has exited the setup phase of the control transfer.
	 */
3551 if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
3552 hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
3553 ints &= ~DXEPINT_XFERCOMPL;
3554
3555 if (ints & DXEPINT_XFERCOMPL) {
3556 dev_dbg(hsotg->dev,
3557 "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
3558 __func__, dwc2_readl(hsotg, epctl_reg),
3559 dwc2_readl(hsotg, epsiz_reg));
3560
3561 /* In DDMA handle isochronous requests separately */
3562 if (using_desc_dma(hsotg) && hs_ep->isochronous) {
3563 dwc2_gadget_complete_isoc_request_ddma(hs_ep);
3564 } else if (dir_in) {
3565 /*
3566 * We get OutDone from the FIFO, so we only
3567 * need to look at completing IN requests here
			 * if operating in slave mode
3569 */
3570 if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
3571 dwc2_hsotg_complete_in(hsotg, hs_ep);
3572
3573 if (idx == 0 && !hs_ep->req)
3574 dwc2_hsotg_enqueue_setup(hsotg);
3575 } else if (using_dma(hsotg)) {
3576 /*
3577 * We're using DMA, we need to fire an OutDone here
3578 * as we ignore the RXFIFO.
3579 */
3580 if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
3581 dwc2_hsotg_handle_outdone(hsotg, idx);
3582 }
3583 }
3584
3585 if (ints & DXEPINT_EPDISBLD)
3586 dwc2_gadget_handle_ep_disabled(hs_ep);
3587
3588 if (ints & DXEPINT_OUTTKNEPDIS)
3589 dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
3590
3591 if (ints & DXEPINT_NAKINTRPT)
3592 dwc2_gadget_handle_nak(hs_ep);
3593
3594 if (ints & DXEPINT_AHBERR)
3595 dev_info(hsotg->dev, "%s: AHBErr\n", __func__);
3596
3597 if (ints & DXEPINT_SETUP) { /* Setup or Timeout */
3598 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
3599
3600 if (using_dma(hsotg) && idx == 0) {
3601 /*
			 * this is the notification that we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO; instead we need to process
			 * the setup here.
3606 */
3607
3608 if (dir_in)
3609 WARN_ON_ONCE(1);
3610 else
3611 dwc2_hsotg_handle_outdone(hsotg, 0);
3612 }
3613 }
3614
3615 if (ints & DXEPINT_STSPHSERCVD) {
3616 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
3617
3618 /* Safety check EP0 state when STSPHSERCVD asserted */
3619 if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
3620 /* Move to STATUS IN for DDMA */
3621 if (using_desc_dma(hsotg)) {
3622 if (!hsotg->delayed_status)
3623 dwc2_hsotg_ep0_zlp(hsotg, true);
3624 else
					/* In the case of a 3-stage Control
					 * Write with delayed status, when the
					 * Status IN transfer started before
					 * STSPHSERCVD was asserted, the NAKSTS
					 * bit was not cleared by CNAK in
					 * dwc2_hsotg_start_req(). Clear NAKSTS
					 * now to allow the transfer to
					 * complete.
					 */
3632 dwc2_set_bit(hsotg, DIEPCTL(0),
3633 DXEPCTL_CNAK);
3634 }
3635 }
3636
3637 }
3638
3639 if (ints & DXEPINT_BACK2BACKSETUP)
3640 dev_info(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
3641
3642 if (ints & DXEPINT_BNAINTR) {
3643 dev_info_ratelimited(hsotg->dev, "%s: ep%d(%d) BNA interrupt\n", __func__, idx, dir_in);
3644 if (hs_ep->isochronous)
3645 dwc2_gadget_handle_isoc_bna(hs_ep);
3646 }
3647
3648 if (dir_in && !hs_ep->isochronous) {
3649 /* not sure if this is important, but we'll clear it anyway */
3650 if (ints & DXEPINT_INTKNTXFEMP) {
3651 dev_info(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
3652 __func__, idx);
3653 }
3654
3655 /* this probably means something bad is happening */
3656 if (ints & DXEPINT_INTKNEPMIS) {
3657 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
3658 __func__, idx);
3659 }
3660
3661 /* FIFO has space or is empty (see GAHBCFG) */
3662 if (hsotg->dedicated_fifos &&
3663 ints & DXEPINT_TXFEMP) {
3664 dev_info(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
3665 __func__, idx);
3666 if (!using_dma(hsotg))
3667 dwc2_hsotg_trytx(hsotg, hs_ep);
3668 }
3669 }
3670}
3671
3672/**
3673 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3674 * @hsotg: The device state.
3675 *
3676 * Handle updating the device settings after the enumeration phase has
3677 * been completed.
3678 */
3679static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
3680{
3681 u32 dsts = dwc2_readl(hsotg, DSTS);
3682 int ep0_mps = 0, ep_mps = 8;
3683
3684 /*
3685 * This should signal the finish of the enumeration phase
3686 * of the USB handshaking, so we should now know what rate
3687 * we connected at.
3688 */
3689
3690 dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3691
3692 /*
	 * note, since we're limited by the size of the transfer on EP0, and
	 * it seems IN transfers must be an even number of packets, we do
	 * not advertise a 64-byte MPS on EP0.
3696 */
3697
3698 /* catch both EnumSpd_FS and EnumSpd_FS48 */
3699 switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
3700 case DSTS_ENUMSPD_FS:
3701 case DSTS_ENUMSPD_FS48:
3702 hsotg->gadget.speed = USB_SPEED_FULL;
3703 ep0_mps = EP0_MPS_LIMIT;
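		/* 1023 is the largest full-speed (isochronous) payload per frame */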
3704 ep_mps = 1023;
3705 break;
3706
3707 case DSTS_ENUMSPD_HS:
3708 hsotg->gadget.speed = USB_SPEED_HIGH;
3709 ep0_mps = EP0_MPS_LIMIT;
3710 ep_mps = 1024;
3711 break;
3712
3713 case DSTS_ENUMSPD_LS:
3714 hsotg->gadget.speed = USB_SPEED_LOW;
3715 ep0_mps = 8;
3716 ep_mps = 8;
3717 /*
3718 * note, we don't actually support LS in this driver at the
3719 * moment, and the documentation seems to imply that it isn't
3720 * supported by the PHYs on some of the devices.
3721 */
3722 break;
3723 }
3724
3725#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
3726 usb_os_detect_reset_state();
3727#endif
3728 dev_info(hsotg->dev, "new device is %s\n",
3729 usb_speed_string(hsotg->gadget.speed));
3730
3731 /*
3732 * we should now know the maximum packet size for an
3733 * endpoint, so set the endpoints to a default value.
3734 */
3735
3736 if (ep0_mps) {
3737 int i;
3738 /* Initialize ep0 for both in and out directions */
3739 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
3740 dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
3741 for (i = 1; i < hsotg->num_of_eps; i++) {
3742 if (hsotg->eps_in[i])
3743 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3744 0, 1);
3745 if (hsotg->eps_out[i])
3746 dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3747 0, 0);
3748 }
3749 }
3750
3751 /* ensure after enumeration our EP0 is active */
3752
3753 dwc2_hsotg_enqueue_setup(hsotg);
3754
3755 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3756 dwc2_readl(hsotg, DIEPCTL0),
3757 dwc2_readl(hsotg, DOEPCTL0));
3758}
3759
3760/**
3761 * kill_all_requests - remove all requests from the endpoint's queue
3762 * @hsotg: The device state.
3763 * @ep: The endpoint the requests may be on.
3764 * @result: The result code to use.
3765 *
3766 * Go through the requests on the given endpoint and mark them
3767 * completed with the given result code.
3768 */
3769static void kill_all_requests(struct dwc2_hsotg *hsotg,
3770 struct dwc2_hsotg_ep *ep,
3771 int result)
3772{
3773 unsigned int size;
3774
3775 ep->req = NULL;
3776
3777 while (!list_empty(&ep->queue)) {
3778 struct dwc2_hsotg_req *req = get_ep_head(ep);
3779
3780 dwc2_hsotg_complete_request(hsotg, ep, req, result);
3781 }
3782
3783 if (!hsotg->dedicated_fifos)
3784 return;
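	/*
	 * DTXFSTS reports the free TxFIFO space in 32-bit words (hence the
	 * multiply by four); if less than the whole FIFO is free, stale
	 * data from the killed requests is still queued and must be flushed.
	 */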
3785 size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3786 if (size < ep->fifo_size)
3787 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3788}
3789
3790/**
3791 * dwc2_hsotg_disconnect - disconnect service
3792 * @hsotg: The device state.
3793 *
3794 * The device has been disconnected. Remove all current
3795 * transactions and signal the gadget driver that this
3796 * has happened.
3797 */
3798void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3799{
3800 unsigned int ep;
3801
3802 if (!hsotg->connected)
3803 return;
3804
3805 hsotg->connected = 0;
3806 hsotg->test_mode = 0;
3807
3808 /* all endpoints should be shutdown */
3809 for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3810 if (hsotg->eps_in[ep])
3811 kill_all_requests(hsotg, hsotg->eps_in[ep],
3812 -ESHUTDOWN);
3813 if (hsotg->eps_out[ep])
3814 kill_all_requests(hsotg, hsotg->eps_out[ep],
3815 -ESHUTDOWN);
3816 }
3817
3818 call_gadget(hsotg, disconnect);
3819 hsotg->lx_state = DWC2_L3;
3820
3821 usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3822}
3823
3824/**
3825 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3826 * @hsotg: The device state.
3827 * @periodic: True if this is a periodic FIFO interrupt
3828 */
3829static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3830{
3831 struct dwc2_hsotg_ep *ep;
3832 int epno, ret;
3833
3834 /* look through for any more data to transmit */
3835 for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3836 ep = index_to_ep(hsotg, epno, 1);
3837
3838 if (!ep)
3839 continue;
3840
3841 if (!ep->dir_in)
3842 continue;
3843
3844 if ((periodic && !ep->periodic) ||
3845 (!periodic && ep->periodic))
3846 continue;
3847
3848 ret = dwc2_hsotg_trytx(hsotg, ep);
3849 if (ret < 0)
3850 break;
3851 }
3852}
3853
3854/* IRQ flags which will trigger a retry around the IRQ loop */
3855#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
3856 GINTSTS_PTXFEMP | \
3857 GINTSTS_RXFLVL)
3858
3859static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
3860/**
3861 * dwc2_hsotg_core_init_disconnected - issue soft reset to the core
3862 * @hsotg: The device state
3863 * @is_usb_reset: True when called as part of a USB bus reset
3864 *
3865 * Issue a soft reset to the core, and await the core finishing it.
3866 */
3867void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3868 bool is_usb_reset)
3869{
3870 u32 intmsk;
3871 u32 val;
3872 u32 usbcfg;
3873 u32 dcfg = 0;
3874 int ep;
3875
3876#ifdef CONFIG_DWC2_MON_TIMER
3877 if (hsotg->mon_timer_started) {
3878 pr_info("cancel mon timer\n");
3879 hrtimer_cancel(&hsotg->usb_mon_timer);
3880 hsotg->mon_timer_started = false;
3881 }
3882#endif
3883
3884#if defined(CONFIG_DWC2_MON_TIMER) || defined(CONFIG_USB_RESTART_ON_RESET)
3885 if (work_pending(&hsotg->usb_restart_work.work))
3886 cancel_delayed_work(&hsotg->usb_restart_work);
3887 hsotg->usb_do_restart = 0;
3888#endif
3889
3890 /* Kill any ep0 requests as controller will be reinitialized */
3891 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3892
3893 if (!is_usb_reset) {
3894 if (dwc2_core_reset(hsotg, true))
3895 return;
3896 } else {
3897 /* all endpoints should be shutdown */
3898 for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3899 if (hsotg->eps_in[ep])
3900 dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3901 if (hsotg->eps_out[ep])
3902 dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3903 }
3904 }
3905
3906 /*
3907	 * we must now enable ep0 so it is ready for host detection and
3908	 * the subsequent SET_CONFIGURATION.
3909 */
3910
3911 /* keep other bits untouched (so e.g. forced modes are not lost) */
3912 usbcfg = dwc2_readl(hsotg, GUSBCFG);
3913 usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
3914 usbcfg |= GUSBCFG_TOUTCAL(7);
3915
3916 /* remove the HNP/SRP and set the PHY */
3917 usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
3918 dwc2_writel(hsotg, usbcfg, GUSBCFG);
3919
3920 dwc2_phy_init(hsotg, true);
3921
3922 dwc2_hsotg_init_fifo(hsotg);
3923
3924 if (!is_usb_reset)
3925 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3926
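	/* program the IN endpoint mismatch count (EPMisCnt) */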
3927 dcfg |= DCFG_EPMISCNT(1);
3928
3929 switch (hsotg->params.speed) {
3930 case DWC2_SPEED_PARAM_LOW:
3931 dcfg |= DCFG_DEVSPD_LS;
3932 break;
3933 case DWC2_SPEED_PARAM_FULL:
3934 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3935 dcfg |= DCFG_DEVSPD_FS48;
3936 else
3937 dcfg |= DCFG_DEVSPD_FS;
3938 break;
3939 default:
3940 dcfg |= DCFG_DEVSPD_HS;
3941 }
3942
3943 if (hsotg->params.ipg_isoc_en)
3944 dcfg |= DCFG_IPG_ISOC_SUPPORDED;
3945
3946 dwc2_writel(hsotg, dcfg, DCFG);
3947
3948 /* Clear any pending OTG interrupts */
3949 dwc2_writel(hsotg, 0xffffffff, GOTGINT);
3950
3951 /* Clear any pending interrupts */
3952 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
3953 intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
3954 GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
3955 GINTSTS_USBRST | GINTSTS_RESETDET |
3956 GINTSTS_ENUMDONE | GINTSTS_OTGINT |
3957 GINTSTS_USBSUSP | GINTSTS_WKUPINT |
3958 GINTSTS_LPMTRANRCVD;
3959
3960 if (!using_desc_dma(hsotg))
3961 intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
3962
3963 if (!hsotg->params.external_id_pin_ctl)
3964 intmsk |= GINTSTS_CONIDSTSCHNG;
3965
3966 dwc2_writel(hsotg, intmsk, GINTMSK);
3967
3968 if (using_dma(hsotg)) {
3969 dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
3970 hsotg->params.ahbcfg,
3971 GAHBCFG);
3972
3973 /* Set DDMA mode support in the core if needed */
3974 if (using_desc_dma(hsotg))
3975 dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
3976
3977 } else {
3978 dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
3979 (GAHBCFG_NP_TXF_EMP_LVL |
3980 GAHBCFG_P_TXF_EMP_LVL) : 0) |
3981 GAHBCFG_GLBL_INTR_EN, GAHBCFG);
3982 }
3983
3984 /*
3985 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3986	 * when we have no data to transfer. Otherwise we get flooded by
3987 * interrupts.
3988 */
3989
3990 dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3991 DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
3992 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
3993 DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
3994 DIEPMSK);
3995
3996 /*
3997 * don't need XferCompl, we get that from RXFIFO in slave mode. In
3998 * DMA mode we may need this and StsPhseRcvd.
3999 */
4000 dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
4001 DOEPMSK_STSPHSERCVDMSK) : 0) |
4002 DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
4003 DOEPMSK_SETUPMSK,
4004 DOEPMSK);
4005
4006 /* Enable BNA interrupt for DDMA */
4007 if (using_desc_dma(hsotg)) {
4008 dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
4009 dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
4010 }
4011
4012 /* Enable Service Interval mode if supported */
4013 if (using_desc_dma(hsotg) && hsotg->params.service_interval)
4014 dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
4015
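	/* mask all per-endpoint interrupts; EP0's are re-enabled below */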
4016 dwc2_writel(hsotg, 0, DAINTMSK);
4017
4018 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
4019 dwc2_readl(hsotg, DIEPCTL0),
4020 dwc2_readl(hsotg, DOEPCTL0));
4021
4022 /* enable in and out endpoint interrupts */
4023 dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
4024
4025 /*
4026 * Enable the RXFIFO when in slave mode, as this is how we collect
4027 * the data. In DMA mode, we get events from the FIFO but also
4028 * things we cannot process, so do not use it.
4029 */
4030 if (!using_dma(hsotg))
4031 dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
4032
4033 /* Enable interrupts for EP0 in and out */
4034 dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
4035 dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
4036
4037 if (!is_usb_reset) {
4038 dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
4039 udelay(10); /* see openiboot */
4040 dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
4041 }
4042
4043 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
4044
4045 /*
4046	 * the manual lists DxEPCTL_USBActEp as read-only, but it seems to
4047	 * be set by writing to the EPCTL register.
4048 */
4049
4050 /* set to read 1 8byte packet */
4051 dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
4052 DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
4053
4054 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
4055 DXEPCTL_CNAK | DXEPCTL_EPENA |
4056 DXEPCTL_USBACTEP,
4057 DOEPCTL0);
4058
4059 /* enable, but don't activate EP0in */
4060 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
4061 DXEPCTL_USBACTEP, DIEPCTL0);
4062
4063 /* clear global NAKs */
4064 val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
4065 if (!is_usb_reset)
4066 val |= DCTL_SFTDISCON;
4067 dwc2_set_bit(hsotg, DCTL, val);
4068
4069 /* configure the core to support LPM */
4070 dwc2_gadget_init_lpm(hsotg);
4071
4072 /* program GREFCLK register if needed */
4073 if (using_desc_dma(hsotg) && hsotg->params.service_interval)
4074 dwc2_gadget_program_ref_clk(hsotg);
4075
4076	/* must be at least 3 ms to allow the bus to see the disconnect */
4077 mdelay(3);
4078
4079 hsotg->lx_state = DWC2_L0;
4080
4081 dwc2_hsotg_enqueue_setup(hsotg);
4082
4083 hsotg->bus_reset_received = 0;
4084 hsotg->suspend_received = 0;
4085 hsotg->ep0_rw_idx = 0;
4086 hsotg->ep0_rw_complete_idx = 0;
4087 hsotg->enum_done_ms = 0;
4088
4089 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
4090 dwc2_readl(hsotg, DIEPCTL0),
4091 dwc2_readl(hsotg, DOEPCTL0));
4092}
4093
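/**
 * dwc2_hsotg_core_disconnect - put the core into soft-disconnect
 * @hsotg: The device state
 *
 * Assert soft-disconnect so that the host sees the device drop off
 * the bus.
 */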
4094static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
4095{
4096 /* set the soft-disconnect bit */
4097 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
4098 hsotg->active = 0;
4099}
4100
4101void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
4102{
4103 /* dwc2_dump_regs(hsotg); */
4104 /* remove the soft-disconnect and let's go */
4105 dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
4106 hsotg->active = 1;
4107}
4108
4109/**
4110 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
4111 * @hsotg: The device state.
4112 *
4113 * This interrupt indicates one of the following conditions occurred while
4114 * transmitting an ISOC transaction.
4115 * - Corrupted IN Token for ISOC EP.
4116 * - Packet not complete in FIFO.
4117 *
4118 * The following actions will be taken:
4119 * - Determine the EP
4120 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
4121 */
4122static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
4123{
4124 struct dwc2_hsotg_ep *hs_ep;
4125 u32 epctrl;
4126 u32 daintmsk;
4127 u32 idx;
4128
4129 dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
4130
4131 daintmsk = dwc2_readl(hsotg, DAINTMSK);
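	/* IN endpoint interrupt bits occupy the low half of DAINTMSK */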
4132
4133 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4134 hs_ep = hsotg->eps_in[idx];
4135		/* Process only unmasked ISOC EPs */
4136 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
4137 continue;
4138
4139 epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
4140 if ((epctrl & DXEPCTL_EPENA) &&
4141 dwc2_gadget_target_frame_elapsed(hs_ep)) {
4142 epctrl |= DXEPCTL_SNAK;
4143 epctrl |= DXEPCTL_EPDIS;
4144 dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
4145 }
4146 }
4147
4148 /* Clear interrupt */
4149 dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
4150}
4151
4152/**
4153 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
4154 * @hsotg: The device state.
4155 *
4156 * This interrupt indicates one of the following conditions occurred while
4157 * handling an ISOC OUT transaction.
4158 * - Corrupted OUT Token for ISOC EP.
4159 * - Packet not complete in FIFO.
4160 *
4161 * The following actions will be taken:
4162 * - Determine the EP
4163 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
4164 */
4165static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
4166{
4167 u32 gintsts;
4168 u32 gintmsk;
4169 u32 daintmsk;
4170 u32 epctrl;
4171 struct dwc2_hsotg_ep *hs_ep;
4172 int idx;
4173
4174 dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
4175
4176 daintmsk = dwc2_readl(hsotg, DAINTMSK);
4177 daintmsk >>= DAINT_OUTEP_SHIFT;
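	/* after the shift, bit N corresponds to OUT endpoint N */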
4178
4179 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4180 hs_ep = hsotg->eps_out[idx];
4181		/* Process only unmasked ISOC EPs */
4182 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
4183 continue;
4184
4185 epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
4186 if ((epctrl & DXEPCTL_EPENA) &&
4187 dwc2_gadget_target_frame_elapsed(hs_ep)) {
4188 /* Unmask GOUTNAKEFF interrupt */
4189 gintmsk = dwc2_readl(hsotg, GINTMSK);
4190 gintmsk |= GINTSTS_GOUTNAKEFF;
4191 dwc2_writel(hsotg, gintmsk, GINTMSK);
4192
4193 gintsts = dwc2_readl(hsotg, GINTSTS);
4194 if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
4195 dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
4196 break;
4197 }
4198 }
4199 }
4200
4201 /* Clear interrupt */
4202 dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
4203}
4204
4205/**
4206 * dwc2_hsotg_irq - handle device interrupt
4207 * @irq: The IRQ number triggered
4208 * @pw: The pw value supplied when the handler was registered.
4209 */
4210static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
4211{
4212 struct dwc2_hsotg *hsotg = pw;
4213 int retry_count = 8;
4214 u32 gintsts;
4215 u32 gintmsk;
4216 s64 ktime_ms_now;
4217 u32 tmp_do_restart;
4218
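	/* in host mode the HCD interrupt handler services this IRQ */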
4219 if (!dwc2_is_device_mode(hsotg))
4220 return IRQ_NONE;
4221
4222 spin_lock(&hsotg->lock);
4223irq_retry:
4224 gintsts = dwc2_readl(hsotg, GINTSTS);
4225 gintmsk = dwc2_readl(hsotg, GINTMSK);
4226
4227 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
4228 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
4229
4230 gintsts &= gintmsk;
4231
4232 if (gintsts & GINTSTS_RESETDET) {
4233 dev_info(hsotg->dev, "%s: USBRstDet\n", __func__);
4234
4235 dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
4236
4237 /* This event must be used only if controller is suspended */
4238 if (hsotg->lx_state == DWC2_L2) {
4239 dwc2_exit_partial_power_down(hsotg, true);
4240 hsotg->lx_state = DWC2_L0;
4241 }
4242 }
4243
4244 if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
4245 u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
4246 u32 connected = hsotg->connected;
4247
4248 dev_info(hsotg->dev, "%s: USBRst, GNPTXSTS=%08x\n", __func__, dwc2_readl(hsotg, GNPTXSTS));
4249 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
4250 dwc2_readl(hsotg, GNPTXSTS));
4251
4252 dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
4253
4254 if (hsotg->gadget.state >= USB_STATE_CONFIGURED) {
4255 dwc2_dump_regs(hsotg);
4256#ifdef CONFIG_USB_RESTART_ON_RESET
4257 hsotg->usb_do_restart = 1;
4258#endif
4259 }
4260
4261 /* Report disconnection if it is not already done. */
4262 dwc2_hsotg_disconnect(hsotg);
4263
4264 /* Reset device address to zero */
4265 dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
4266
4267 if (usb_status & GOTGCTL_BSESVLD && connected) {
4268 tmp_do_restart = hsotg->usb_do_restart;
4269 dev_info(hsotg->dev, "%s: GOTGCTL_BSESVLD\n", __func__);
4270 dwc2_hsotg_core_init_disconnected(hsotg, true);
4271 hsotg->usb_do_restart = tmp_do_restart;
4272 }
4273#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
4274 usb_os_detect_reset_state();
4275#endif
4276 hsotg->bus_reset_received = true;
4277 hsotg->enum_done_ms = 0;
4278 }
4279
4280 if (gintsts & GINTSTS_ENUMDONE) {
4281 dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
4282
4283 dwc2_hsotg_irq_enumdone(hsotg);
4284
4285 hsotg->enum_done_ms = ktime_to_ms(ktime_get());
4286 }
4287
4288 if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
4289 u32 daint = dwc2_readl(hsotg, DAINT);
4290 u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
4291 u32 daint_out, daint_in;
4292 int ep;
4293
4294 daint &= daintmsk;
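		/* DAINT packs IN endpoint bits in the low half and OUT
		 * endpoint bits in the high half; split them apart here.
		 */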
4295 daint_out = daint >> DAINT_OUTEP_SHIFT;
4296 daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
4297
4298 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
4299
4300 for (ep = 0; ep < hsotg->num_of_eps && daint_out;
4301 ep++, daint_out >>= 1) {
4302 if (daint_out & 1)
4303 dwc2_hsotg_epint(hsotg, ep, 0);
4304 }
4305
4306 for (ep = 0; ep < hsotg->num_of_eps && daint_in;
4307 ep++, daint_in >>= 1) {
4308 if (daint_in & 1)
4309 dwc2_hsotg_epint(hsotg, ep, 1);
4310 }
4311 }
4312
4313 /* check both FIFOs */
4314
4315 if (gintsts & GINTSTS_NPTXFEMP) {
4316 dev_info(hsotg->dev, "NPTxFEmp\n");
4317
4318 /*
4319 * Disable the interrupt to stop it happening again
4320 * unless one of these endpoint routines decides that
4321 * it needs re-enabling
4322 */
4323
4324 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
4325 dwc2_hsotg_irq_fifoempty(hsotg, false);
4326 }
4327
4328 if (gintsts & GINTSTS_PTXFEMP) {
4329 dev_info(hsotg->dev, "PTxFEmp\n");
4330
4331 /* See note in GINTSTS_NPTxFEmp */
4332
4333 dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
4334 dwc2_hsotg_irq_fifoempty(hsotg, true);
4335 }
4336
4337 if (gintsts & GINTSTS_RXFLVL) {
4338 /*
4339 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
4340 * we need to retry dwc2_hsotg_handle_rx if this is still
4341 * set.
4342 */
4343
4344 dwc2_hsotg_handle_rx(hsotg);
4345 }
4346
4347 if (gintsts & GINTSTS_ERLYSUSP) {
4348 hsotg->suspend_received = true;
4349 ktime_ms_now = ktime_to_ms(ktime_get());
4350 dev_info(hsotg->dev, "GINTSTS_ErlySusp: %lld %lld %d\n",
4351 ktime_ms_now, hsotg->enum_done_ms, hsotg->ep0_rw_complete_idx);
4352 dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
4353 dwc2_suspend_gadget(hsotg);
4354 if (hsotg->enum_done_ms &&
4355 ((ktime_ms_now - hsotg->enum_done_ms) < USB_ENUM_SUSPEND_DIFF) &&
4356 (hsotg->ep0_rw_complete_idx == 0)) {
4357 hsotg->usb_do_restart = 1;
4358 }
4359 }
4360
4361 /*
4362	 * these next two seem to crop up occasionally, causing the core
4363	 * to shut down the USB transfer, so try clearing them and logging
4364 * the occurrence.
4365 */
4366
4367 if (gintsts & GINTSTS_GOUTNAKEFF) {
4368 u8 idx;
4369 u32 epctrl;
4370 u32 gintmsk;
4371 u32 daintmsk;
4372 struct dwc2_hsotg_ep *hs_ep;
4373
4374 daintmsk = dwc2_readl(hsotg, DAINTMSK);
4375 daintmsk >>= DAINT_OUTEP_SHIFT;
4376 /* Mask this interrupt */
4377 gintmsk = dwc2_readl(hsotg, GINTMSK);
4378 gintmsk &= ~GINTSTS_GOUTNAKEFF;
4379 dwc2_writel(hsotg, gintmsk, GINTMSK);
4380
4381 dev_info(hsotg->dev, "!!!!GOUTNakEff triggered\n");
4382 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4383 hs_ep = hsotg->eps_out[idx];
4384			/* Process only unmasked ISOC EPs */
4385 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
4386 continue;
4387
4388 epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
4389
4390 if (epctrl & DXEPCTL_EPENA) {
4391 epctrl |= DXEPCTL_SNAK;
4392 epctrl |= DXEPCTL_EPDIS;
4393 dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
4394 }
4395 }
4396
4397 /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
4398 }
4399
4400 if (gintsts & GINTSTS_GINNAKEFF) {
4401 dev_info(hsotg->dev, "GINNakEff triggered\n");
4402
4403 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
4404
4405 dwc2_hsotg_dump(hsotg);
4406 }
4407
4408 if (gintsts & GINTSTS_INCOMPL_SOIN)
4409 dwc2_gadget_handle_incomplete_isoc_in(hsotg);
4410
4411 if (gintsts & GINTSTS_INCOMPL_SOOUT)
4412 dwc2_gadget_handle_incomplete_isoc_out(hsotg);
4413
4414 /*
4415	 * if we've had FIFO events, we should try to go around the
4416 * loop again to see if there's any point in returning yet.
4417 */
4418
4419 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
4420 goto irq_retry;
4421
4422 if (gintsts & GINTSTS_USBSUSP) {
4423 hsotg->suspend_received = true;
4424 dev_info(hsotg->dev, "%s: USBSUSP\n", __func__);
4425 dwc2_suspend_gadget(hsotg);
4426 }
4427
4428 if (gintsts & GINTSTS_WKUPINT) {
4429 dev_info(hsotg->dev, "%s: USBWKUP\n", __func__);
4430 dwc2_resume_gadget(hsotg);
4431 }
4432
4433	/* Check WKUP_ALERT interrupt */
4434 if (hsotg->params.service_interval)
4435 dwc2_gadget_wkup_alert_handler(hsotg);
4436
4437#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
4438 if (os_detect_is_done()) {
4439 dwc2_hsotg_core_disconnect(hsotg);
4440 dwc2_hsotg_disconnect(hsotg);
4441 hsotg->enabled = 0;
4442 os_detect_clear_done();
4443#ifdef CONFIG_DWC2_MON_TIMER
4444 if (hsotg->mon_timer_started) {
4445 pr_info("del mon timer\n");
4446 hrtimer_cancel(&hsotg->usb_mon_timer);
4447 hsotg->mon_timer_started = false;
4448 }
4449#endif
4450 }
4451#endif
4452
4453#ifdef CONFIG_USB_RESTART_ON_RESET
4454 if (hsotg->usb_do_restart) {
4455 dwc2_hsotg_core_disconnect(hsotg);
4456 dwc2_hsotg_disconnect(hsotg);
4457 hsotg->enabled = 0;
4458 pr_info("usb restart\n");
4459 if (work_pending(&hsotg->usb_restart_work.work))
4460 cancel_delayed_work(&hsotg->usb_restart_work);
4461 schedule_delayed_work(&hsotg->usb_restart_work, 0);
4462 dwc2_writel(hsotg, 0, DAINTMSK);
4463 /* Clear any pending OTG interrupts */
4464 dwc2_writel(hsotg, 0xffffffff, GOTGINT);
4465 /* Clear any pending interrupts */
4466 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
4467 }
4468#endif
4469 spin_unlock(&hsotg->lock);
4470
4471 return IRQ_HANDLED;
4472}
4473
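/**
 * dwc2_hsotg_ep_stop_xfr - stop an in-progress transfer on an endpoint
 * @hsotg: The device state
 * @hs_ep: The endpoint to stop
 *
 * Force the core to NAK the endpoint, disable it, and flush any TX FIFO
 * it owns so a new transfer can be programmed cleanly.
 */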
4474static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
4475 struct dwc2_hsotg_ep *hs_ep)
4476{
4477 u32 epctrl_reg;
4478 u32 epint_reg;
4479
4480 epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
4481 DOEPCTL(hs_ep->index);
4482 epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
4483 DOEPINT(hs_ep->index);
4484
4485 dev_info(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
4486 hs_ep->name);
4487
4488 if (hs_ep->dir_in) {
4489 if (hsotg->dedicated_fifos || hs_ep->periodic) {
4490 dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
4491 /* Wait for Nak effect */
4492 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
4493 DXEPINT_INEPNAKEFF, 100))
4494 dev_warn(hsotg->dev,
4495 "%s: timeout DIEPINT.NAKEFF\n",
4496 __func__);
4497 } else {
4498 dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
4499 /* Wait for Nak effect */
4500 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
4501 GINTSTS_GINNAKEFF, 100))
4502 dev_warn(hsotg->dev,
4503 "%s: timeout GINTSTS.GINNAKEFF\n",
4504 __func__);
4505 }
4506 } else {
4507 if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
4508 dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
4509
4510 /* Wait for global nak to take effect */
4511 if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
4512 GINTSTS_GOUTNAKEFF, 100)) {
4513 dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
4514 __func__);
4515 /* dwc2_dump_regs(hsotg); */
4516 }
4517 }
4518
4519 /* Disable ep */
4520 dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
4521
4522 /* Wait for ep to be disabled */
4523 if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
4524 dev_warn(hsotg->dev,
4525 "%s: timeout DOEPCTL.EPDisable\n", __func__);
4526
4527 /* Clear EPDISBLD interrupt */
4528 dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
4529
4530 if (hs_ep->dir_in) {
4531 unsigned short fifo_index;
4532
4533 if (hsotg->dedicated_fifos || hs_ep->periodic)
4534 fifo_index = hs_ep->fifo_index;
4535 else
4536 fifo_index = 0;
4537
4538 /* Flush TX FIFO */
4539 dwc2_flush_tx_fifo(hsotg, fifo_index);
4540
4541 /* Clear Global In NP NAK in Shared FIFO for non periodic ep */
4542 if (!hsotg->dedicated_fifos && !hs_ep->periodic)
4543 dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
4544
4545 } else {
4546 /* Remove global NAKs */
4547 dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
4548 }
4549}
4550
4551/**
4552 * dwc2_hsotg_ep_enable - enable the given endpoint
4553 * @ep: The USB endpoint to configure
4554 * @desc: The USB endpoint descriptor to configure with.
4555 *
4556 * This is called from the USB gadget code's usb_ep_enable().
4557 */
4558static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
4559 const struct usb_endpoint_descriptor *desc)
4560{
4561 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4562 struct dwc2_hsotg *hsotg = hs_ep->parent;
4563 unsigned long flags;
4564 unsigned int index = hs_ep->index;
4565 u32 epctrl_reg;
4566 u32 epctrl;
4567 u32 mps;
4568 u32 mc;
4569 u32 mask;
4570 unsigned int dir_in;
4571 unsigned int i, val, size;
4572 int ret = 0;
4573 unsigned char ep_type;
4574 int desc_num;
4575
4576 dev_dbg(hsotg->dev,
4577 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
4578 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
4579 desc->wMaxPacketSize, desc->bInterval);
4580
4581 /* not to be called for EP0 */
4582 if (index == 0) {
4583 dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
4584 return -EINVAL;
4585 }
4586
4587 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
4588 if (dir_in != hs_ep->dir_in) {
4589 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
4590 return -EINVAL;
4591 }
4592
4593 ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
4594 mps = usb_endpoint_maxp(desc);
4595 mc = usb_endpoint_maxp_mult(desc);
4596
4597	/* ISOC IN in DDMA mode supports bInterval values up to 10 */
4598 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
4599 dir_in && desc->bInterval > 10) {
4600 dev_err(hsotg->dev,
4601 "%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
4602 return -EINVAL;
4603 }
4604
4605 /* High bandwidth ISOC OUT in DDMA not supported */
4606 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
4607 !dir_in && mc > 1) {
4608 dev_err(hsotg->dev,
4609 "%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
4610 return -EINVAL;
4611 }
4612
4613 /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
4614
4615 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4616 epctrl = dwc2_readl(hsotg, epctrl_reg);
4617
4618 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
4619 __func__, epctrl, epctrl_reg);
4620
4621 if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
4622 desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
4623 else
4624 desc_num = MAX_DMA_DESC_NUM_GENERIC;
4625
4626 /* Allocate DMA descriptor chain for non-ctrl endpoints */
4627 if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
4628 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
4629 desc_num * sizeof(struct dwc2_dma_desc),
4630 &hs_ep->desc_list_dma, GFP_ATOMIC);
4631 if (!hs_ep->desc_list) {
4632 ret = -ENOMEM;
4633 goto error2;
4634 }
4635 }
4636
4637 spin_lock_irqsave(&hsotg->lock, flags);
4638
4639 epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
4640 epctrl |= DXEPCTL_MPS(mps);
4641
4642 /*
4643 * mark the endpoint as active, otherwise the core may ignore
4644 * transactions entirely for this endpoint
4645 */
4646 epctrl |= DXEPCTL_USBACTEP;
4647
4648 /* update the endpoint state */
4649 dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
4650
4651 /* default, set to non-periodic */
4652 hs_ep->isochronous = 0;
4653 hs_ep->periodic = 0;
4654 hs_ep->halted = 0;
4655 hs_ep->interval = desc->bInterval;
4656
4657 switch (ep_type) {
4658 case USB_ENDPOINT_XFER_ISOC:
4659 epctrl |= DXEPCTL_EPTYPE_ISO;
4660 epctrl |= DXEPCTL_SETEVENFR;
4661 hs_ep->isochronous = 1;
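		/* for periodic endpoints bInterval is an exponent: the
		 * period is 2^(bInterval - 1) (micro)frames
		 */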
4662 hs_ep->interval = 1 << (desc->bInterval - 1);
4663 hs_ep->target_frame = TARGET_FRAME_INITIAL;
4664 hs_ep->next_desc = 0;
4665 hs_ep->compl_desc = 0;
4666 if (dir_in) {
4667 hs_ep->periodic = 1;
4668 mask = dwc2_readl(hsotg, DIEPMSK);
4669 mask |= DIEPMSK_NAKMSK;
4670 dwc2_writel(hsotg, mask, DIEPMSK);
4671 } else {
4672 epctrl |= DXEPCTL_SNAK;
4673 mask = dwc2_readl(hsotg, DOEPMSK);
4674 mask |= DOEPMSK_OUTTKNEPDISMSK;
4675 dwc2_writel(hsotg, mask, DOEPMSK);
4676 }
4677 break;
4678
4679 case USB_ENDPOINT_XFER_BULK:
4680 epctrl |= DXEPCTL_EPTYPE_BULK;
4681 break;
4682
4683 case USB_ENDPOINT_XFER_INT:
4684 if (dir_in)
4685 hs_ep->periodic = 1;
4686
4687 if (hsotg->gadget.speed == USB_SPEED_HIGH)
4688 hs_ep->interval = 1 << (desc->bInterval - 1);
4689
4690 epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
4691 break;
4692
4693 case USB_ENDPOINT_XFER_CONTROL:
4694 epctrl |= DXEPCTL_EPTYPE_CONTROL;
4695 break;
4696 }
4697
4698 /*
4699 * if the hardware has dedicated fifos, we must give each IN EP
4700 * a unique tx-fifo even if it is non-periodic.
4701 */
4702 if (dir_in && hsotg->dedicated_fifos) {
4703 unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
4704 u32 fifo_index = 0;
4723 u32 fifo_size = 0;
4724
4725 size = hs_ep->ep.maxpacket * hs_ep->mc;
4726 for (i = 1; i <= fifo_count; ++i) {
4727 if (hsotg->fifo_map & (1 << i))
4728 continue;
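			/* DPTXFSIZN reports the FIFO depth in 32-bit words;
			 * convert to bytes before comparing with the
			 * endpoint's worst-case payload
			 */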
4729 val = dwc2_readl(hsotg, DPTXFSIZN(i));
4730 val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
4731 if (val < size)
4732 continue;
4733 /* Search for largest acceptable fifo */
4734 if (val > fifo_size) {
4735 fifo_size = val;
4736 fifo_index = i;
4737 }
4738 }
4740 if (!fifo_index) {
4741 dev_err(hsotg->dev,
4742 "%s: No suitable fifo found\n", __func__);
4743 ret = -ENOMEM;
4744 goto error1;
4745 }
4746 epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
4747 hsotg->fifo_map |= 1 << fifo_index;
4748 epctrl |= DXEPCTL_TXFNUM(fifo_index);
4749 hs_ep->fifo_index = fifo_index;
4750 hs_ep->fifo_size = fifo_size;
4751 }
4752
4753 /* for non control endpoints, set PID to D0 */
4754 if (index && !hs_ep->isochronous)
4755 epctrl |= DXEPCTL_SETD0PID;
4756
4757	/* Workaround for full-speed ISOC IN in DDMA mode.
4758	 * By clearing the NAK status of the EP, the core will send a ZLP
4759	 * in response to an IN token and assert the NAK interrupt relying
4760	 * on TxFIFO status only.
4761	 */
4762
4763 if (hsotg->gadget.speed == USB_SPEED_FULL &&
4764 hs_ep->isochronous && dir_in) {
4765 /* The WA applies only to core versions from 2.72a
4766 * to 4.00a (including both). Also for FS_IOT_1.00a
4767 * and HS_IOT_1.00a.
4768 */
4769 u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
4770
4771 if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
4772 gsnpsid <= DWC2_CORE_REV_4_00a) ||
4773 gsnpsid == DWC2_FS_IOT_REV_1_00a ||
4774 gsnpsid == DWC2_HS_IOT_REV_1_00a)
4775 epctrl |= DXEPCTL_CNAK;
4776 }
4777
4778 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
4779 __func__, epctrl);
4780
4781 if (index)
4782 epctrl |= DXEPCTL_SNAK;
4783
4784 dwc2_writel(hsotg, epctrl, epctrl_reg);
4785 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
4786 __func__, dwc2_readl(hsotg, epctrl_reg));
4787
4788 /* enable the endpoint interrupt */
4789 dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
4790
4791error1:
4792 spin_unlock_irqrestore(&hsotg->lock, flags);
4793
4794error2:
4795 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
4796 dmam_free_coherent(hsotg->dev, desc_num *
4797 sizeof(struct dwc2_dma_desc),
4798 hs_ep->desc_list, hs_ep->desc_list_dma);
4799 hs_ep->desc_list = NULL;
4800 }
4801
4802 return ret;
4803}
4804
4805/**
4806 * dwc2_hsotg_ep_disable - disable given endpoint
4807 * @ep: The endpoint to disable.
4808 */
4809static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
4810{
4811 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4812 struct dwc2_hsotg *hsotg = hs_ep->parent;
4813 int dir_in = hs_ep->dir_in;
4814 int index = hs_ep->index;
4815 u32 epctrl_reg;
4816 u32 ctrl;
4817
4818 dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
4819
4820 if (ep == &hsotg->eps_out[0]->ep) {
4821 dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
4822 return -EINVAL;
4823 }
4824
4825 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4826 dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
4827 return -EINVAL;
4828 }
4829
4830 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4831
4832 ctrl = dwc2_readl(hsotg, epctrl_reg);
4833
4834 if (ctrl & DXEPCTL_EPENA)
4835 dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
4836
4837 ctrl &= ~DXEPCTL_EPENA;
4838 ctrl &= ~DXEPCTL_USBACTEP;
4839 ctrl |= DXEPCTL_SNAK;
4840
4841 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
4842 dwc2_writel(hsotg, ctrl, epctrl_reg);
4843
4844 /* disable endpoint interrupts */
4845 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
4846
4847 /* terminate all requests with shutdown */
4848 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
4849
4850 hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
4851 hs_ep->fifo_index = 0;
4852 hs_ep->fifo_size = 0;
4853
4854 return 0;
4855}
4856
4857static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
4858{
4859 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4860 struct dwc2_hsotg *hsotg = hs_ep->parent;
4861 unsigned long flags;
4862 int ret;
4863
4864 spin_lock_irqsave(&hsotg->lock, flags);
4865 ret = dwc2_hsotg_ep_disable(ep);
4866 spin_unlock_irqrestore(&hsotg->lock, flags);
4867 return ret;
4868}
4869
4870/**
4871 * on_list - check whether a request is queued on the given endpoint
4872 * @ep: The endpoint to check.
4873 * @test: The request to test if it is on the endpoint.
4874 */
4875static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
4876{
4877 struct dwc2_hsotg_req *req, *treq;
4878
4879 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
4880 if (req == test)
4881 return true;
4882 }
4883
4884 return false;
4885}
4886
4887/**
4888 * dwc2_hsotg_ep_dequeue - dequeue the given request from an endpoint
4889 * @ep: The endpoint to dequeue.
4890 * @req: The request to be removed from a queue.
4891 */
4892static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
4893{
4894 struct dwc2_hsotg_req *hs_req = our_req(req);
4895 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4896 struct dwc2_hsotg *hs = hs_ep->parent;
4897 unsigned long flags;
4898
4899 dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
4900
4901 spin_lock_irqsave(&hs->lock, flags);
4902
4903 if (!on_list(hs_ep, hs_req)) {
4904 spin_unlock_irqrestore(&hs->lock, flags);
4905 return -EINVAL;
4906 }
4907
4908 /* Dequeue already started request */
4909 if (req == &hs_ep->req->req)
4910 dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
4911
4912 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
4913 spin_unlock_irqrestore(&hs->lock, flags);
4914
4915 return 0;
4916}
4917
4918/**
4919 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
4920 * @ep: The endpoint to set halt.
4921 * @value: Set or unset the halt.
4922 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
4923 * the endpoint is busy processing requests.
4924 *
4925 * We need to stall the endpoint immediately if the request comes from
4926 * the SetFeature protocol command handler.
4927 */
4928static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
4929{
4930 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4931 struct dwc2_hsotg *hs = hs_ep->parent;
4932 int index = hs_ep->index;
4933 u32 epreg;
4934 u32 epctl;
4935 u32 xfertype;
4936
4937 dev_info(hs->dev, "ephlt(ep %p %s, %d)\n", ep, ep->name, value);
4938
4939 if (index == 0) {
4940 if (value)
4941 dwc2_hsotg_stall_ep0(hs);
4942 else
4943 dev_warn(hs->dev,
4944 "%s: can't clear halt on ep0\n", __func__);
4945 return 0;
4946 }
4947
4948 if (hs_ep->isochronous) {
4949 dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
4950 return -EINVAL;
4951 }
4952
4953 if (!now && value && !list_empty(&hs_ep->queue)) {
4954 dev_info(hs->dev, "%s request is pending, cannot halt\n",
4955 ep->name);
4956 return -EAGAIN;
4957 }
4958
4959 if (hs_ep->dir_in) {
4960 epreg = DIEPCTL(index);
4961 epctl = dwc2_readl(hs, epreg);
4962
4963 if (value) {
4964 epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
4965 if (epctl & DXEPCTL_EPENA)
4966 epctl |= DXEPCTL_EPDIS;
4967 } else {
4968 epctl &= ~DXEPCTL_STALL;
4969 xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4970 if (xfertype == DXEPCTL_EPTYPE_BULK ||
4971 xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4972 epctl |= DXEPCTL_SETD0PID;
4973 }
4974 dwc2_writel(hs, epctl, epreg);
4975 } else {
4976 epreg = DOEPCTL(index);
4977 epctl = dwc2_readl(hs, epreg);
4978
4979 if (value) {
4980 epctl |= DXEPCTL_STALL;
4981 } else {
4982 epctl &= ~DXEPCTL_STALL;
4983 xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4984 if (xfertype == DXEPCTL_EPTYPE_BULK ||
4985 xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4986 epctl |= DXEPCTL_SETD0PID;
4987 }
4988 dwc2_writel(hs, epctl, epreg);
4989 }
4990
4991 hs_ep->halted = value;
4992
4993 return 0;
4994}
4995
4996/**
4997 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
4998 * @ep: The endpoint to set halt.
4999 * @value: Set or unset the halt.
5000 */
5001static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
5002{
5003 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
5004 struct dwc2_hsotg *hs = hs_ep->parent;
5005 unsigned long flags = 0;
5006 int ret = 0;
5007
5008 spin_lock_irqsave(&hs->lock, flags);
5009 ret = dwc2_hsotg_ep_sethalt(ep, value, false);
5010 spin_unlock_irqrestore(&hs->lock, flags);
5011
5012 return ret;
5013}
5014
5015static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
5016 .enable = dwc2_hsotg_ep_enable,
5017 .disable = dwc2_hsotg_ep_disable_lock,
5018 .alloc_request = dwc2_hsotg_ep_alloc_request,
5019 .free_request = dwc2_hsotg_ep_free_request,
5020 .queue = dwc2_hsotg_ep_queue_lock,
5021 .dequeue = dwc2_hsotg_ep_dequeue,
5022 .set_halt = dwc2_hsotg_ep_sethalt_lock,
5023	/* note, we don't believe there is any need for the FIFO routines */
5024};
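/*
 * A minimal sketch of how a gadget function driver reaches these ops
 * through the usb_ep API. The buffer, length, and my_complete callback
 * below are illustrative only, and ep->desc is assumed to have been set
 * beforehand (e.g. via config_ep_by_speed()):
 *
 *	struct usb_request *req;
 *
 *	usb_ep_enable(ep);                 // -> dwc2_hsotg_ep_enable
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;       // hypothetical completion handler
 *	usb_ep_queue(ep, req, GFP_KERNEL); // -> dwc2_hsotg_ep_queue_lock
 */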
5025
5026/**
5027 * dwc2_hsotg_init - initialize the usb core
5028 * @hsotg: The driver state
5029 */
5030static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
5031{
5032 /* unmask subset of endpoint interrupts */
5033
5034 dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
5035 DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
5036 DIEPMSK);
5037
5038 dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
5039 DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
5040 DOEPMSK);
5041
5042 dwc2_writel(hsotg, 0, DAINTMSK);
5043
5044 /* Be in disconnected state until gadget is registered */
5045 dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
5046
5047 /* setup fifos */
5048
5049 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
5050 dwc2_readl(hsotg, GRXFSIZ),
5051 dwc2_readl(hsotg, GNPTXFSIZ));
5052
5053 dwc2_hsotg_init_fifo(hsotg);
5054
5055 if (using_dma(hsotg))
5056 dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
5057}
5058
5059#ifdef CONFIG_USB_DWC2_ASR_OTG
5060/**
5061 * dwc2_hsotg_udc_start - prepare the udc for work
5062 * @gadget: The usb gadget state
5063 * @driver: The usb gadget driver
5064 *
5065 * Perform initialization to prepare udc device and driver
5066 * to work.
5067 */
5068static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
5069 struct usb_gadget_driver *driver)
5070{
5071 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5072 unsigned long flags;
5073 int ret;
5074
5075 if (!hsotg) {
5076 pr_err("%s: called with no device\n", __func__);
5077 return -ENODEV;
5078 }
5079
5080 if (!driver) {
5081 dev_err(hsotg->dev, "%s: no driver\n", __func__);
5082 return -EINVAL;
5083 }
5084
5085 if (driver->max_speed < USB_SPEED_FULL)
5086 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
5087
5088 if (!driver->setup) {
5089 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
5090 return -EINVAL;
5091 }
5092
5093 WARN_ON(hsotg->driver);
5094
5095 hsotg->driver = driver;
5096 hsotg->gadget.dev.of_node = hsotg->dev->of_node;
5097 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5098
5099 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
5100 ret = dwc2_lowlevel_hw_enable(hsotg);
5101 if (ret)
5102 goto err;
5103 }
5104
5105 if (!IS_ERR_OR_NULL(hsotg->uphy))
5106 otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
5107
5108 spin_lock_irqsave(&hsotg->lock, flags);
5109 if (dwc2_hw_is_device(hsotg)) {
5110 //dwc2_hsotg_init(hsotg);
5111 //dwc2_hsotg_core_init_disconnected(hsotg, false);
5112 }
5113
5114 hsotg->enabled = 0;
5115 spin_unlock_irqrestore(&hsotg->lock, flags);
5116
5117 gadget->sg_supported = using_desc_dma(hsotg);
5118 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
5119
5120#ifdef CONFIG_DWC2_MON_USB_PHY
5121 schedule_delayed_work(&hsotg->phy_mon_work, 0);
5122#endif
5123 return 0;
5124
5125err:
5126 hsotg->driver = NULL;
5127 return ret;
5128}
5129#else
5130/**
5131 * dwc2_hsotg_udc_start - prepare the udc for work
5132 * @gadget: The usb gadget state
5133 * @driver: The usb gadget driver
5134 *
5135 * Perform initialization to prepare udc device and driver
5136 * to work.
5137 */
5138static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
5139 struct usb_gadget_driver *driver)
5140{
5141 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5142 unsigned long flags;
5143 int ret;
5144
5145 if (!hsotg) {
5146 pr_err("%s: called with no device\n", __func__);
5147 return -ENODEV;
5148 }
5149
5150 if (!driver) {
5151 dev_err(hsotg->dev, "%s: no driver\n", __func__);
5152 return -EINVAL;
5153 }
5154
5155 if (driver->max_speed < USB_SPEED_FULL)
5156 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
5157
5158 if (!driver->setup) {
5159 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
5160 return -EINVAL;
5161 }
5162
5163 WARN_ON(hsotg->driver);
5164
5165 driver->driver.bus = NULL;
5166 hsotg->driver = driver;
5167 hsotg->gadget.dev.of_node = hsotg->dev->of_node;
5168 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5169
5170 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
5171 ret = dwc2_lowlevel_hw_enable(hsotg);
5172 if (ret)
5173 goto err;
5174 }
5175
5176 if (!IS_ERR_OR_NULL(hsotg->uphy))
5177 otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
5178
5179 spin_lock_irqsave(&hsotg->lock, flags);
5180 if (dwc2_hw_is_device(hsotg)) {
5181 dwc2_hsotg_init(hsotg);
5182 dwc2_hsotg_core_init_disconnected(hsotg, false);
5183 }
5184
5185 hsotg->enabled = 0;
5186 spin_unlock_irqrestore(&hsotg->lock, flags);
5187
5188 gadget->sg_supported = using_desc_dma(hsotg);
5189 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
5190
5191 if (hsotg->qwork)
5192 queue_work(hsotg->qwork, &hsotg->vbus_work);
5193
5194#ifdef CONFIG_DWC2_MON_USB_PHY
5195 schedule_delayed_work(&hsotg->phy_mon_work, 0);
5196#endif
5197 return 0;
5198
5199err:
5200 hsotg->driver = NULL;
5201 return ret;
5202}
5203#endif
5204
5205/**
5206 * dwc2_hsotg_udc_stop - stop the udc
5207 * @gadget: The usb gadget state
5208 *
5209 * Stop the UDC hardware block and stay tuned for future transmissions
5210 */
5211static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
5212{
5213 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5214 unsigned long flags = 0;
5215 int ep;
5216
5217 if (!hsotg)
5218 return -ENODEV;
5219
5220 /* all endpoints should be shutdown */
5221 for (ep = 1; ep < hsotg->num_of_eps; ep++) {
5222 if (hsotg->eps_in[ep])
5223 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
5224 if (hsotg->eps_out[ep])
5225 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
5226 }
5227
5228 spin_lock_irqsave(&hsotg->lock, flags);
5229
5230 hsotg->driver = NULL;
5231 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5232 hsotg->enabled = 0;
5233
5234 spin_unlock_irqrestore(&hsotg->lock, flags);
5235
5236 if (!IS_ERR_OR_NULL(hsotg->uphy))
5237 otg_set_peripheral(hsotg->uphy->otg, NULL);
5238
5239 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
5240 dwc2_lowlevel_hw_disable(hsotg);
5241
5242 return 0;
5243}
5244
5245/**
5246 * dwc2_hsotg_gadget_getframe - read the frame number
5247 * @gadget: The usb gadget state
5248 *
5249 * Read the {micro} frame number
5250 */
5251static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
5252{
5253 return dwc2_hsotg_read_frameno(to_hsotg(gadget));
5254}
5255#ifdef CONFIG_USB_DWC2_ASR_OTG
5256/**
5257 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
5258 * @gadget: The usb gadget state
5259 * @is_on: The desired state of the D+ pullup
5260 *
5261 * Connect/Disconnect the USB PHY pullup
5262 */
5263static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
5264{
5265 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5266 unsigned long flags = 0;
5267
5268 dev_info(hsotg->dev, "%s: is_on: %d op_state: %d, vbus: %d\n", __func__, is_on,
5269 hsotg->op_state, hsotg->vbus_active);
5270 pr_emerg("dwc2 pullup(%d) otg state: %d - %s\n",
5271 is_on, hsotg->otg_state, usb_otg_state_string(hsotg->otg_state));
5272
5273 if (hsotg->otg_state != OTG_STATE_B_IDLE
5274 && hsotg->otg_state != OTG_STATE_B_PERIPHERAL) {
5275 hsotg->softconnect = (is_on != 0);
5276 pr_info("pullup exit for host mode\n");
5277 return 0;
5278 }
5279
5280 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5281
5282 mutex_lock(&usb_con_mutex);
5283
5284 is_on = !!is_on;
5285 if (hsotg->softconnect == is_on) {
5286 dev_info(hsotg->dev, "dwc2 already pulled up\n");
5287 goto out;
5288 }
5289
5290 hsotg->softconnect = (is_on != 0);
5291
5292 if (hsotg->charger_type == DCP_CHARGER) {
5293 dev_info(hsotg->dev, "dwc2 pullup out on DCP_CHARGER\n");
5294 goto out;
5295 }
5296
5297 /* Don't modify pullup state while in host mode */
5298 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
5299 hsotg->enabled = is_on;
5300 dev_info(hsotg->dev, "dwc2 NOT OTG_STATE_B_PERIPHERAL\n");
5301 mutex_unlock(&usb_con_mutex);
5302 return 0;
5303 }
5304
5305 spin_lock_irqsave(&hsotg->lock, flags);
5306 if (hsotg->driver && hsotg->softconnect && hsotg->vbus_active) {
5307 hsotg->enabled = 1;
5308 dwc2_hsotg_init(hsotg);
5309 dwc2_hsotg_core_init_disconnected(hsotg, false);
5310		/* Enable ACG feature in device mode, if supported */
5311 dwc2_enable_acg(hsotg);
5312 dwc2_hsotg_core_connect(hsotg);
5313 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5314 spin_unlock_irqrestore(&hsotg->lock, flags);
5315 dwc2_charger_type_confirm(hsotg);
5316 goto out;
5317 } else if (hsotg->driver && (!hsotg->softconnect) && hsotg->vbus_active) {
5318 dwc2_hsotg_core_disconnect(hsotg);
5319 dwc2_hsotg_disconnect(hsotg);
5320 hsotg->enabled = 0;
5321 }
5322
5323 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5324 spin_unlock_irqrestore(&hsotg->lock, flags);
5325
5326out:
5327 mutex_unlock(&usb_con_mutex);
5328 return 0;
5329}
5330#else
5331/**
5332 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
5333 * @gadget: The usb gadget state
5334 * @is_on: The desired state of the D+ pullup
5335 *
5336 * Connect/Disconnect the USB PHY pullup
5337 */
5338static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
5339{
5340 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5341 unsigned long flags = 0;
5342
5343 dev_info(hsotg->dev, "%s: is_on: %d op_state: %d, vbus: %d\n", __func__, is_on,
5344 hsotg->op_state, hsotg->vbus_active);
5345
5346 mutex_lock(&usb_con_mutex);
5347
5348 is_on = !!is_on;
5349 if (hsotg->softconnect == is_on) {
5350 dev_info(hsotg->dev, "dwc2 already pulled up\n");
5351 goto out;
5352 }
5353
5354 hsotg->softconnect = (is_on != 0);
5355
5356 if (hsotg->charger_type == DCP_CHARGER) {
5357 dev_info(hsotg->dev, "dwc2 pullup out on DCP_CHARGER\n");
5358 goto out;
5359 }
5360
5361 /* Don't modify pullup state while in host mode */
5362 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
5363 hsotg->enabled = is_on;
5364 dev_info(hsotg->dev, "dwc2 NOT OTG_STATE_B_PERIPHERAL\n");
5365 mutex_unlock(&usb_con_mutex);
5366 return 0;
5367 }
5368
5369 spin_lock_irqsave(&hsotg->lock, flags);
5370 if (hsotg->driver && hsotg->softconnect && hsotg->vbus_active) {
5371 hsotg->enabled = 1;
5372 dwc2_hsotg_core_init_disconnected(hsotg, false);
5373		/* Enable ACG feature in device mode, if supported */
5374 dwc2_enable_acg(hsotg);
5375 dwc2_hsotg_core_connect(hsotg);
5376 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5377 spin_unlock_irqrestore(&hsotg->lock, flags);
5378 dwc2_charger_type_confirm(hsotg);
5379 goto out;
5380 } else if (hsotg->driver && (!hsotg->softconnect) && hsotg->vbus_active) {
5381 dwc2_hsotg_core_disconnect(hsotg);
5382 dwc2_hsotg_disconnect(hsotg);
5383 hsotg->enabled = 0;
5384 }
5385
5386 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5387 spin_unlock_irqrestore(&hsotg->lock, flags);
5388
5389out:
5390 mutex_unlock(&usb_con_mutex);
5391 return 0;
5392}
5393#endif
5394
5395static int asr_usb_vbus_notifier_call(struct notifier_block *nb,
5396 unsigned long val, void *v)
5397{
5398 struct dwc2_hsotg *hsotg = container_of(nb, struct dwc2_hsotg, notifier);
5399	/* polling VBUS and initializing the PHY may take too much time */
5400 if (hsotg->qwork && val == EVENT_VBUS)
5401 queue_work(hsotg->qwork, &hsotg->vbus_work);
5402
5403 return 0;
5404}
5405
5406static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active);
5407static void asr_usb_vbus_work(struct work_struct *work)
5408{
5409 struct dwc2_hsotg *hsotg;
5410 unsigned int vbus = 0;
5411 int ret;
5412 static bool first_vbus = true;
5413
5414 hsotg = container_of(work, struct dwc2_hsotg, vbus_work);
5415 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
5416 if (ret) {
5417 vbus = usb_phy_get_vbus(hsotg->uphy);
5418 }
5419
5420 if (!first_vbus) {
5421 if (vbus == hsotg->vbus_active) {
5422 dev_info(hsotg->dev, "!!!skip vbus event %d -> %d\n", vbus, hsotg->vbus_active);
5423 if (vbus)
5424 pm_stay_awake(hsotg->dev);
5425 return;
5426 }
5427 }
5428
5429 if (first_vbus)
5430 first_vbus = false;
5431 dev_info(hsotg->dev, "vbus is %s.\n", vbus ? "on" : "off");
5432 dwc2_hsotg_vbus_session(&hsotg->gadget, vbus);
5433}
5434
5435#ifdef CONFIG_USB_DWC2_ASR_OTG
5436static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
5437{
5438 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5439 unsigned long flags;
5440 static unsigned int vbus = 0;
5441 static bool first_vbus = true;
5442
5443 hsotg->vbus_active = (is_active != 0);
5444 dev_info(hsotg->dev, "%s: is_active: %d, softconnect: %d\n",
5445 __func__, is_active, hsotg->softconnect);
5446 if (!first_vbus) {
5447 if (vbus == hsotg->vbus_active) {
5448 dev_info(hsotg->dev, "!!!skip vbus event %d -> %d\n", vbus, hsotg->vbus_active);
5449 if (vbus)
5450 pm_stay_awake(hsotg->dev);
5451 return 0;
5452 }
5453 }
5454 if (first_vbus)
5455 first_vbus = false;
5456 vbus = hsotg->vbus_active;
5457
5458 if (work_pending(&hsotg->usb_restart_work.work)) {
5459 dev_info(hsotg->dev, "cancel restart work...");
5460 cancel_delayed_work_sync(&hsotg->usb_restart_work);
5461 dev_info(hsotg->dev, "done\n");
5462 }
5463
5464 pr_emerg("vbus session otg state: %d - %s\n",
5465 hsotg->otg_state, usb_otg_state_string(hsotg->otg_state));
5466
5467 if (hsotg->otg_state != OTG_STATE_B_IDLE
5468 && hsotg->otg_state != OTG_STATE_B_PERIPHERAL) {
5469 pr_info("vbus exit for host mode\n");
5470 return 0;
5471 }
5472
5473 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5474 mutex_lock(&usb_con_mutex);
5475 hsotg->vbus_active = (is_active != 0);
5476 hsotg->prev_charger_type = hsotg->charger_type;
5477 usb_phy_set_suspend(hsotg->uphy, 0);
5478
5479 if (hsotg->vbus_active) {
5480 pm_stay_awake(hsotg->dev);
5481 pm_qos_update_request(&hsotg->qos_idle, hsotg->lpm_qos);
5482 hsotg->charger_type = DEFAULT_CHARGER;
5483 } else {
5484 pm_wakeup_event(hsotg->dev, 5000);
5485 pm_qos_update_request_timeout(&hsotg->qos_idle,
5486 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE, (5000 * 1000));
5487 hsotg->charger_type = NULL_CHARGER;
5488 }
5489
5490 if (work_pending(&hsotg->delayed_charger_work.work))
5491 cancel_delayed_work(&hsotg->delayed_charger_work);
5492
5493 if (hsotg->charger_type == NULL_CHARGER)
5494 schedule_delayed_work(&hsotg->delayed_charger_work, 0);
5495
5496 if (hsotg->charger_type == DEFAULT_CHARGER) {
5497 int enum_delay = ENUMERATION_DELAY;
5498 dev_info(hsotg->dev, "1st stage charger type: %s\n",
5499 charger_type(hsotg->charger_type));
5500 call_charger_notifier(hsotg);
5501 schedule_delayed_work(&hsotg->delayed_charger_work, enum_delay);
5502 }
5503
5504 spin_lock_irqsave(&hsotg->lock, flags);
5505
5506 /*
5507 * If controller is hibernated, it must exit from power_down
5508 * before being initialized / de-initialized
5509 */
5510 if (hsotg->lx_state == DWC2_L2)
5511 dwc2_exit_partial_power_down(hsotg, false);
5512
5513 if (hsotg->driver && hsotg->softconnect && hsotg->vbus_active) {
5514 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5515 dwc2_hsotg_init(hsotg);
5516 dwc2_hsotg_core_init_disconnected(hsotg, false);
5517		/* Enable ACG feature in device mode, if supported */
5518 dwc2_enable_acg(hsotg);
5519 dwc2_hsotg_core_connect(hsotg);
5520 spin_unlock_irqrestore(&hsotg->lock, flags);
5521 dwc2_charger_type_confirm(hsotg);
5522 spin_lock_irqsave(&hsotg->lock, flags);
5523 } else if (hsotg->driver && hsotg->softconnect) {
5524 dwc2_hsotg_core_disconnect(hsotg);
5525 dwc2_hsotg_disconnect(hsotg);
5526 }
5527 spin_unlock_irqrestore(&hsotg->lock, flags);
5528#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
5529 if ((!hsotg->vbus_active) && hsotg->driver && hsotg->softconnect
5530 && (hsotg->prev_charger_type == SDP_CHARGER
5531 || hsotg->prev_charger_type == CDP_CHARGER)) {
5532 mutex_unlock(&usb_con_mutex);
5533 cancel_reconfigure_work();
5534 /* restore os type to default state */
5535 android_dev_enable(0);
5536 usb_os_restore();
5537 android_dev_enable(1);
5538 mutex_lock(&usb_con_mutex);
5539 hsotg->prev_charger_type = NULL_CHARGER;
5540 }
5541#endif
5542 spin_lock_irqsave(&hsotg->lock, flags);
5543 if (!hsotg->vbus_active) {
5544 hsotg->prev_charger_type = NULL_CHARGER;
5545 usb_phy_set_suspend(hsotg->uphy, 1);
5546 }
5547 spin_unlock_irqrestore(&hsotg->lock, flags);
5548 mutex_unlock(&usb_con_mutex);
5549 return 0;
5550}
5551#else
5552static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
5553{
5554 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5555 unsigned long flags;
5556
5557 dev_info(hsotg->dev, "%s: is_active: %d, softconnect: %d\n",
5558 __func__, is_active, hsotg->softconnect);
5559
5560 if (work_pending(&hsotg->usb_restart_work.work)) {
5561 dev_info(hsotg->dev, "cancel restart work...");
5562 cancel_delayed_work_sync(&hsotg->usb_restart_work);
5563 dev_info(hsotg->dev, "done\n");
5564 }
5565
5566 mutex_lock(&usb_con_mutex);
5567 hsotg->vbus_active = (is_active != 0);
5568 hsotg->prev_charger_type = hsotg->charger_type;
5569 usb_phy_set_suspend(hsotg->uphy, 0);
5570
5571 if (hsotg->vbus_active) {
5572 pm_stay_awake(hsotg->dev);
5573 pm_qos_update_request(&hsotg->qos_idle, hsotg->lpm_qos);
5574 hsotg->charger_type = DEFAULT_CHARGER;
5575 } else {
5576 pm_wakeup_event(hsotg->dev, 5000);
5577 pm_qos_update_request_timeout(&hsotg->qos_idle,
5578 PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE, (5000 * 1000));
5579 hsotg->charger_type = NULL_CHARGER;
5580 }
5581
5582 if (work_pending(&hsotg->delayed_charger_work.work))
5583 cancel_delayed_work(&hsotg->delayed_charger_work);
5584
5585 if (hsotg->charger_type == NULL_CHARGER)
5586 schedule_delayed_work(&hsotg->delayed_charger_work, 0);
5587
5588 if (hsotg->charger_type == DEFAULT_CHARGER) {
5589 int enum_delay = ENUMERATION_DELAY;
5590 dev_info(hsotg->dev, "1st stage charger type: %s\n",
5591 charger_type(hsotg->charger_type));
5592 call_charger_notifier(hsotg);
5593 schedule_delayed_work(&hsotg->delayed_charger_work, enum_delay);
5594 }
5595
5596 spin_lock_irqsave(&hsotg->lock, flags);
5597
5598 /*
5599 * If controller is hibernated, it must exit from power_down
5600 * before being initialized / de-initialized
5601 */
5602 if (hsotg->lx_state == DWC2_L2)
5603 dwc2_exit_partial_power_down(hsotg, false);
5604
5605 if (hsotg->driver && hsotg->softconnect && hsotg->vbus_active) {
5606 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5607
5608 dwc2_hsotg_core_init_disconnected(hsotg, false);
5609		/* Enable ACG feature in device mode, if supported */
5610 dwc2_enable_acg(hsotg);
5611 dwc2_hsotg_core_connect(hsotg);
5612 spin_unlock_irqrestore(&hsotg->lock, flags);
5613 dwc2_charger_type_confirm(hsotg);
5614 spin_lock_irqsave(&hsotg->lock, flags);
5615 } else if (hsotg->driver && hsotg->softconnect) {
5616 dwc2_hsotg_core_disconnect(hsotg);
5617 dwc2_hsotg_disconnect(hsotg);
5618 }
5619 spin_unlock_irqrestore(&hsotg->lock, flags);
5620#if defined(CONFIG_USB_ANDROID_DETECT_HOST_OS)
5621 if ((!hsotg->vbus_active) && hsotg->driver && hsotg->softconnect
5622 && (hsotg->prev_charger_type == SDP_CHARGER
5623 || hsotg->prev_charger_type == CDP_CHARGER)) {
5624 mutex_unlock(&usb_con_mutex);
5625 cancel_reconfigure_work();
5626 /* restore os type to default state */
5627 android_dev_enable(0);
5628 usb_os_restore();
5629 android_dev_enable(1);
5630 mutex_lock(&usb_con_mutex);
5631 hsotg->prev_charger_type = NULL_CHARGER;
5632 }
5633#endif
5634 spin_lock_irqsave(&hsotg->lock, flags);
5635 if (!hsotg->vbus_active) {
5636 hsotg->prev_charger_type = NULL_CHARGER;
5637 usb_phy_set_suspend(hsotg->uphy, 1);
5638 }
5639 spin_unlock_irqrestore(&hsotg->lock, flags);
5640 mutex_unlock(&usb_con_mutex);
5641 return 0;
5642}
5643#endif
5644/**
5645 * dwc2_hsotg_vbus_draw - report bMaxPower field
5646 * @gadget: The usb gadget state
5647 * @mA: Amount of current
5648 *
5649 * Report how much power the device may consume to the phy.
5650 */
5651static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
5652{
5653 struct dwc2_hsotg *hsotg = to_hsotg(gadget);
5654
5655 if (IS_ERR_OR_NULL(hsotg->uphy))
5656 return -ENOTSUPP;
5657 return usb_phy_set_power(hsotg->uphy, mA);
5658}
5659
5660static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
5661 .get_frame = dwc2_hsotg_gadget_getframe,
5662 .udc_start = dwc2_hsotg_udc_start,
5663 .udc_stop = dwc2_hsotg_udc_stop,
5664 .pullup = dwc2_hsotg_pullup,
5665 .vbus_session = dwc2_hsotg_vbus_session,
5666 .vbus_draw = dwc2_hsotg_vbus_draw,
5667};
5668
5669/**
5670 * dwc2_hsotg_initep - initialise a single endpoint
5671 * @hsotg: The device state.
5672 * @hs_ep: The endpoint to be initialised.
5673 * @epnum: The endpoint number
5674 * @dir_in: True if direction is in.
5675 *
5676 * Initialise the given endpoint (as part of the probe and device state
5677 * creation) to give to the gadget driver. Setup the endpoint name, any
5678 * direction information and other state that may be required.
5679 */
5680static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
5681 struct dwc2_hsotg_ep *hs_ep,
5682 int epnum,
5683 bool dir_in)
5684{
5685 char *dir;
5686
5687 if (epnum == 0)
5688 dir = "";
5689 else if (dir_in)
5690 dir = "in";
5691 else
5692 dir = "out";
5693
5694 hs_ep->dir_in = dir_in;
5695 hs_ep->index = epnum;
5696
5697 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
5698
5699 INIT_LIST_HEAD(&hs_ep->queue);
5700 INIT_LIST_HEAD(&hs_ep->ep.ep_list);
5701
5702 /* add to the list of endpoints known by the gadget driver */
5703 if (epnum)
5704 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
5705
5706 hs_ep->parent = hsotg;
5707 hs_ep->ep.name = hs_ep->name;
5708
5709 if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
5710 usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
5711 else
5712 usb_ep_set_maxpacket_limit(&hs_ep->ep,
5713 epnum ? 1024 : EP0_MPS_LIMIT);
5714 hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
5715
5716 if (epnum == 0) {
5717 hs_ep->ep.caps.type_control = true;
5718 } else {
5719 if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
5720 hs_ep->ep.caps.type_iso = true;
5721 hs_ep->ep.caps.type_bulk = true;
5722 }
5723 hs_ep->ep.caps.type_int = true;
5724 }
5725
5726 if (dir_in)
5727 hs_ep->ep.caps.dir_in = true;
5728 else
5729 hs_ep->ep.caps.dir_out = true;
5730
5731 /*
5732 * if we're using dma, we need to set the next-endpoint pointer
5733 * to be something valid.
5734 */
5735
5736 if (using_dma(hsotg)) {
5737 u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
5738
5739 if (dir_in)
5740 dwc2_writel(hsotg, next, DIEPCTL(epnum));
5741 else
5742 dwc2_writel(hsotg, next, DOEPCTL(epnum));
5743 }
5744}
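/*
 * Illustrative sketch (not part of this driver): with the naming above,
 * gadget drivers see "ep0", "ep1in", "ep1out", ... on gadget.ep_list and
 * claim one through the usual helper, e.g.
 *
 *	ep = usb_ep_autoconfig(gadget, desc);	// gadget/desc are hypothetical
 *
 * where desc is the function's endpoint descriptor.
 */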
5745
5746/**
5747 * dwc2_hsotg_hw_cfg - read HW configuration registers
5748 * @hsotg: Programming view of the DWC_otg controller
5749 *
5750 * Read the USB core HW configuration registers
5751 */
5752static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
5753{
5754 u32 cfg;
5755 u32 ep_type;
5756 u32 i;
5757
5758 /* check hardware configuration */
5759
5760 hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
5761
5762 /* Add ep0 */
5763 hsotg->num_of_eps++;
5764
5765 hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
5766 sizeof(struct dwc2_hsotg_ep),
5767 GFP_KERNEL);
5768 if (!hsotg->eps_in[0])
5769 return -ENOMEM;
5770 /* Same dwc2_hsotg_ep is used in both directions for ep0 */
5771 hsotg->eps_out[0] = hsotg->eps_in[0];
5772
5773 cfg = hsotg->hw_params.dev_ep_dirs;
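	/*
	 * dev_ep_dirs packs two bits per endpoint (GHWCFG1-style encoding:
	 * 0 = bidirectional, 1 = IN only, 2 = OUT only), so an IN ep is
	 * allocated unless bit 1 is set and an OUT ep unless bit 0 is set.
	 */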
5774 for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
5775 ep_type = cfg & 3;
5776 /* Direction in or both */
5777 if (!(ep_type & 2)) {
5778 hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
5779 sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
5780 if (!hsotg->eps_in[i])
5781 return -ENOMEM;
5782 }
5783 /* Direction out or both */
5784 if (!(ep_type & 1)) {
5785 hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
5786 sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
5787 if (!hsotg->eps_out[i])
5788 return -ENOMEM;
5789 }
5790 }
5791
5792 hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
5793 hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
5794
5795 dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
5796 hsotg->num_of_eps,
5797 hsotg->dedicated_fifos ? "dedicated" : "shared",
5798 hsotg->fifo_mem);
5799 return 0;
5800}
5801
5802/**
5803 * dwc2_hsotg_dump - dump state of the udc
5804 * @hsotg: Programming view of the DWC_otg controller
5805 *
5806 */
5807static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
5808{
5809#ifdef DEBUG
5810 struct device *dev = hsotg->dev;
5811 u32 val;
5812 int idx;
5813
5814	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=0x%08x\n",
5815 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
5816 dwc2_readl(hsotg, DIEPMSK));
5817
5818 dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
5819 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
5820
5821 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
5822 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
5823
5824 /* show periodic fifo settings */
5825
5826 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
5827 val = dwc2_readl(hsotg, DPTXFSIZN(idx));
5828 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
5829 val >> FIFOSIZE_DEPTH_SHIFT,
5830 val & FIFOSIZE_STARTADDR_MASK);
5831 }
5832
5833 for (idx = 0; idx < hsotg->num_of_eps; idx++) {
5834 dev_info(dev,
5835 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
5836 dwc2_readl(hsotg, DIEPCTL(idx)),
5837 dwc2_readl(hsotg, DIEPTSIZ(idx)),
5838 dwc2_readl(hsotg, DIEPDMA(idx)));
5839
5840 val = dwc2_readl(hsotg, DOEPCTL(idx));
5841 dev_info(dev,
5842 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
5843 idx, dwc2_readl(hsotg, DOEPCTL(idx)),
5844 dwc2_readl(hsotg, DOEPTSIZ(idx)),
5845 dwc2_readl(hsotg, DOEPDMA(idx)));
5846 }
5847
5848	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=0x%08x\n",
5849 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
5850#endif
5851}
5852
5853#ifdef CONFIG_DWC2_MON_TIMER
5854static enum hrtimer_restart dwc2_mon_timer_func(struct hrtimer *timer)
5855{
5856 struct dwc2_hsotg *hsotg = the_controller;
5857 u32 sizeleft;
5858 u32 ctrlout, ctrlin;
5859 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
5860
5861 ctrlin = dwc2_readl(hsotg, DIEPCTL(0));
5862 ctrlout = dwc2_readl(hsotg, DOEPCTL(0));
5863 pr_info("ep0_state: %d dir_in: %d\n", hsotg->ep0_state, ep0->dir_in);
5864 pr_info("DIEPCTL(0): 0x%x, DOEPCTL(0):0x%x DSTS: 0x%x\n",
5865 ctrlin, ctrlout, dwc2_readl(hsotg, DSTS));
5866
5867 if (ep0->req) {
5868 sizeleft = dwc2_gadget_get_xfersize_ddma(ep0);
5869 pr_info("sizeleft: %d\n", sizeleft);
5870 } else {
5871 pr_err("error: ep0 req is null\n");
5872 }
5873
5874 if (hsotg->ep0_state != DWC2_EP0_SETUP) {
5875#if 0
5876 if (hsotg->ep0_state == DWC2_EP0_DATA_IN ||
5877 hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
5878 if ((dwc2_readl(hsotg, DIEPCTL(0)) & (DXEPCTL_NAKSTS | DXEPCTL_EPENA))
5879 == (DXEPCTL_NAKSTS)) {
5880 ctrlin |= (DXEPCTL_EPENA | DXEPCTL_CNAK);
5881 dwc2_writel(hsotg, ctrlin, DIEPCTL(0));
5882 pr_err("timer: re-enable ep0-%d, ctrlin: 0x%x\n",
5883 ep0->dir_in, ctrlin);
5884 hrtimer_start(&hsotg->usb_mon_timer,
5885 ns_to_ktime(DWC2_MON_TIMER_NS),
5886 HRTIMER_MODE_REL);
5887 }
5888 }
5889
5890 if (hsotg->ep0_state == DWC2_EP0_DATA_OUT ||
5891 hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
5892 if ((dwc2_readl(hsotg, DOEPCTL(0)) & (DXEPCTL_NAKSTS | DXEPCTL_EPENA))
5893 == (DXEPCTL_NAKSTS)) {
5894 ctrlout |= (DXEPCTL_EPENA | DXEPCTL_CNAK);
5895 dwc2_writel(hsotg, ctrlout, DOEPCTL(0));
5896 pr_err("timer: re-enable ep0-%d, ctrlout: 0x%x\n",
5897 ep0->dir_in, ctrlout);
5898 hrtimer_start(&hsotg->usb_mon_timer,
5899 ns_to_ktime(DWC2_MON_TIMER_NS),
5900 HRTIMER_MODE_REL);
5901 }
5902 }
5903#else
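		/*
		 * EP0 is stuck outside SETUP: instead of re-enabling EP0 by
		 * hand (the disabled path above), restart the whole gadget
		 * from dwc2_restart_work().
		 */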
5904 schedule_delayed_work(&hsotg->usb_restart_work, 0);
5905#endif
5906 }
5907 return HRTIMER_NORESTART;
5908}
5909#endif
5910
5911#if defined(CONFIG_DWC2_MON_TIMER) || defined(CONFIG_USB_RESTART_ON_RESET)
5912static void dwc2_restart_work(struct work_struct *work)
5913{
5914 u32 vbus = 0;
5915 int ret;
5916
5917 the_controller->usb_do_restart = 0;
5918
5919 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
5920 if (ret) {
5921 vbus = usb_phy_get_vbus(the_controller->uphy);
5922 }
5923
5924 if ((!the_controller->vbus_active) || (!vbus)) {
5925 pr_err("%s vbus is off\n", __func__);
5926 return;
5927 }
5928
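	/* Debounce: wait 500 ms and re-check VBUS before restarting */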
5929 msleep(500);
5930 ret = pxa_usb_extern_call(PXA_USB_DEV_OTG, vbus, get_vbus, &vbus);
5931 if (ret) {
5932 vbus = usb_phy_get_vbus(the_controller->uphy);
5933 }
5934
5935 if ((!the_controller->vbus_active) || (!vbus)) {
5936		pr_err("%s: vbus still off after debounce\n", __func__);
5937 return;
5938 }
5939
5940	pr_info("%s: restarting gadget\n", __func__);
5941 android_dev_enable(0);
5942 android_dev_enable(1);
5943}
5944#endif
5945
5946#ifdef CONFIG_DWC2_MON_USB_PHY
5947static void dwc2_phy_mon_func(struct work_struct *work)
5948{
5949 struct dwc2_hsotg *hsotg = the_controller;
5950 int ret;
5951
5952 if (!hsotg->vbus_active)
5953 goto out;
5954
5955 if (hsotg->uphy->phy_private3) {
5956 ret = hsotg->uphy->phy_private3(hsotg->uphy, 0);
5957 if (ret)
5958			pr_info("dwc2 phy soft reset: %d\n", ret);
5959 }
5960
5961out:
5962 schedule_delayed_work(&hsotg->phy_mon_work, DWC2_PHY_MON_INTERVAL);
5963}
5964#endif
5965
5966/**
5967 * dwc2_gadget_init - init function for gadget
5968 * @hsotg: Programming view of the DWC_otg controller
5969 *
5970 */
5971int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
5972{
5973 struct device *dev = hsotg->dev;
5974 int epnum;
5975 int ret;
5976
5977 /* Dump fifo information */
5978 dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
5979 hsotg->params.g_np_tx_fifo_size);
5980 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
5981
5982 hsotg->gadget.max_speed = USB_SPEED_SUPER;
5983 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
5984 hsotg->gadget.name = dev_name(dev);
5985 hsotg->remote_wakeup_allowed = 0;
5986
5987 if (hsotg->params.lpm)
5988 hsotg->gadget.lpm_capable = true;
5989
5990 if (hsotg->dr_mode == USB_DR_MODE_OTG)
5991 hsotg->gadget.is_otg = 1;
5992 else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
5993 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5994
5995 ret = dwc2_hsotg_hw_cfg(hsotg);
5996 if (ret) {
5997 dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
5998 return ret;
5999 }
6000
6001 hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
6002 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
6003 if (!hsotg->ctrl_buff)
6004 return -ENOMEM;
6005
6006 hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
6007 DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
6008 if (!hsotg->ep0_buff)
6009 return -ENOMEM;
6010
6011 if (using_desc_dma(hsotg)) {
6012 ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
6013 if (ret < 0)
6014 return ret;
6015 }
6016
6017 ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
6018 IRQF_SHARED, dev_name(hsotg->dev), hsotg);
6019 if (ret < 0) {
6020 dev_err(dev, "cannot claim IRQ for gadget\n");
6021 return ret;
6022 }
6023
6024	/* hsotg->num_of_eps includes ep0 (incremented in dwc2_hsotg_hw_cfg) */
6025
6026 if (hsotg->num_of_eps == 0) {
6027 dev_err(dev, "wrong number of EPs (zero)\n");
6028 return -EINVAL;
6029 }
6030
6031 /* setup endpoint information */
6032
6033 INIT_LIST_HEAD(&hsotg->gadget.ep_list);
6034 hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
6035
6036 /* allocate EP0 request */
6037
6038 hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
6039 GFP_KERNEL);
6040 if (!hsotg->ctrl_req) {
6041 dev_err(dev, "failed to allocate ctrl req\n");
6042 return -ENOMEM;
6043 }
6044
6045 /* initialise the endpoints now the core has been initialised */
6046 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
6047 if (hsotg->eps_in[epnum])
6048 dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
6049 epnum, 1);
6050 if (hsotg->eps_out[epnum])
6051 dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
6052 epnum, 0);
6053 }
6054
6055 dwc2_hsotg_dump(hsotg);
6056
6057 hsotg->qwork = create_singlethread_workqueue("asr_vbus_queue");
6058 if (!hsotg->qwork) {
6059		dev_err(hsotg->dev, "cannot create vbus workqueue\n");
6060 return -ENOMEM;
6061 }
6062
6063 INIT_WORK(&hsotg->vbus_work, asr_usb_vbus_work);
6064 hsotg->notifier.notifier_call = asr_usb_vbus_notifier_call;
6065 pxa_usb_register_notifier(PXA_USB_DEV_OTG, &hsotg->notifier);
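	/*
	 * VBUS events from the PHY/OTG layer arrive via
	 * asr_usb_vbus_notifier_call() and are handled in process
	 * context by asr_usb_vbus_work().
	 */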
6066
6067 INIT_DELAYED_WORK(&hsotg->delayed_charger_work, do_delayed_charger_work);
6068 hsotg->charger_type = NULL_CHARGER;
6069 the_controller = hsotg;
6070
6071#ifdef CONFIG_DWC2_MON_TIMER
6072 hrtimer_init(&hsotg->usb_mon_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6073 hsotg->usb_mon_timer.function = &dwc2_mon_timer_func;
6074#endif
6075#if defined(CONFIG_DWC2_MON_TIMER) || defined(CONFIG_USB_RESTART_ON_RESET)
6076 INIT_DELAYED_WORK(&hsotg->usb_restart_work, dwc2_restart_work);
6077#endif
6078
6079#ifdef CONFIG_DWC2_MON_USB_PHY
6080 INIT_DELAYED_WORK(&hsotg->phy_mon_work, dwc2_phy_mon_func);
6081#endif
6082
6083 hsotg->phys_mem_end = PAGE_SIZE * max_pfn;
6084 /* extend it to 128MB for cpmem if phys_mem_end is 64MB */
6085 if (hsotg->phys_mem_end == (64*1024*1024))
6086 hsotg->phys_mem_end = (128*1024*1024);
6087 dev_info(hsotg->dev, "phys_mem_end: 0x%lx\n", hsotg->phys_mem_end);
6088
6089 return 0;
6090}
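/*
 * Note: registration with the UDC core (usb_add_gadget_udc()) is expected
 * to be done by the probe code after this returns; dwc2_hsotg_remove()
 * below undoes it.
 */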
6091
6092/**
6093 * dwc2_hsotg_remove - remove function for hsotg driver
6094 * @hsotg: Programming view of the DWC_otg controller
6095 *
6096 */
6097int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
6098{
6099 usb_del_gadget_udc(&hsotg->gadget);
6100 dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
6101
6102 return 0;
6103}
6104
6105int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
6106{
6107 unsigned long flags;
6108
6109 if (hsotg->lx_state != DWC2_L0)
6110 return 0;
6111
6112 if (hsotg->driver) {
6113 int ep;
6114
6115 dev_info(hsotg->dev, "suspending usb gadget %s\n",
6116 hsotg->driver->driver.name);
6117
6118 spin_lock_irqsave(&hsotg->lock, flags);
6119 if (hsotg->enabled)
6120 dwc2_hsotg_core_disconnect(hsotg);
6121 dwc2_hsotg_disconnect(hsotg);
6122 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
6123 spin_unlock_irqrestore(&hsotg->lock, flags);
6124
6125 for (ep = 1; ep < hsotg->num_of_eps; ep++) {
6126 if (hsotg->eps_in[ep])
6127 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
6128 if (hsotg->eps_out[ep])
6129 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
6130 }
6131 }
6132
6133 return 0;
6134}
6135
6136int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
6137{
6138 unsigned long flags;
6139
6140 if (hsotg->lx_state == DWC2_L2)
6141 return 0;
6142
6143 if (hsotg->driver) {
6144 dev_info(hsotg->dev, "resuming usb gadget %s\n",
6145 hsotg->driver->driver.name);
6146
6147 spin_lock_irqsave(&hsotg->lock, flags);
6148 dwc2_hsotg_core_init_disconnected(hsotg, false);
6149 if (hsotg->enabled) {
6150			/* Enable ACG feature in device mode, if supported */
6151 dwc2_enable_acg(hsotg);
6152 dwc2_hsotg_core_connect(hsotg);
6153 }
6154 spin_unlock_irqrestore(&hsotg->lock, flags);
6155 }
6156
6157 return 0;
6158}
6159
6160/**
6161 * dwc2_backup_device_registers() - Backup controller device registers.
6162 * When suspending the USB bus, the device registers need to be backed
6163 * up if controller power is disabled once suspended.
6164 *
6165 * @hsotg: Programming view of the DWC_otg controller
6166 */
6167int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
6168{
6169 struct dwc2_dregs_backup *dr;
6170 int i;
6171
6172 dev_dbg(hsotg->dev, "%s\n", __func__);
6173
6174 /* Backup dev regs */
6175 dr = &hsotg->dr_backup;
6176
6177 dr->dcfg = dwc2_readl(hsotg, DCFG);
6178 dr->dctl = dwc2_readl(hsotg, DCTL);
6179 dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
6180 dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
6181 dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
6182
6183 for (i = 0; i < hsotg->num_of_eps; i++) {
6184 /* Backup IN EPs */
6185 dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
6186
6187		/* Re-arm the sampled DATA PID via SETD0PID/SETD1PID on restore */
6188 if (dr->diepctl[i] & DXEPCTL_DPID)
6189 dr->diepctl[i] |= DXEPCTL_SETD1PID;
6190 else
6191 dr->diepctl[i] |= DXEPCTL_SETD0PID;
6192
6193 dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
6194 dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
6195
6196 /* Backup OUT EPs */
6197 dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
6198
6199		/* Re-arm the sampled DATA PID via SETD0PID/SETD1PID on restore */
6200 if (dr->doepctl[i] & DXEPCTL_DPID)
6201 dr->doepctl[i] |= DXEPCTL_SETD1PID;
6202 else
6203 dr->doepctl[i] |= DXEPCTL_SETD0PID;
6204
6205 dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
6206 dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
6207 dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
6208 }
6209 dr->valid = true;
6210 return 0;
6211}
6212
6213/**
6214 * dwc2_restore_device_registers() - Restore controller device registers.
6215 * When resuming the USB bus, the device registers need to be restored
6216 * if controller power was disabled.
6217 *
6218 * @hsotg: Programming view of the DWC_otg controller
6219 * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
6220 *
6221 * Return: 0 if successful, negative error code otherwise
6222 */
6223int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
6224{
6225 struct dwc2_dregs_backup *dr;
6226 int i;
6227
6228 dev_dbg(hsotg->dev, "%s\n", __func__);
6229
6230 /* Restore dev regs */
6231 dr = &hsotg->dr_backup;
6232 if (!dr->valid) {
6233 dev_err(hsotg->dev, "%s: no device registers to restore\n",
6234 __func__);
6235 return -EINVAL;
6236 }
6237 dr->valid = false;
6238
6239 if (!remote_wakeup)
6240 dwc2_writel(hsotg, dr->dctl, DCTL);
6241
6242 dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
6243 dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
6244 dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
6245
6246 for (i = 0; i < hsotg->num_of_eps; i++) {
6247 /* Restore IN EPs */
6248 dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
6249 dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
6250 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
6251		/* Workaround for enabled IN EPs in DDMA mode: on entry to
6252		 * hibernation a wrong value is read and saved from DIEPDMAx;
6253		 * restoring it would assert a BNA interrupt on hibernation
6254		 * exit, so point the backup at the descriptor list instead.
6255		 */
6256 if (hsotg->params.g_dma_desc &&
6257 (dr->diepctl[i] & DXEPCTL_EPENA))
6258 dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
6259 dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
6260 dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
6261 /* Restore OUT EPs */
6262 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
6263		/* Workaround for enabled OUT EPs in DDMA mode: on entry to
6264		 * hibernation a wrong value is read and saved from DOEPDMAx;
6265		 * restoring it would assert a BNA interrupt on hibernation
6266		 * exit, so point the backup at the descriptor list instead.
6267		 */
6268 if (hsotg->params.g_dma_desc &&
6269 (dr->doepctl[i] & DXEPCTL_EPENA))
6270 dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
6271 dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
6272 dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
6273 }
6274
6275 return 0;
6276}
6277
6278/**
6279 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
6280 *
6281 * @hsotg: Programming view of DWC_otg controller
6282 *
6283 */
6284void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
6285{
6286 u32 val;
6287
6288 if (!hsotg->params.lpm)
6289 return;
6290
6291 val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
6292 val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
6293 val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
6294 val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
6295 val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
6296 val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
6297 val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
6298 dwc2_writel(hsotg, val, GLPMCFG);
6299 dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
6300
6301 /* Unmask WKUP_ALERT Interrupt */
6302 if (hsotg->params.service_interval)
6303 dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
6304}
6305
6306/**
6307 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
6308 *
6309 * @hsotg: Programming view of DWC_otg controller
6310 *
6311 */
6312void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
6313{
6314 u32 val = 0;
6315
6316 val |= GREFCLK_REF_CLK_MODE;
6317 val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
6318 val |= hsotg->params.sof_cnt_wkup_alert <<
6319 GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
6320
6321 dwc2_writel(hsotg, val, GREFCLK);
6322 dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
6323}
6324
6325/**
6326 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
6327 *
6328 * @hsotg: Programming view of the DWC_otg controller
6329 *
6330 * Return: non-zero if the controller failed to enter hibernation.
6331 */
6332int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
6333{
6334 u32 gpwrdn;
6335 int ret = 0;
6336
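	/*
	 * Entry sequence: back up global and device registers, activate the
	 * PMU, unmask its wakeup interrupts, assert the power-down clamp,
	 * then switch off VDD; each write is followed by a short delay.
	 */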
6337 /* Change to L2(suspend) state */
6338 hsotg->lx_state = DWC2_L2;
6339	dev_dbg(hsotg->dev, "Entering hibernation\n");
6340 ret = dwc2_backup_global_registers(hsotg);
6341 if (ret) {
6342 dev_err(hsotg->dev, "%s: failed to backup global registers\n",
6343 __func__);
6344 return ret;
6345 }
6346 ret = dwc2_backup_device_registers(hsotg);
6347 if (ret) {
6348 dev_err(hsotg->dev, "%s: failed to backup device registers\n",
6349 __func__);
6350 return ret;
6351 }
6352
6353 gpwrdn = GPWRDN_PWRDNRSTN;
6354 gpwrdn |= GPWRDN_PMUACTV;
6355 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6356 udelay(10);
6357
6358 /* Set flag to indicate that we are in hibernation */
6359 hsotg->hibernated = 1;
6360
6361 /* Enable interrupts from wake up logic */
6362 gpwrdn = dwc2_readl(hsotg, GPWRDN);
6363 gpwrdn |= GPWRDN_PMUINTSEL;
6364 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6365 udelay(10);
6366
6367 /* Unmask device mode interrupts in GPWRDN */
6368 gpwrdn = dwc2_readl(hsotg, GPWRDN);
6369 gpwrdn |= GPWRDN_RST_DET_MSK;
6370 gpwrdn |= GPWRDN_LNSTSCHG_MSK;
6371 gpwrdn |= GPWRDN_STS_CHGINT_MSK;
6372 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6373 udelay(10);
6374
6375 /* Enable Power Down Clamp */
6376 gpwrdn = dwc2_readl(hsotg, GPWRDN);
6377 gpwrdn |= GPWRDN_PWRDNCLMP;
6378 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6379 udelay(10);
6380
6381 /* Switch off VDD */
6382 gpwrdn = dwc2_readl(hsotg, GPWRDN);
6383 gpwrdn |= GPWRDN_PWRDNSWTCH;
6384 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6385 udelay(10);
6386
6387 /* Save gpwrdn register for further usage if stschng interrupt */
6388 hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
6389 dev_dbg(hsotg->dev, "Hibernation completed\n");
6390
6391 return ret;
6392}
6393
6394/**
6395 * dwc2_gadget_exit_hibernation() - Exit Device mode hibernation.
6396 * This function handles exit on host-initiated resume/reset and
6397 * device-initiated remote wakeup.
6398 *
6399 * @hsotg: Programming view of the DWC_otg controller
6400 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
6401 * @reset: indicates whether resume is initiated by Reset.
6402 *
6403 * Return: non-zero if the controller failed to exit hibernation.
6404 */
6405int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
6406 int rem_wakeup, int reset)
6407{
6408 u32 pcgcctl;
6409 u32 gpwrdn;
6410 u32 dctl;
6411 int ret = 0;
6412 struct dwc2_gregs_backup *gr;
6413 struct dwc2_dregs_backup *dr;
6414
6415 gr = &hsotg->gr_backup;
6416 dr = &hsotg->dr_backup;
6417
6418 if (!hsotg->hibernated) {
6419 dev_info(hsotg->dev, "Already exited from Hibernation\n");
6420 return 1;
6421 }
6422 dev_dbg(hsotg->dev,
6423 "%s: called with rem_wakeup = %d reset = %d\n",
6424 __func__, rem_wakeup, reset);
6425
6426 dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
6427
6428 if (!reset) {
6429		/* Clear all pending interrupts */
6430 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
6431 }
6432
6433 /* De-assert Restore */
6434 gpwrdn = dwc2_readl(hsotg, GPWRDN);
6435 gpwrdn &= ~GPWRDN_RESTORE;
6436 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6437 udelay(10);
6438
6439 if (!rem_wakeup) {
6440 pcgcctl = dwc2_readl(hsotg, PCGCTL);
6441 pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
6442 dwc2_writel(hsotg, pcgcctl, PCGCTL);
6443 }
6444
6445 /* Restore GUSBCFG, DCFG and DCTL */
6446 dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
6447 dwc2_writel(hsotg, dr->dcfg, DCFG);
6448 dwc2_writel(hsotg, dr->dctl, DCTL);
6449
6450 /* De-assert Wakeup Logic */
6451 gpwrdn = dwc2_readl(hsotg, GPWRDN);
6452 gpwrdn &= ~GPWRDN_PMUACTV;
6453 dwc2_writel(hsotg, gpwrdn, GPWRDN);
6454
6455 if (rem_wakeup) {
6456 udelay(10);
6457 /* Start Remote Wakeup Signaling */
6458 dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
6459 } else {
6460 udelay(50);
6461 /* Set Device programming done bit */
6462 dctl = dwc2_readl(hsotg, DCTL);
6463 dctl |= DCTL_PWRONPRGDONE;
6464 dwc2_writel(hsotg, dctl, DCTL);
6465 }
6466 /* Wait for interrupts which must be cleared */
6467 mdelay(2);
6468	/* Clear all pending interrupts */
6469 dwc2_writel(hsotg, 0xffffffff, GINTSTS);
6470
6471 /* Restore global registers */
6472 ret = dwc2_restore_global_registers(hsotg);
6473 if (ret) {
6474 dev_err(hsotg->dev, "%s: failed to restore registers\n",
6475 __func__);
6476 return ret;
6477 }
6478
6479 /* Restore device registers */
6480 ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
6481 if (ret) {
6482 dev_err(hsotg->dev, "%s: failed to restore device registers\n",
6483 __func__);
6484 return ret;
6485 }
6486
6487 if (rem_wakeup) {
6488 mdelay(10);
6489 dctl = dwc2_readl(hsotg, DCTL);
6490 dctl &= ~DCTL_RMTWKUPSIG;
6491 dwc2_writel(hsotg, dctl, DCTL);
6492 }
6493
6494 hsotg->hibernated = 0;
6495 hsotg->lx_state = DWC2_L0;
6496	dev_dbg(hsotg->dev, "Hibernation recovery completed\n");
6497
6498 return ret;
6499}
6500