// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * hcd.c - DesignWare HS OTG Controller host-mode routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the core HCD code, and implements the Linux hc_driver
 * API
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

static void dwc2_port_resume(struct dwc2_hsotg *hsotg);

/*
 * =========================================================================
 *  Host Core Layer Functions
 * =========================================================================
 */

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	dwc2_writel(hsotg, 0xffffffff, GOTGINT);

	/* Clear any pending interrupts */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	if (!hsotg->params.host_dma)
		intmsk |= GINTSTS_RXFLVL;
	if (!hsotg->params.external_id_pin_ctl)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	if (dwc2_is_device_mode(hsotg) && hsotg->params.lpm)
		intmsk |= GINTSTS_LPMTRANRCVD;

	dwc2_writel(hsotg, intmsk, GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->params.ulpi_fs_ls) ||
	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = dwc2_readl(hsotg, HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	dwc2_writel(hsotg, hcfg, HCFG);
}

static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, ggpio, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");

		usbcfg = dwc2_readl(hsotg, GUSBCFG);
		if (!(usbcfg & GUSBCFG_PHYSEL)) {
			usbcfg |= GUSBCFG_PHYSEL;
			dwc2_writel(hsotg, usbcfg, GUSBCFG);

			/* Reset after a PHY select */
			retval = dwc2_core_reset(hsotg, false);

			if (retval) {
				dev_err(hsotg->dev,
					"%s: Reset failed, aborting", __func__);
				return retval;
			}
		}

		if (hsotg->params.activate_stm_fs_transceiver) {
			ggpio = dwc2_readl(hsotg, GGPIO);
			if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
				dev_dbg(hsotg->dev, "Activating transceiver\n");
				/*
				 * STM32F4x9 uses the GGPIO register as general
				 * core configuration register.
				 */
				ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
				dwc2_writel(hsotg, ggpio, GGPIO);
			}
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->params.i2c_enable) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = dwc2_readl(hsotg, GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		dwc2_writel(hsotg, usbcfg, GUSBCFG);

		/* Program GI2CCTL.I2CEn */
		i2cctl = dwc2_readl(hsotg, GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		dwc2_writel(hsotg, i2cctl, GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		dwc2_writel(hsotg, i2cctl, GI2CCTL);
	}

	return retval;
}

static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, usbcfg_old;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	usbcfg_old = usbcfg;

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->params.phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->params.phy_ulpi_ddr)
			usbcfg |= GUSBCFG_DDRSEL;

		/* Set external VBUS indicator as needed. */
		if (hsotg->params.oc_disable)
			usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
				   GUSBCFG_INDICATORPASSTHROUGH);
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->params.phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	if (usbcfg != usbcfg_old) {
		dwc2_writel(hsotg, usbcfg, GUSBCFG);

		/* Reset after setting the PHY parameters */
		retval = dwc2_core_reset(hsotg, false);
		if (retval) {
			dev_err(hsotg->dev,
				"%s: Reset failed, aborting", __func__);
			return retval;
		}
	}

	return retval;
}

static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
	     hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS/LS mode with FS/LS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->params.ulpi_fs_ls) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = dwc2_readl(hsotg, GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(hsotg, usbcfg, GUSBCFG);
	} else {
		usbcfg = dwc2_readl(hsotg, GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(hsotg, usbcfg, GUSBCFG);
	}

	return retval;
}

static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		if (hsotg->params.ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->params.ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

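	/*
	 * Descriptor DMA is only usable together with buffer/host DMA, so it
	 * is forced off below when host DMA is not enabled.
	 */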
	if (hsotg->params.host_dma)
		ahbcfg |= GAHBCFG_DMA_EN;
	else
		hsotg->params.dma_desc_enable = false;

	dwc2_writel(hsotg, ahbcfg, GAHBCFG);

	return 0;
}

static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->params.otg_cap ==
		    DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->params.otg_cap !=
		    DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->params.otg_cap !=
		    DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	dwc2_writel(hsotg, usbcfg, GUSBCFG);
}

static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
	int ret;

	hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
	if (IS_ERR(hsotg->vbus_supply)) {
		ret = PTR_ERR(hsotg->vbus_supply);
		hsotg->vbus_supply = NULL;
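		/* A missing optional "vbus" supply is not an error */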
		return ret == -ENODEV ? 0 : ret;
	}

	return regulator_enable(hsotg->vbus_supply);
}

static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
{
	if (hsotg->vbus_supply)
		return regulator_disable(hsotg->vbus_supply);

	return 0;
}

/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	dwc2_writel(hsotg, 0, GINTMSK);
	dwc2_writel(hsotg, 0, HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = dwc2_readl(hsotg, GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	dwc2_writel(hsotg, intmsk, GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = dwc2_readl(hsotg, GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
	dwc2_writel(hsotg, intmsk, GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
 * for systems that have a total FIFO depth smaller than the default
 * RX + TX FIFO sizes.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = &hsotg->params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS (max packet size) as 1024 for a periodic EP and 512
	 * for a non-periodic EP.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channels.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}

static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = &hsotg->params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	dwc2_writel(hsotg, grxfsiz, GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
		dwc2_readl(hsotg, GRXFSIZ));

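	/*
	 * The Tx FIFOs are laid out back to back after the Rx FIFO in the
	 * shared FIFO RAM, so each start address programmed below is the sum
	 * of the FIFO depths placed before it.
	 */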
	/* Non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		dwc2_readl(hsotg, GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(hsotg, nptxfsiz, GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		dwc2_readl(hsotg, GNPTXFSIZ));

	/* Periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		dwc2_readl(hsotg, HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(hsotg, hptxfsiz, HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		dwc2_readl(hsotg, HPTXFSIZ));

	if (hsotg->params.en_multiple_tx_fifo &&
	    hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
		/*
		 * This feature was implemented in version 2.91a.
		 * The global DFIFOCFG calculation for Host mode
		 * includes the RxFIFO, NPTXFIFO and HPTXFIFO.
		 */
		dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
	}
}

/**
 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
 * the HFIR register according to PHY type and speed
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * NOTE: The caller can modify the value of the HFIR register only after the
 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
 * has been set
 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	hprt0 = dwc2_readl(hsotg, HPRT0);

	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

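	/*
	 * clock is the PHY clock in MHz, so the frame interval is the number
	 * of PHY clocks in a 125 us high-speed microframe or in a 1 ms
	 * full-/low-speed frame, minus one for the zero-based register value.
	 */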
	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case */
		return 125 * clock - 1;

	/* FS/LS case */
	return 1000 * clock - 1;
}

/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest:    Destination buffer for the packet
 * @bytes:   Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	u32 *data_buf = (u32 *)dest;
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = dwc2_readl(hsotg, HCFIFO(0));
}

/**
 * dwc2_dump_channel_info() - Prints the state of a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan:  Pointer to the channel to dump
 *
 * Must be called with interrupt disabled and spinlock held
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
	int num_channels = hsotg->params.host_channels;
	struct dwc2_qh *qh;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;
	int i;

	if (!chan)
		return;

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
	hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
	hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
	hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));

	dev_dbg(hsotg->dev, "  Assigned to channel %p:\n", chan);
	dev_dbg(hsotg->dev, "    hcchar 0x%08x, hcsplt 0x%08x\n",
		hcchar, hcsplt);
	dev_dbg(hsotg->dev, "    hctsiz 0x%08x, hc_dma 0x%08x\n",
		hctsiz, hc_dma);
	dev_dbg(hsotg->dev, "    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
		chan->dev_addr, chan->ep_num, chan->ep_is_in);
	dev_dbg(hsotg->dev, "    ep_type: %d\n", chan->ep_type);
	dev_dbg(hsotg->dev, "    max_packet: %d\n", chan->max_packet);
	dev_dbg(hsotg->dev, "    data_pid_start: %d\n", chan->data_pid_start);
	dev_dbg(hsotg->dev, "    xfer_started: %d\n", chan->xfer_started);
	dev_dbg(hsotg->dev, "    halt_status: %d\n", chan->halt_status);
	dev_dbg(hsotg->dev, "    xfer_buf: %p\n", chan->xfer_buf);
	dev_dbg(hsotg->dev, "    xfer_dma: %08lx\n",
		(unsigned long)chan->xfer_dma);
	dev_dbg(hsotg->dev, "    xfer_len: %d\n", chan->xfer_len);
	dev_dbg(hsotg->dev, "    qh: %p\n", chan->qh);
	dev_dbg(hsotg->dev, "  NP inactive sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  NP waiting sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  NP active sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  Channels:\n");
	for (i = 0; i < num_channels; i++) {
		struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];

		dev_dbg(hsotg->dev, "    %2d: %p\n", i, chan);
	}
#endif /* VERBOSE_DEBUG */
}

static int _dwc2_hcd_start(struct usb_hcd *hcd);

static void dwc2_host_start(struct dwc2_hsotg *hsotg)
{
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
	_dwc2_hcd_start(hcd);
}

static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
{
	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);

	hcd->self.is_b_host = 0;
}

static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
			       int *hub_addr, int *hub_port)
{
	struct urb *urb = context;

	if (urb->dev->tt)
		*hub_addr = urb->dev->tt->hub->devnum;
	else
		*hub_addr = 0;
	*hub_port = urb->dev->ttport;
}

/*
 * =========================================================================
 *  Low Level Host Channel Access Functions
 * =========================================================================
 */

static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * In Descriptor DMA mode the core halts the channel on AHB error,
	 * so the AHB error interrupt is not required.
	 */
	if (!hsotg->params.dma_desc_enable) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->params.host_dma) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = dwc2_readl(hsotg, HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	dwc2_writel(hsotg, intmsk, HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = dwc2_readl(hsotg, GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	dwc2_writel(hsotg, intmsk, GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan:  Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, "	  comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, "	  hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, "	  is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
				 chan->xfer_len);
		}
	}

	dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
}

/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg:       Controller register interface
 * @chan:        Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * In buffer DMA or external DMA mode, the channel can't be halted
	 * for non-split periodic channels. At the end of the next
	 * uframe/frame (in the worst case), the core generates a channel
	 * halted interrupt and disables the channel automatically.
	 */
	if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
	    hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
		if (!chan->do_split &&
		    (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
		     chan->ep_type == USB_ENDPOINT_XFER_INT)) {
			dev_err(hsotg->dev, "%s() Channel can't be halted\n",
				__func__);
			return;
		}
	}

	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		dwc2_writel(hsotg, hcintmsk, HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (!hsotg->params.dma_desc_enable) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	hcchar |= HCCHAR_CHDIS;

	if (!hsotg->params.host_dma) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = dwc2_readl(hsotg, GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = dwc2_readl(hsotg, HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}

/**
 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan:  Identifies the host channel to clean up
 *
 * This function is normally called after a transfer is done and the host
 * channel is being released
 */
void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u32 hcintmsk;

	chan->xfer_started = 0;

	list_del_init(&chan->split_order_list_entry);

	/*
	 * Clear channel interrupt enables and any unhandled channel interrupt
	 * conditions
	 */
	dwc2_writel(hsotg, 0, HCINTMSK(chan->hc_num));
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hsotg, hcintmsk, HCINT(chan->hc_num));
}

/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg:  Programming view of DWC_otg controller
 * @chan:   Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		int host_speed;
		int xfer_ns;
		int xfer_us;
		int bytes_in_fifo;
		u16 fifo_space;
		u16 frame_number;
		u16 wire_frame;

		/*
		 * Try to figure out if we're an even or odd frame. If we set
		 * even and the current frame number is even then the transfer
		 * will happen immediately.  Similarly if both are odd. If one
		 * is even and the other is odd then the transfer will happen
		 * when the frame number ticks.
		 *
		 * There's a bit of a balancing act to get this right.
		 * Sometimes we may want to send data in the current frame (i.e.
		 * right away).  We might want to do this if the frame number
		 * _just_ ticked, but we might also want to do this in order
		 * to continue a split transaction that happened late in a
		 * microframe (so we didn't know to queue the next transfer
		 * until the frame number had ticked).  The problem is that we
		 * need a lot of knowledge to know if there's actually still
		 * time to send things or if it would be better to wait until
		 * the next frame.
		 *
		 * We can look at how much time is left in the current frame
		 * and make a guess about whether we'll have time to transfer.
		 * We'll do that.
		 */

		/* Get speed host is running at */
		host_speed = (chan->speed != USB_SPEED_HIGH &&
			      !chan->do_split) ? chan->speed : USB_SPEED_HIGH;

		/* See how many bytes are in the periodic FIFO right now */
		fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
			      TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
		bytes_in_fifo = sizeof(u32) *
				(hsotg->params.host_perio_tx_fifo_size -
				 fifo_space);

		/*
		 * Roughly estimate bus time for everything in the periodic
		 * queue + our new transfer.  This is "rough" because we're
		 * using a function that takes into account IN/OUT
		 * and INT/ISO and we're just slamming in one value for all
		 * transfers.  This should be an over-estimate and that should
		 * be OK, but we can probably tighten it.
		 */
		xfer_ns = usb_calc_bus_time(host_speed, false, false,
					    chan->xfer_len + bytes_in_fifo);
		xfer_us = NS_TO_US(xfer_ns);

		/* See what frame number we'll be at by the time we finish */
		frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);

		/* This is when we were scheduled to be on the wire */
		wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);

		/*
		 * If we'd finish _after_ the frame we're scheduled in then
		 * it's hopeless.  Just schedule right away and hope for the
		 * best.  Note that it _might_ be wise to call back into the
		 * scheduler to pick a better frame, but this is better than
		 * nothing.
		 */
		if (dwc2_frame_num_gt(frame_number, wire_frame)) {
			dwc2_sch_vdbg(hsotg,
				      "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
				      chan->qh, wire_frame, frame_number,
				      dwc2_frame_num_dec(frame_number,
							 wire_frame));
			wire_frame = frame_number;

			/*
			 * We picked a different frame number; communicate this
			 * back to the scheduler so it doesn't try to schedule
			 * another in the same frame.
			 *
			 * Remember that next_active_frame is 1 before the wire
			 * frame.
			 */
			chan->qh->next_active_frame =
				dwc2_frame_num_dec(frame_number, 1);
		}

		if (wire_frame & 1)
			*hcchar |= HCCHAR_ODDFRM;
		else
			*hcchar &= ~HCCHAR_ODDFRM;
	}
}

static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
{
	/* Set up the initial PID for the transfer */
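	/*
	 * For high-speed high-bandwidth isochronous endpoints the starting
	 * PID reflects the number of transactions per microframe:
	 * DATA0/DATA1/DATA2 for IN and MDATA for multi-transaction OUT.
	 */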
|  | 1291 | if (chan->speed == USB_SPEED_HIGH) { | 
|  | 1292 | if (chan->ep_is_in) { | 
|  | 1293 | if (chan->multi_count == 1) | 
|  | 1294 | chan->data_pid_start = DWC2_HC_PID_DATA0; | 
|  | 1295 | else if (chan->multi_count == 2) | 
|  | 1296 | chan->data_pid_start = DWC2_HC_PID_DATA1; | 
|  | 1297 | else | 
|  | 1298 | chan->data_pid_start = DWC2_HC_PID_DATA2; | 
|  | 1299 | } else { | 
|  | 1300 | if (chan->multi_count == 1) | 
|  | 1301 | chan->data_pid_start = DWC2_HC_PID_DATA0; | 
|  | 1302 | else | 
|  | 1303 | chan->data_pid_start = DWC2_HC_PID_MDATA; | 
|  | 1304 | } | 
|  | 1305 | } else { | 
|  | 1306 | chan->data_pid_start = DWC2_HC_PID_DATA0; | 
|  | 1307 | } | 
|  | 1308 | } | 
|  | 1309 |  | 
|  | 1310 | /** | 
|  | 1311 | * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with | 
|  | 1312 | * the Host Channel | 
|  | 1313 | * | 
|  | 1314 | * @hsotg: Programming view of DWC_otg controller | 
|  | 1315 | * @chan:  Information needed to initialize the host channel | 
|  | 1316 | * | 
|  | 1317 | * This function should only be called in Slave mode. For a channel associated | 
|  | 1318 | * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel | 
|  | 1319 | * associated with a periodic EP, the periodic Tx FIFO is written. | 
|  | 1320 | * | 
|  | 1321 | * Upon return the xfer_buf and xfer_count fields in chan are incremented by | 
|  | 1322 | * the number of bytes written to the Tx FIFO. | 
|  | 1323 | */ | 
|  | 1324 | static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg, | 
|  | 1325 | struct dwc2_host_chan *chan) | 
|  | 1326 | { | 
|  | 1327 | u32 i; | 
|  | 1328 | u32 remaining_count; | 
|  | 1329 | u32 byte_count; | 
|  | 1330 | u32 dword_count; | 
|  | 1331 | u32 __iomem *data_fifo; | 
|  | 1332 | u32 *data_buf = (u32 *)chan->xfer_buf; | 
|  | 1333 |  | 
|  | 1334 | if (dbg_hc(chan)) | 
|  | 1335 | dev_vdbg(hsotg->dev, "%s()\n", __func__); | 
|  | 1336 |  | 
|  | 1337 | data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num)); | 
|  | 1338 |  | 
|  | 1339 | remaining_count = chan->xfer_len - chan->xfer_count; | 
|  | 1340 | if (remaining_count > chan->max_packet) | 
|  | 1341 | byte_count = chan->max_packet; | 
|  | 1342 | else | 
|  | 1343 | byte_count = remaining_count; | 
|  | 1344 |  | 
|  | 1345 | dword_count = (byte_count + 3) / 4; | 
|  | 1346 |  | 
|  | 1347 | if (((unsigned long)data_buf & 0x3) == 0) { | 
|  | 1348 | /* xfer_buf is DWORD aligned */ | 
|  | 1349 | for (i = 0; i < dword_count; i++, data_buf++) | 
|  | 1350 | dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num)); | 
|  | 1351 | } else { | 
|  | 1352 | /* xfer_buf is not DWORD aligned */ | 
|  | 1353 | for (i = 0; i < dword_count; i++, data_buf++) { | 
|  | 1354 | u8 *byte_buf = (u8 *)data_buf; | 
|  | 1355 | u32 data = byte_buf[0] | byte_buf[1] << 8 | | 
|  |  | byte_buf[2] << 16 | (u32)byte_buf[3] << 24; | 
|  | 1356 | dwc2_writel(hsotg, data, HCFIFO(chan->hc_num)); | 
|  | 1357 | } | 
|  | 1358 | } | 
|  | 1359 |  | 
|  | 1360 | chan->xfer_count += byte_count; | 
|  | 1361 | chan->xfer_buf += byte_count; | 
|  | 1362 | } | 
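|  |  |  | 
|  |  | /* | 
|  |  | * For example, with max_packet = 64 and 13 bytes left to send, | 
|  |  | * byte_count = 13 and dword_count = (13 + 3) / 4 = 4, so four 32-bit | 
|  |  | * FIFO writes are issued; only the first 13 bytes of those words belong | 
|  |  | * to the packet, the remainder of the final word is padding. | 
|  |  | */ | 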
|  | 1363 |  | 
|  | 1364 | /** | 
|  | 1365 | * dwc2_hc_do_ping() - Starts a PING transfer | 
|  | 1366 | * | 
|  | 1367 | * @hsotg: Programming view of DWC_otg controller | 
|  | 1368 | * @chan:  Information needed to initialize the host channel | 
|  | 1369 | * | 
|  | 1370 | * This function should only be called in Slave mode. The Do Ping bit is set in | 
|  | 1371 | * the HCTSIZ register, then the channel is enabled. | 
|  | 1372 | */ | 
|  | 1373 | static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, | 
|  | 1374 | struct dwc2_host_chan *chan) | 
|  | 1375 | { | 
|  | 1376 | u32 hcchar; | 
|  | 1377 | u32 hctsiz; | 
|  | 1378 |  | 
|  | 1379 | if (dbg_hc(chan)) | 
|  | 1380 | dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, | 
|  | 1381 | chan->hc_num); | 
|  | 1382 |  | 
|  | 1383 | hctsiz = TSIZ_DOPNG; | 
|  | 1384 | hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; | 
|  | 1385 | dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num)); | 
|  | 1386 |  | 
|  | 1387 | hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num)); | 
|  | 1388 | hcchar |= HCCHAR_CHENA; | 
|  | 1389 | hcchar &= ~HCCHAR_CHDIS; | 
|  | 1390 | dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num)); | 
|  | 1391 | } | 
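|  |  |  | 
|  |  | /* | 
|  |  | * PING is a high-speed flow-control mechanism for bulk/control OUT | 
|  |  | * endpoints. chan->do_ping is typically set after the device has | 
|  |  | * responded with NYET or NAK, and the channel interrupt handlers | 
|  |  | * re-issue the OUT data once the PING is ACKed. | 
|  |  | */ | 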
|  | 1392 |  | 
|  | 1393 | /** | 
|  | 1394 | * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host | 
|  | 1395 | * channel and starts the transfer | 
|  | 1396 | * | 
|  | 1397 | * @hsotg: Programming view of DWC_otg controller | 
|  | 1398 | * @chan:  Information needed to initialize the host channel. The xfer_len value | 
|  | 1399 | *         may be reduced to accommodate the max widths of the XferSize and | 
|  | 1400 | *         PktCnt fields in the HCTSIZn register. The multi_count value may be | 
|  | 1401 | *         changed to reflect the final xfer_len value. | 
|  | 1402 | * | 
|  | 1403 | * This function may be called in either Slave mode or DMA mode. In Slave mode, | 
|  | 1404 | * the caller must ensure that there is sufficient space in the request queue | 
|  | 1405 | * and Tx Data FIFO. | 
|  | 1406 | * | 
|  | 1407 | * For an OUT transfer in Slave mode, it loads a data packet into the | 
|  | 1408 | * appropriate FIFO. If necessary, additional data packets are loaded in the | 
|  | 1409 | * Host ISR. | 
|  | 1410 | * | 
|  | 1411 | * For an IN transfer in Slave mode, a data packet is requested. The data | 
|  | 1412 | * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, | 
|  | 1413 | * additional data packets are requested in the Host ISR. | 
|  | 1414 | * | 
|  | 1415 | * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ | 
|  | 1416 | * register along with a packet count of 1 and the channel is enabled. This | 
|  | 1417 | * causes a single PING transaction to occur. Other fields in HCTSIZ are | 
|  | 1418 | * simply set to 0 since no data transfer occurs in this case. | 
|  | 1419 | * | 
|  | 1420 | * For a PING transfer in DMA mode, the HCTSIZ register is initialized with | 
|  | 1421 | * all the information required to perform the subsequent data transfer. In | 
|  | 1422 | * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the | 
|  | 1423 | * controller performs the entire PING protocol, then starts the data | 
|  | 1424 | * transfer. | 
|  | 1425 | */ | 
|  | 1426 | static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, | 
|  | 1427 | struct dwc2_host_chan *chan) | 
|  | 1428 | { | 
|  | 1429 | u32 max_hc_xfer_size = hsotg->params.max_transfer_size; | 
|  | 1430 | u16 max_hc_pkt_count = hsotg->params.max_packet_count; | 
|  | 1431 | u32 hcchar; | 
|  | 1432 | u32 hctsiz = 0; | 
|  | 1433 | u16 num_packets; | 
|  | 1434 | u32 ec_mc; | 
|  | 1435 |  | 
|  | 1436 | if (dbg_hc(chan)) | 
|  | 1437 | dev_vdbg(hsotg->dev, "%s()\n", __func__); | 
|  | 1438 |  | 
|  | 1439 | if (chan->do_ping) { | 
|  | 1440 | if (!hsotg->params.host_dma) { | 
|  | 1441 | if (dbg_hc(chan)) | 
|  | 1442 | dev_vdbg(hsotg->dev, "ping, no DMA\n"); | 
|  | 1443 | dwc2_hc_do_ping(hsotg, chan); | 
|  | 1444 | chan->xfer_started = 1; | 
|  | 1445 | return; | 
|  | 1446 | } | 
|  | 1447 |  | 
|  | 1448 | if (dbg_hc(chan)) | 
|  | 1449 | dev_vdbg(hsotg->dev, "ping, DMA\n"); | 
|  | 1450 |  | 
|  | 1451 | hctsiz |= TSIZ_DOPNG; | 
|  | 1452 | } | 
|  | 1453 |  | 
|  | 1454 | if (chan->do_split) { | 
|  | 1455 | if (dbg_hc(chan)) | 
|  | 1456 | dev_vdbg(hsotg->dev, "split\n"); | 
|  | 1457 | num_packets = 1; | 
|  | 1458 |  | 
|  | 1459 | if (chan->complete_split && !chan->ep_is_in) | 
|  | 1460 | /* | 
|  | 1461 | * For CSPLIT OUT Transfer, set the size to 0 so the | 
|  | 1462 | * core doesn't expect any data written to the FIFO | 
|  | 1463 | */ | 
|  | 1464 | chan->xfer_len = 0; | 
|  | 1465 | else if (chan->ep_is_in || chan->xfer_len > chan->max_packet) | 
|  | 1466 | chan->xfer_len = chan->max_packet; | 
|  | 1467 | else if (!chan->ep_is_in && chan->xfer_len > 188) | 
|  | 1468 | chan->xfer_len = 188; | 
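|  |  |  | 
|  |  | /* | 
|  |  | * The 188-byte cap corresponds to the largest full-speed payload a | 
|  |  | * transaction translator can forward during one microframe (12 Mbit/s | 
|  |  | * for 125 us is 187.5 bytes), which bounds how much data a single | 
|  |  | * isochronous OUT start-split may carry. | 
|  |  | */ | 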
|  | 1469 |  | 
|  | 1470 | hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & | 
|  | 1471 | TSIZ_XFERSIZE_MASK; | 
|  | 1472 |  | 
|  | 1473 | /* For split set ec_mc for immediate retries */ | 
|  | 1474 | if (chan->ep_type == USB_ENDPOINT_XFER_INT || | 
|  | 1475 | chan->ep_type == USB_ENDPOINT_XFER_ISOC) | 
|  | 1476 | ec_mc = 3; | 
|  | 1477 | else | 
|  | 1478 | ec_mc = 1; | 
|  | 1479 | } else { | 
|  | 1480 | if (dbg_hc(chan)) | 
|  | 1481 | dev_vdbg(hsotg->dev, "no split\n"); | 
|  | 1482 | /* | 
|  | 1483 | * Ensure that the transfer length and packet count will fit | 
|  | 1484 | * in the widths allocated for them in the HCTSIZn register | 
|  | 1485 | */ | 
|  | 1486 | if (chan->ep_type == USB_ENDPOINT_XFER_INT || | 
|  | 1487 | chan->ep_type == USB_ENDPOINT_XFER_ISOC) { | 
|  | 1488 | /* | 
|  | 1489 | * Make sure the transfer size is no larger than one | 
|  | 1490 | * (micro)frame's worth of data. (A check was done | 
|  | 1491 | * when the periodic transfer was accepted to ensure | 
|  | 1492 | * that a (micro)frame's worth of data can be | 
|  | 1493 | * programmed into a channel.) | 
|  | 1494 | */ | 
|  | 1495 | u32 max_periodic_len = | 
|  | 1496 | chan->multi_count * chan->max_packet; | 
|  | 1497 |  | 
|  | 1498 | if (chan->xfer_len > max_periodic_len) | 
|  | 1499 | chan->xfer_len = max_periodic_len; | 
|  | 1500 | } else if (chan->xfer_len > max_hc_xfer_size) { | 
|  | 1501 | /* | 
|  | 1502 | * Clamp xfer_len so that, after rounding up to whole | 
|  | 1503 | * max-packet-size packets, it still fits in the XferSize field | 
|  | 1504 | */ | 
|  | 1505 | chan->xfer_len = | 
|  | 1506 | max_hc_xfer_size - chan->max_packet + 1; | 
|  | 1507 | } | 
|  | 1508 |  | 
|  | 1509 | if (chan->xfer_len > 0) { | 
|  | 1510 | num_packets = (chan->xfer_len + chan->max_packet - 1) / | 
|  | 1511 | chan->max_packet; | 
|  | 1512 | if (num_packets > max_hc_pkt_count) { | 
|  | 1513 | num_packets = max_hc_pkt_count; | 
|  | 1514 | chan->xfer_len = num_packets * chan->max_packet; | 
|  | 1515 | } | 
|  | 1516 | } else { | 
|  | 1517 | /* Need 1 packet for transfer length of 0 */ | 
|  | 1518 | num_packets = 1; | 
|  | 1519 | } | 
|  | 1520 |  | 
|  | 1521 | if (chan->ep_is_in) | 
|  | 1522 | /* | 
|  | 1523 | * Always program an integral # of max packets for IN | 
|  | 1524 | * transfers | 
|  | 1525 | */ | 
|  | 1526 | chan->xfer_len = num_packets * chan->max_packet; | 
|  | 1527 |  | 
|  | 1528 | if (chan->ep_type == USB_ENDPOINT_XFER_INT || | 
|  | 1529 | chan->ep_type == USB_ENDPOINT_XFER_ISOC) | 
|  | 1530 | /* | 
|  | 1531 | * Make sure that the multi_count field matches the | 
|  | 1532 | * actual transfer length | 
|  | 1533 | */ | 
|  | 1534 | chan->multi_count = num_packets; | 
|  | 1535 |  | 
|  | 1536 | if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) | 
|  | 1537 | dwc2_set_pid_isoc(chan); | 
|  | 1538 |  | 
|  | 1539 | hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT & | 
|  | 1540 | TSIZ_XFERSIZE_MASK; | 
|  | 1541 |  | 
|  | 1542 | /* The ec_mc gets the multi_count for non-split */ | 
|  | 1543 | ec_mc = chan->multi_count; | 
|  | 1544 | } | 
|  | 1545 |  | 
|  | 1546 | chan->start_pkt_count = num_packets; | 
|  | 1547 | hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; | 
|  | 1548 | hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & | 
|  | 1549 | TSIZ_SC_MC_PID_MASK; | 
|  | 1550 | dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num)); | 
|  | 1551 | if (dbg_hc(chan)) { | 
|  | 1552 | dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n", | 
|  | 1553 | hctsiz, chan->hc_num); | 
|  | 1554 |  | 
|  | 1555 | dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, | 
|  | 1556 | chan->hc_num); | 
|  | 1557 | dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n", | 
|  | 1558 | (hctsiz & TSIZ_XFERSIZE_MASK) >> | 
|  | 1559 | TSIZ_XFERSIZE_SHIFT); | 
|  | 1560 | dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n", | 
|  | 1561 | (hctsiz & TSIZ_PKTCNT_MASK) >> | 
|  | 1562 | TSIZ_PKTCNT_SHIFT); | 
|  | 1563 | dev_vdbg(hsotg->dev, "	 Start PID: %d\n", | 
|  | 1564 | (hctsiz & TSIZ_SC_MC_PID_MASK) >> | 
|  | 1565 | TSIZ_SC_MC_PID_SHIFT); | 
|  | 1566 | } | 
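|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative example: a 1024-byte bulk OUT with max_packet = 512 | 
|  |  | * gives num_packets = 2, so hctsiz ends up as | 
|  |  | * (1024 << TSIZ_XFERSIZE_SHIFT) | (2 << TSIZ_PKTCNT_SHIFT) | | 
|  |  | * (DWC2_HC_PID_DATA0 << TSIZ_SC_MC_PID_SHIFT), assuming the transfer | 
|  |  | * starts on DATA0. | 
|  |  | */ | 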
|  | 1567 |  | 
|  | 1568 | if (hsotg->params.host_dma) { | 
|  | 1569 | dma_addr_t dma_addr; | 
|  | 1570 |  | 
|  | 1571 | if (chan->align_buf) { | 
|  | 1572 | if (dbg_hc(chan)) | 
|  | 1573 | dev_vdbg(hsotg->dev, "align_buf\n"); | 
|  | 1574 | dma_addr = chan->align_buf; | 
|  | 1575 | } else { | 
|  | 1576 | dma_addr = chan->xfer_dma; | 
|  | 1577 | } | 
|  | 1578 | dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num)); | 
|  | 1579 |  | 
|  | 1580 | if (dbg_hc(chan)) | 
|  | 1581 | dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n", | 
|  | 1582 | (unsigned long)dma_addr, chan->hc_num); | 
|  | 1583 | } | 
|  | 1584 |  | 
|  | 1585 | /* Start the split */ | 
|  | 1586 | if (chan->do_split) { | 
|  | 1587 | u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num)); | 
|  | 1588 |  | 
|  | 1589 | hcsplt |= HCSPLT_SPLTENA; | 
|  | 1590 | dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num)); | 
|  | 1591 | } | 
|  | 1592 |  | 
|  | 1593 | hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num)); | 
|  | 1594 | hcchar &= ~HCCHAR_MULTICNT_MASK; | 
|  | 1595 | hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK; | 
|  | 1596 | dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); | 
|  | 1597 |  | 
|  | 1598 | if (hcchar & HCCHAR_CHDIS) | 
|  | 1599 | dev_warn(hsotg->dev, | 
|  | 1600 | "%s: chdis set, channel %d, hcchar 0x%08x\n", | 
|  | 1601 | __func__, chan->hc_num, hcchar); | 
|  | 1602 |  | 
|  | 1603 | /* Set host channel enable after all other setup is complete */ | 
|  | 1604 | hcchar |= HCCHAR_CHENA; | 
|  | 1605 | hcchar &= ~HCCHAR_CHDIS; | 
|  | 1606 |  | 
|  | 1607 | if (dbg_hc(chan)) | 
|  | 1608 | dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n", | 
|  | 1609 | (hcchar & HCCHAR_MULTICNT_MASK) >> | 
|  | 1610 | HCCHAR_MULTICNT_SHIFT); | 
|  | 1611 |  | 
|  | 1612 | dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num)); | 
|  | 1613 | if (dbg_hc(chan)) | 
|  | 1614 | dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, | 
|  | 1615 | chan->hc_num); | 
|  | 1616 |  | 
|  | 1617 | chan->xfer_started = 1; | 
|  | 1618 | chan->requests++; | 
|  | 1619 |  | 
|  | 1620 | if (!hsotg->params.host_dma && | 
|  | 1621 | !chan->ep_is_in && chan->xfer_len > 0) | 
|  | 1622 | /* Load OUT packet into the appropriate Tx FIFO */ | 
|  | 1623 | dwc2_hc_write_packet(hsotg, chan); | 
|  | 1624 | } | 
|  | 1625 |  | 
|  | 1626 | /** | 
|  | 1627 | * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a | 
|  | 1628 | * host channel and starts the transfer in Descriptor DMA mode | 
|  | 1629 | * | 
|  | 1630 | * @hsotg: Programming view of DWC_otg controller | 
|  | 1631 | * @chan:  Information needed to initialize the host channel | 
|  | 1632 | * | 
|  | 1633 | * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. | 
|  | 1634 | * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field | 
|  | 1635 | * with micro-frame bitmap. | 
|  | 1636 | * | 
|  | 1637 | * Initializes HCDMA register with descriptor list address and CTD value then | 
|  | 1638 | * starts the transfer via enabling the channel. | 
|  | 1639 | */ | 
|  | 1640 | void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg, | 
|  | 1641 | struct dwc2_host_chan *chan) | 
|  | 1642 | { | 
|  | 1643 | u32 hcchar; | 
|  | 1644 | u32 hctsiz = 0; | 
|  | 1645 |  | 
|  | 1646 | if (chan->do_ping) | 
|  | 1647 | hctsiz |= TSIZ_DOPNG; | 
|  | 1648 |  | 
|  | 1649 | if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) | 
|  | 1650 | dwc2_set_pid_isoc(chan); | 
|  | 1651 |  | 
|  | 1652 | /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ | 
|  | 1653 | hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT & | 
|  | 1654 | TSIZ_SC_MC_PID_MASK; | 
|  | 1655 |  | 
|  | 1656 | /* NTD is one less than the number of descriptors: 0 = 1 descriptor, 1 = 2, etc */ | 
|  | 1657 | hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; | 
|  | 1658 |  | 
|  | 1659 | /* Non-zero only for high-speed interrupt endpoints */ | 
|  | 1660 | hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; | 
|  | 1661 |  | 
|  | 1662 | if (dbg_hc(chan)) { | 
|  | 1663 | dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, | 
|  | 1664 | chan->hc_num); | 
|  | 1665 | dev_vdbg(hsotg->dev, "	 Start PID: %d\n", | 
|  | 1666 | chan->data_pid_start); | 
|  | 1667 | dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1); | 
|  | 1668 | } | 
|  | 1669 |  | 
|  | 1670 | dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num)); | 
|  | 1671 |  | 
|  | 1672 | dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr, | 
|  | 1673 | chan->desc_list_sz, DMA_TO_DEVICE); | 
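|  |  |  | 
|  |  | /* | 
|  |  | * The sync above hands the CPU-written descriptor list over to the | 
|  |  | * controller: on non-coherent platforms it flushes the CPU cache so the | 
|  |  | * core fetches up-to-date descriptors once the channel is enabled. | 
|  |  | */ | 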
|  | 1674 |  | 
|  | 1675 | dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num)); | 
|  | 1676 |  | 
|  | 1677 | if (dbg_hc(chan)) | 
|  | 1678 | dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n", | 
|  | 1679 | &chan->desc_list_addr, chan->hc_num); | 
|  | 1680 |  | 
|  | 1681 | hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num)); | 
|  | 1682 | hcchar &= ~HCCHAR_MULTICNT_MASK; | 
|  | 1683 | hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT & | 
|  | 1684 | HCCHAR_MULTICNT_MASK; | 
|  | 1685 |  | 
|  | 1686 | if (hcchar & HCCHAR_CHDIS) | 
|  | 1687 | dev_warn(hsotg->dev, | 
|  | 1688 | "%s: chdis set, channel %d, hcchar 0x%08x\n", | 
|  | 1689 | __func__, chan->hc_num, hcchar); | 
|  | 1690 |  | 
|  | 1691 | /* Set host channel enable after all other setup is complete */ | 
|  | 1692 | hcchar |= HCCHAR_CHENA; | 
|  | 1693 | hcchar &= ~HCCHAR_CHDIS; | 
|  | 1694 |  | 
|  | 1695 | if (dbg_hc(chan)) | 
|  | 1696 | dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n", | 
|  | 1697 | (hcchar & HCCHAR_MULTICNT_MASK) >> | 
|  | 1698 | HCCHAR_MULTICNT_SHIFT); | 
|  | 1699 |  | 
|  | 1700 | dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num)); | 
|  | 1701 | if (dbg_hc(chan)) | 
|  | 1702 | dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, | 
|  | 1703 | chan->hc_num); | 
|  | 1704 |  | 
|  | 1705 | chan->xfer_started = 1; | 
|  | 1706 | chan->requests++; | 
|  | 1707 | } | 
|  | 1708 |  | 
|  | 1709 | /** | 
|  | 1710 | * dwc2_hc_continue_transfer() - Continues a data transfer that was started by | 
|  | 1711 | * a previous call to dwc2_hc_start_transfer() | 
|  | 1712 | * | 
|  | 1713 | * @hsotg: Programming view of DWC_otg controller | 
|  | 1714 | * @chan:  Information needed to initialize the host channel | 
|  | 1715 | * | 
|  | 1716 | * The caller must ensure there is sufficient space in the request queue and Tx | 
|  | 1717 | * Data FIFO. This function should only be called in Slave mode. In DMA mode, | 
|  | 1718 | * the controller acts autonomously to complete transfers programmed to a host | 
|  | 1719 | * channel. | 
|  | 1720 | * | 
|  | 1721 | * For an OUT transfer, a new data packet is loaded into the appropriate FIFO | 
|  | 1722 | * if there is any data remaining to be queued. For an IN transfer, another | 
|  | 1723 | * data packet is always requested. For the SETUP phase of a control transfer, | 
|  | 1724 | * this function does nothing. | 
|  | 1725 | * | 
|  | 1726 | * Return: 1 if a new request is queued, 0 if no more requests are required | 
|  | 1727 | * for this transfer | 
|  | 1728 | */ | 
|  | 1729 | static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg, | 
|  | 1730 | struct dwc2_host_chan *chan) | 
|  | 1731 | { | 
|  | 1732 | if (dbg_hc(chan)) | 
|  | 1733 | dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, | 
|  | 1734 | chan->hc_num); | 
|  | 1735 |  | 
|  | 1736 | if (chan->do_split) | 
|  | 1737 | /* SPLITs always queue just once per channel */ | 
|  | 1738 | return 0; | 
|  | 1739 |  | 
|  | 1740 | if (chan->data_pid_start == DWC2_HC_PID_SETUP) | 
|  | 1741 | /* SETUPs are queued only once since they can't be NAK'd */ | 
|  | 1742 | return 0; | 
|  | 1743 |  | 
|  | 1744 | if (chan->ep_is_in) { | 
|  | 1745 | /* | 
|  | 1746 | * Always queue another request for other IN transfers. If | 
|  | 1747 | * back-to-back INs are issued and NAKs are received for both, | 
|  | 1748 | * the driver may still be processing the first NAK when the | 
|  | 1749 | * second NAK is received. When the interrupt handler clears | 
|  | 1750 | * the NAK interrupt for the first NAK, the second NAK will | 
|  | 1751 | * not be seen. So we can't depend on the NAK interrupt | 
|  | 1752 | * handler to requeue a NAK'd request. Instead, IN requests | 
|  | 1753 | * are issued each time this function is called. When the | 
|  | 1754 | * transfer completes, the extra requests for the channel will | 
|  | 1755 | * be flushed. | 
|  | 1756 | */ | 
|  | 1757 | u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num)); | 
|  | 1758 |  | 
|  | 1759 | dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar); | 
|  | 1760 | hcchar |= HCCHAR_CHENA; | 
|  | 1761 | hcchar &= ~HCCHAR_CHDIS; | 
|  | 1762 | if (dbg_hc(chan)) | 
|  | 1763 | dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n", | 
|  | 1764 | hcchar); | 
|  | 1765 | dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num)); | 
|  | 1766 | chan->requests++; | 
|  | 1767 | return 1; | 
|  | 1768 | } | 
|  | 1769 |  | 
|  | 1770 | /* OUT transfers */ | 
|  | 1771 |  | 
|  | 1772 | if (chan->xfer_count < chan->xfer_len) { | 
|  | 1773 | if (chan->ep_type == USB_ENDPOINT_XFER_INT || | 
|  | 1774 | chan->ep_type == USB_ENDPOINT_XFER_ISOC) { | 
|  | 1775 | u32 hcchar = dwc2_readl(hsotg, | 
|  | 1776 | HCCHAR(chan->hc_num)); | 
|  | 1777 |  | 
|  | 1778 | dwc2_hc_set_even_odd_frame(hsotg, chan, | 
|  | 1779 | &hcchar); | 
|  | 1780 | } | 
|  | 1781 |  | 
|  | 1782 | /* Load OUT packet into the appropriate Tx FIFO */ | 
|  | 1783 | dwc2_hc_write_packet(hsotg, chan); | 
|  | 1784 | chan->requests++; | 
|  | 1785 | return 1; | 
|  | 1786 | } | 
|  | 1787 |  | 
|  | 1788 | return 0; | 
|  | 1789 | } | 
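|  |  |  | 
|  |  | /* | 
|  |  | * In Slave mode this is driven from the transaction-queuing path (see | 
|  |  | * dwc2_queue_transaction() later in this file): once a channel has been | 
|  |  | * started, that code keeps calling dwc2_hc_continue_transfer() while | 
|  |  | * there is request-queue and Tx FIFO space, until it returns 0. | 
|  |  | */ | 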
|  | 1790 |  | 
|  | 1791 | /* | 
|  | 1792 | * ========================================================================= | 
|  | 1793 | *  HCD | 
|  | 1794 | * ========================================================================= | 
|  | 1795 | */ | 
|  | 1796 |  | 
|  | 1797 | /* | 
|  | 1798 | * Processes all the URBs in a single list of QHs. Completes them with | 
|  | 1799 | * -ECONNRESET and frees the QTD. | 
|  | 1800 | * | 
|  | 1801 | * Must be called with interrupt disabled and spinlock held | 
|  | 1802 | */ | 
|  | 1803 | static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg, | 
|  | 1804 | struct list_head *qh_list) | 
|  | 1805 | { | 
|  | 1806 | struct dwc2_qh *qh, *qh_tmp; | 
|  | 1807 | struct dwc2_qtd *qtd, *qtd_tmp; | 
|  | 1808 |  | 
|  | 1809 | list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) { | 
|  | 1810 | list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, | 
|  | 1811 | qtd_list_entry) { | 
|  | 1812 | dwc2_host_complete(hsotg, qtd, -ECONNRESET); | 
|  | 1813 | dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); | 
|  | 1814 | } | 
|  | 1815 | } | 
|  | 1816 | } | 
|  | 1817 |  | 
|  | 1818 | static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg, | 
|  | 1819 | struct list_head *qh_list) | 
|  | 1820 | { | 
|  | 1821 | struct dwc2_qtd *qtd, *qtd_tmp; | 
|  | 1822 | struct dwc2_qh *qh, *qh_tmp; | 
|  | 1823 | unsigned long flags; | 
|  | 1824 |  | 
|  | 1825 | if (!qh_list->next) | 
|  | 1826 | /* The list hasn't been initialized yet */ | 
|  | 1827 | return; | 
|  | 1828 |  | 
|  | 1829 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 1830 |  | 
|  | 1831 | /* Ensure there are no QTDs or URBs left */ | 
|  | 1832 | dwc2_kill_urbs_in_qh_list(hsotg, qh_list); | 
|  | 1833 |  | 
|  | 1834 | list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) { | 
|  | 1835 | dwc2_hcd_qh_unlink(hsotg, qh); | 
|  | 1836 |  | 
|  | 1837 | /* Free each QTD in the QH's QTD list */ | 
|  | 1838 | list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, | 
|  | 1839 | qtd_list_entry) | 
|  | 1840 | dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); | 
|  | 1841 |  | 
|  | 1842 | if (qh->channel && qh->channel->qh == qh) | 
|  | 1843 | qh->channel->qh = NULL; | 
|  | 1844 |  | 
|  | 1845 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 1846 | dwc2_hcd_qh_free(hsotg, qh); | 
|  | 1847 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 1848 | } | 
|  | 1849 |  | 
|  | 1850 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 1851 | } | 
|  | 1852 |  | 
|  | 1853 | /* | 
|  | 1854 | * Responds with an error status of -ECONNRESET to all URBs in the non-periodic | 
|  | 1855 | * and periodic schedules. The QTD associated with each URB is removed from | 
|  | 1856 | * the schedule and freed. This function may be called when a disconnect is | 
|  | 1857 | * detected or when the HCD is being stopped. | 
|  | 1858 | * | 
|  | 1859 | * Must be called with interrupt disabled and spinlock held | 
|  | 1860 | */ | 
|  | 1861 | static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg) | 
|  | 1862 | { | 
|  | 1863 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive); | 
|  | 1864 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting); | 
|  | 1865 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active); | 
|  | 1866 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive); | 
|  | 1867 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready); | 
|  | 1868 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned); | 
|  | 1869 | dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued); | 
|  | 1870 | } | 
|  | 1871 |  | 
|  | 1872 | /** | 
|  | 1873 | * dwc2_hcd_start() - Starts the HCD when switching to Host mode | 
|  | 1874 | * | 
|  | 1875 | * @hsotg: Pointer to struct dwc2_hsotg | 
|  | 1876 | */ | 
|  | 1877 | void dwc2_hcd_start(struct dwc2_hsotg *hsotg) | 
|  | 1878 | { | 
|  | 1879 | u32 hprt0; | 
|  | 1880 |  | 
|  | 1881 | if (hsotg->op_state == OTG_STATE_B_HOST) { | 
|  | 1882 | /* | 
|  | 1883 | * Reset the port. During an HNP mode switch the reset | 
|  | 1884 | * needs to occur within 1ms and have a duration of at | 
|  | 1885 | * least 50ms. | 
|  | 1886 | */ | 
|  | 1887 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 1888 | hprt0 |= HPRT0_RST; | 
|  | 1889 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 1890 | } | 
|  | 1891 |  | 
|  | 1892 | queue_delayed_work(hsotg->wq_otg, &hsotg->start_work, | 
|  | 1893 | msecs_to_jiffies(50)); | 
|  | 1894 | } | 
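|  |  |  | 
|  |  | /* | 
|  |  | * The 50 ms delay before running start_work lines up with the minimum | 
|  |  | * reset duration noted above, so in the HNP case the port reset has | 
|  |  | * been asserted long enough before the deferred start routine continues | 
|  |  | * host bring-up. | 
|  |  | */ | 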
|  | 1895 |  | 
|  | 1896 | /* Must be called with interrupt disabled and spinlock held */ | 
|  | 1897 | static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) | 
|  | 1898 | { | 
|  | 1899 | int num_channels = hsotg->params.host_channels; | 
|  | 1900 | struct dwc2_host_chan *channel; | 
|  | 1901 | u32 hcchar; | 
|  | 1902 | int i; | 
|  | 1903 |  | 
|  | 1904 | if (!hsotg->params.host_dma) { | 
|  | 1905 | /* Flush out any channel requests in slave mode */ | 
|  | 1906 | for (i = 0; i < num_channels; i++) { | 
|  | 1907 | channel = hsotg->hc_ptr_array[i]; | 
|  | 1908 | if (!list_empty(&channel->hc_list_entry)) | 
|  | 1909 | continue; | 
|  | 1910 | hcchar = dwc2_readl(hsotg, HCCHAR(i)); | 
|  | 1911 | if (hcchar & HCCHAR_CHENA) { | 
|  | 1912 | hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR); | 
|  | 1913 | hcchar |= HCCHAR_CHDIS; | 
|  | 1914 | dwc2_writel(hsotg, hcchar, HCCHAR(i)); | 
|  | 1915 | } | 
|  | 1916 | } | 
|  | 1917 | } | 
|  | 1918 |  | 
|  | 1919 | for (i = 0; i < num_channels; i++) { | 
|  | 1920 | channel = hsotg->hc_ptr_array[i]; | 
|  | 1921 | if (!list_empty(&channel->hc_list_entry)) | 
|  | 1922 | continue; | 
|  | 1923 | hcchar = dwc2_readl(hsotg, HCCHAR(i)); | 
|  | 1924 | if (hcchar & HCCHAR_CHENA) { | 
|  | 1925 | /* Halt the channel */ | 
|  | 1926 | hcchar |= HCCHAR_CHDIS; | 
|  | 1927 | dwc2_writel(hsotg, hcchar, HCCHAR(i)); | 
|  | 1928 | } | 
|  | 1929 |  | 
|  | 1930 | dwc2_hc_cleanup(hsotg, channel); | 
|  | 1931 | list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list); | 
|  | 1932 | /* | 
|  | 1933 | * Added for Descriptor DMA to prevent channel double cleanup in | 
|  | 1934 | * release_channel_ddma(), which is called from ep_disable when | 
|  | 1935 | * device disconnects | 
|  | 1936 | */ | 
|  | 1937 | channel->qh = NULL; | 
|  | 1938 | } | 
|  | 1939 | /* All channels have been freed, mark them available */ | 
|  | 1940 | if (hsotg->params.uframe_sched) { | 
|  | 1941 | hsotg->available_host_channels = | 
|  | 1942 | hsotg->params.host_channels; | 
|  | 1943 | } else { | 
|  | 1944 | hsotg->non_periodic_channels = 0; | 
|  | 1945 | hsotg->periodic_channels = 0; | 
|  | 1946 | } | 
|  | 1947 | } | 
|  | 1948 |  | 
|  | 1949 | /** | 
|  | 1950 | * dwc2_hcd_connect() - Handles connect of the HCD | 
|  | 1951 | * | 
|  | 1952 | * @hsotg: Pointer to struct dwc2_hsotg | 
|  | 1953 | * | 
|  | 1954 | * Must be called with interrupt disabled and spinlock held | 
|  | 1955 | */ | 
|  | 1956 | void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) | 
|  | 1957 | { | 
|  | 1958 | if (hsotg->lx_state != DWC2_L0) | 
|  | 1959 | usb_hcd_resume_root_hub(hsotg->priv); | 
|  | 1960 |  | 
|  | 1961 | hsotg->flags.b.port_connect_status_change = 1; | 
|  | 1962 | hsotg->flags.b.port_connect_status = 1; | 
|  | 1963 | } | 
|  | 1964 |  | 
|  | 1965 | /** | 
|  | 1966 | * dwc2_hcd_disconnect() - Handles disconnect of the HCD | 
|  | 1967 | * | 
|  | 1968 | * @hsotg: Pointer to struct dwc2_hsotg | 
|  | 1969 | * @force: If true, we won't try to reconnect even if we see device connected. | 
|  | 1970 | * | 
|  | 1971 | * Must be called with interrupt disabled and spinlock held | 
|  | 1972 | */ | 
|  | 1973 | void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) | 
|  | 1974 | { | 
|  | 1975 | u32 intr; | 
|  | 1976 | u32 hprt0; | 
|  | 1977 |  | 
|  | 1978 | /* Set status flags for the hub driver */ | 
|  | 1979 | hsotg->flags.b.port_connect_status_change = 1; | 
|  | 1980 | hsotg->flags.b.port_connect_status = 0; | 
|  | 1981 |  | 
|  | 1982 | /* | 
|  | 1983 | * Shutdown any transfers in process by clearing the Tx FIFO Empty | 
|  | 1984 | * interrupt mask and status bits and disabling subsequent host | 
|  | 1985 | * channel interrupts. | 
|  | 1986 | */ | 
|  | 1987 | intr = dwc2_readl(hsotg, GINTMSK); | 
|  | 1988 | intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT); | 
|  | 1989 | dwc2_writel(hsotg, intr, GINTMSK); | 
|  | 1990 | intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT; | 
|  | 1991 | dwc2_writel(hsotg, intr, GINTSTS); | 
|  | 1992 |  | 
|  | 1993 | /* | 
|  | 1994 | * Turn off the vbus power only if the core has transitioned to device | 
|  | 1995 | * mode. If still in host mode, need to keep power on to detect a | 
|  | 1996 | * reconnection. | 
|  | 1997 | */ | 
|  | 1998 | if (dwc2_is_device_mode(hsotg)) { | 
|  | 1999 | if (hsotg->op_state != OTG_STATE_A_SUSPEND) { | 
|  | 2000 | dev_dbg(hsotg->dev, "Disconnect: PortPower off\n"); | 
|  | 2001 | dwc2_writel(hsotg, 0, HPRT0); | 
|  | 2002 | } | 
|  | 2003 |  | 
|  | 2004 | dwc2_disable_host_interrupts(hsotg); | 
|  | 2005 | } | 
|  | 2006 |  | 
|  | 2007 | /* Respond with an error status to all URBs in the schedule */ | 
|  | 2008 | dwc2_kill_all_urbs(hsotg); | 
|  | 2009 |  | 
|  | 2010 | if (dwc2_is_host_mode(hsotg)) | 
|  | 2011 | /* Clean up any host channels that were in use */ | 
|  | 2012 | dwc2_hcd_cleanup_channels(hsotg); | 
|  | 2013 |  | 
|  | 2014 | dwc2_host_disconnect(hsotg); | 
|  | 2015 |  | 
|  | 2016 | /* | 
|  | 2017 | * Add an extra check here to see if we're actually connected but | 
|  | 2018 | * we don't have a detection interrupt pending.  This can happen if: | 
|  | 2019 | *   1. hardware sees connect | 
|  | 2020 | *   2. hardware sees disconnect | 
|  | 2021 | *   3. hardware sees connect | 
|  | 2022 | *   4. dwc2_port_intr() - clears connect interrupt | 
|  | 2023 | *   5. dwc2_handle_common_intr() - calls here | 
|  | 2024 | * | 
|  | 2025 | * Without the extra check here we will end up calling disconnect | 
|  | 2026 | * and won't get any future interrupts to handle the connect. | 
|  | 2027 | */ | 
|  | 2028 | if (!force) { | 
|  | 2029 | hprt0 = dwc2_readl(hsotg, HPRT0); | 
|  | 2030 | if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS)) | 
|  | 2031 | dwc2_hcd_connect(hsotg); | 
|  | 2032 | } | 
|  | 2033 | } | 
|  | 2034 |  | 
|  | 2035 | /** | 
|  | 2036 | * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup | 
|  | 2037 | * | 
|  | 2038 | * @hsotg: Pointer to struct dwc2_hsotg | 
|  | 2039 | */ | 
|  | 2040 | static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) | 
|  | 2041 | { | 
|  | 2042 | if (hsotg->bus_suspended) { | 
|  | 2043 | hsotg->flags.b.port_suspend_change = 1; | 
|  | 2044 | usb_hcd_resume_root_hub(hsotg->priv); | 
|  | 2045 | } | 
|  | 2046 |  | 
|  | 2047 | if (hsotg->lx_state == DWC2_L1) | 
|  | 2048 | hsotg->flags.b.port_l1_change = 1; | 
|  | 2049 | } | 
|  | 2050 |  | 
|  | 2051 | /** | 
|  | 2052 | * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner | 
|  | 2053 | * | 
|  | 2054 | * @hsotg: Pointer to struct dwc2_hsotg | 
|  | 2055 | * | 
|  | 2056 | * Must be called with interrupt disabled and spinlock held | 
|  | 2057 | */ | 
|  | 2058 | void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) | 
|  | 2059 | { | 
|  | 2060 | dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n"); | 
|  | 2061 |  | 
|  | 2062 | /* | 
|  | 2063 | * The root hub should be disconnected before this function is called. | 
|  | 2064 | * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) | 
|  | 2065 | * and the QH lists (via ..._hcd_endpoint_disable). | 
|  | 2066 | */ | 
|  | 2067 |  | 
|  | 2068 | /* Turn off all host-specific interrupts */ | 
|  | 2069 | dwc2_disable_host_interrupts(hsotg); | 
|  | 2070 |  | 
|  | 2071 | /* Turn off the vbus power */ | 
|  | 2072 | dev_dbg(hsotg->dev, "PortPower off\n"); | 
|  | 2073 | dwc2_writel(hsotg, 0, HPRT0); | 
|  | 2074 | } | 
|  | 2075 |  | 
|  | 2076 | /* Caller must hold driver lock */ | 
|  | 2077 | static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, | 
|  | 2078 | struct dwc2_hcd_urb *urb, struct dwc2_qh *qh, | 
|  | 2079 | struct dwc2_qtd *qtd) | 
|  | 2080 | { | 
|  | 2081 | u32 intr_mask; | 
|  | 2082 | int retval; | 
|  | 2083 | int dev_speed; | 
|  | 2084 |  | 
|  | 2085 | if (!hsotg->flags.b.port_connect_status) { | 
|  | 2086 | /* No longer connected */ | 
|  | 2087 | dev_err(hsotg->dev, "Not connected\n"); | 
|  | 2088 | return -ENODEV; | 
|  | 2089 | } | 
|  | 2090 |  | 
|  | 2091 | dev_speed = dwc2_host_get_speed(hsotg, urb->priv); | 
|  | 2092 |  | 
|  | 2093 | /* Some configurations cannot support LS traffic on an FS root port */ | 
|  | 2094 | if ((dev_speed == USB_SPEED_LOW) && | 
|  | 2095 | (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) && | 
|  | 2096 | (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) { | 
|  | 2097 | u32 hprt0 = dwc2_readl(hsotg, HPRT0); | 
|  | 2098 | u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; | 
|  | 2099 |  | 
|  | 2100 | if (prtspd == HPRT0_SPD_FULL_SPEED) | 
|  | 2101 | return -ENODEV; | 
|  | 2102 | } | 
|  | 2103 |  | 
|  | 2104 | if (!qtd) | 
|  | 2105 | return -EINVAL; | 
|  | 2106 |  | 
|  | 2107 | dwc2_hcd_qtd_init(qtd, urb); | 
|  | 2108 | retval = dwc2_hcd_qtd_add(hsotg, qtd, qh); | 
|  | 2109 | if (retval) { | 
|  | 2110 | dev_err(hsotg->dev, | 
|  | 2111 | "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n", | 
|  | 2112 | retval); | 
|  | 2113 | return retval; | 
|  | 2114 | } | 
|  | 2115 |  | 
|  | 2116 | intr_mask = dwc2_readl(hsotg, GINTMSK); | 
|  | 2117 | if (!(intr_mask & GINTSTS_SOF)) { | 
|  | 2118 | enum dwc2_transaction_type tr_type; | 
|  | 2119 |  | 
|  | 2120 | if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK && | 
|  | 2121 | !(qtd->urb->flags & URB_GIVEBACK_ASAP)) | 
|  | 2122 | /* | 
|  | 2123 | * Do not schedule SG transactions until qtd has | 
|  | 2124 | * URB_GIVEBACK_ASAP set | 
|  | 2125 | */ | 
|  | 2126 | return 0; | 
|  | 2127 |  | 
|  | 2128 | tr_type = dwc2_hcd_select_transactions(hsotg); | 
|  | 2129 | if (tr_type != DWC2_TRANSACTION_NONE) | 
|  | 2130 | dwc2_hcd_queue_transactions(hsotg, tr_type); | 
|  | 2131 | } | 
|  | 2132 |  | 
|  | 2133 | return 0; | 
|  | 2134 | } | 
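|  |  |  | 
|  |  | /* | 
|  |  | * The GINTSTS_SOF check above covers the case where SOF interrupts are | 
|  |  | * currently masked: the newly added QTD would otherwise sit idle, so | 
|  |  | * transaction selection and queuing are kicked off here directly. When | 
|  |  | * SOF interrupts are enabled, the next SOF handler picks the work up | 
|  |  | * instead. | 
|  |  | */ | 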
|  | 2135 |  | 
|  | 2136 | /* Must be called with interrupt disabled and spinlock held */ | 
|  | 2137 | static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg, | 
|  | 2138 | struct dwc2_hcd_urb *urb) | 
|  | 2139 | { | 
|  | 2140 | struct dwc2_qh *qh; | 
|  | 2141 | struct dwc2_qtd *urb_qtd; | 
|  | 2142 |  | 
|  | 2143 | urb_qtd = urb->qtd; | 
|  | 2144 | if (!urb_qtd) { | 
|  | 2145 | dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n"); | 
|  | 2146 | return -EINVAL; | 
|  | 2147 | } | 
|  | 2148 |  | 
|  | 2149 | qh = urb_qtd->qh; | 
|  | 2150 | if (!qh) { | 
|  | 2151 | dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n"); | 
|  | 2152 | return -EINVAL; | 
|  | 2153 | } | 
|  | 2154 |  | 
|  | 2155 | urb->priv = NULL; | 
|  | 2156 |  | 
|  | 2157 | if (urb_qtd->in_process && qh->channel) { | 
|  | 2158 | dwc2_dump_channel_info(hsotg, qh->channel); | 
|  | 2159 |  | 
|  | 2160 | /* The QTD is in process (it has been assigned to a channel) */ | 
|  | 2161 | if (hsotg->flags.b.port_connect_status) | 
|  | 2162 | /* | 
|  | 2163 | * If still connected (i.e. in host mode), halt the | 
|  | 2164 | * channel so it can be used for other transfers. If | 
|  | 2165 | * no longer connected, the host registers can't be | 
|  | 2166 | * written to halt the channel since the core is in | 
|  | 2167 | * device mode. | 
|  | 2168 | */ | 
|  | 2169 | dwc2_hc_halt(hsotg, qh->channel, | 
|  | 2170 | DWC2_HC_XFER_URB_DEQUEUE); | 
|  | 2171 | } | 
|  | 2172 |  | 
|  | 2173 | /* | 
|  | 2174 | * Free the QTD and clean up the associated QH. Leave the QH in the | 
|  | 2175 | * schedule if it has any remaining QTDs. | 
|  | 2176 | */ | 
|  | 2177 | if (!hsotg->params.dma_desc_enable) { | 
|  | 2178 | u8 in_process = urb_qtd->in_process; | 
|  | 2179 |  | 
|  | 2180 | dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); | 
|  | 2181 | if (in_process) { | 
|  | 2182 | dwc2_hcd_qh_deactivate(hsotg, qh, 0); | 
|  | 2183 | qh->channel = NULL; | 
|  | 2184 | } else if (list_empty(&qh->qtd_list)) { | 
|  | 2185 | dwc2_hcd_qh_unlink(hsotg, qh); | 
|  | 2186 | } | 
|  | 2187 | } else { | 
|  | 2188 | dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); | 
|  | 2189 | } | 
|  | 2190 |  | 
|  | 2191 | return 0; | 
|  | 2192 | } | 
|  | 2193 |  | 
|  | 2194 | /* Must NOT be called with interrupt disabled or spinlock held */ | 
|  | 2195 | static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg, | 
|  | 2196 | struct usb_host_endpoint *ep, int retry) | 
|  | 2197 | { | 
|  | 2198 | struct dwc2_qtd *qtd, *qtd_tmp; | 
|  | 2199 | struct dwc2_qh *qh; | 
|  | 2200 | unsigned long flags; | 
|  | 2201 | int rc; | 
|  | 2202 |  | 
|  | 2203 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 2204 |  | 
|  | 2205 | qh = ep->hcpriv; | 
|  | 2206 | if (!qh) { | 
|  | 2207 | rc = -EINVAL; | 
|  | 2208 | goto err; | 
|  | 2209 | } | 
|  | 2210 |  | 
|  | 2211 | while (!list_empty(&qh->qtd_list) && retry--) { | 
|  | 2212 | if (retry == 0) { | 
|  | 2213 | dev_err(hsotg->dev, | 
|  | 2214 | "## timeout in dwc2_hcd_endpoint_disable() ##\n"); | 
|  | 2215 | rc = -EBUSY; | 
|  | 2216 | goto err; | 
|  | 2217 | } | 
|  | 2218 |  | 
|  | 2219 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 2220 | msleep(20); | 
|  | 2221 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 2222 | qh = ep->hcpriv; | 
|  | 2223 | if (!qh) { | 
|  | 2224 | rc = -EINVAL; | 
|  | 2225 | goto err; | 
|  | 2226 | } | 
|  | 2227 | } | 
|  | 2228 |  | 
|  | 2229 | dwc2_hcd_qh_unlink(hsotg, qh); | 
|  | 2230 |  | 
|  | 2231 | /* Free each QTD in the QH's QTD list */ | 
|  | 2232 | list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) | 
|  | 2233 | dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); | 
|  | 2234 |  | 
|  | 2235 | ep->hcpriv = NULL; | 
|  | 2236 |  | 
|  | 2237 | if (qh->channel && qh->channel->qh == qh) | 
|  | 2238 | qh->channel->qh = NULL; | 
|  | 2239 |  | 
|  | 2240 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 2241 |  | 
|  | 2242 | dwc2_hcd_qh_free(hsotg, qh); | 
|  | 2243 |  | 
|  | 2244 | return 0; | 
|  | 2245 |  | 
|  | 2246 | err: | 
|  | 2247 | ep->hcpriv = NULL; | 
|  | 2248 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 2249 |  | 
|  | 2250 | return rc; | 
|  | 2251 | } | 
|  | 2252 |  | 
|  | 2253 | /* Must be called with interrupt disabled and spinlock held */ | 
|  | 2254 | static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg, | 
|  | 2255 | struct usb_host_endpoint *ep) | 
|  | 2256 | { | 
|  | 2257 | struct dwc2_qh *qh = ep->hcpriv; | 
|  | 2258 |  | 
|  | 2259 | if (!qh) | 
|  | 2260 | return -EINVAL; | 
|  | 2261 |  | 
|  | 2262 | qh->data_toggle = DWC2_HC_PID_DATA0; | 
|  | 2263 |  | 
|  | 2264 | return 0; | 
|  | 2265 | } | 
|  | 2266 |  | 
|  | 2267 | /** | 
|  | 2268 | * dwc2_core_init() - Initializes the DWC_otg controller registers and | 
|  | 2269 | * prepares the core for device mode or host mode operation | 
|  | 2270 | * | 
|  | 2271 | * @hsotg:         Programming view of the DWC_otg controller | 
|  | 2272 | * @initial_setup: If true then this is the first init for this instance. | 
|  | 2273 | */ | 
|  | 2274 | int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) | 
|  | 2275 | { | 
|  | 2276 | u32 usbcfg, otgctl; | 
|  | 2277 | int retval; | 
|  | 2278 |  | 
|  | 2279 | dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); | 
|  | 2280 |  | 
|  | 2281 | usbcfg = dwc2_readl(hsotg, GUSBCFG); | 
|  | 2282 |  | 
|  | 2283 | /* Set ULPI External VBUS bit if needed */ | 
|  | 2284 | usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; | 
|  | 2285 | if (hsotg->params.phy_ulpi_ext_vbus) | 
|  | 2286 | usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; | 
|  | 2287 |  | 
|  | 2288 | /* Set external TS Dline pulsing bit if needed */ | 
|  | 2289 | usbcfg &= ~GUSBCFG_TERMSELDLPULSE; | 
|  | 2290 | if (hsotg->params.ts_dline) | 
|  | 2291 | usbcfg |= GUSBCFG_TERMSELDLPULSE; | 
|  | 2292 |  | 
|  | 2293 | dwc2_writel(hsotg, usbcfg, GUSBCFG); | 
|  | 2294 |  | 
|  | 2295 | /* | 
|  | 2296 | * Reset the Controller | 
|  | 2297 | * | 
|  | 2298 | * We only need to reset the controller if this is a re-init. | 
|  | 2299 | * For the first init we know for sure that earlier code reset us (it | 
|  | 2300 | * needed to in order to properly detect various parameters). | 
|  | 2301 | */ | 
|  | 2302 | if (!initial_setup) { | 
|  | 2303 | retval = dwc2_core_reset(hsotg, false); | 
|  | 2304 | if (retval) { | 
|  | 2305 | dev_err(hsotg->dev, "%s(): Reset failed, aborting\n", | 
|  | 2306 | __func__); | 
|  | 2307 | return retval; | 
|  | 2308 | } | 
|  | 2309 | } | 
|  | 2310 |  | 
|  | 2311 | /* | 
|  | 2312 | * This needs to happen in FS mode before any other programming occurs | 
|  | 2313 | */ | 
|  | 2314 | retval = dwc2_phy_init(hsotg, initial_setup); | 
|  | 2315 | if (retval) | 
|  | 2316 | return retval; | 
|  | 2317 |  | 
|  | 2318 | /* Program the GAHBCFG Register */ | 
|  | 2319 | retval = dwc2_gahbcfg_init(hsotg); | 
|  | 2320 | if (retval) | 
|  | 2321 | return retval; | 
|  | 2322 |  | 
|  | 2323 | /* Program the GUSBCFG register */ | 
|  | 2324 | dwc2_gusbcfg_init(hsotg); | 
|  | 2325 |  | 
|  | 2326 | /* Program the GOTGCTL register */ | 
|  | 2327 | otgctl = dwc2_readl(hsotg, GOTGCTL); | 
|  | 2328 | otgctl &= ~GOTGCTL_OTGVER; | 
|  | 2329 | dwc2_writel(hsotg, otgctl, GOTGCTL); | 
|  | 2330 |  | 
|  | 2331 | /* Clear the SRP success bit for FS-I2c */ | 
|  | 2332 | hsotg->srp_success = 0; | 
|  | 2333 |  | 
|  | 2334 | /* Enable common interrupts */ | 
|  | 2335 | dwc2_enable_common_interrupts(hsotg); | 
|  | 2336 |  | 
|  | 2337 | /* | 
|  | 2338 | * Do device or host initialization based on mode during PCD and | 
|  | 2339 | * HCD initialization | 
|  | 2340 | */ | 
|  | 2341 | if (dwc2_is_host_mode(hsotg)) { | 
|  | 2342 | dev_dbg(hsotg->dev, "Host Mode\n"); | 
|  | 2343 | hsotg->op_state = OTG_STATE_A_HOST; | 
|  | 2344 | } else { | 
|  | 2345 | dev_dbg(hsotg->dev, "Device Mode\n"); | 
|  | 2346 | hsotg->op_state = OTG_STATE_B_PERIPHERAL; | 
|  | 2347 | } | 
|  | 2348 |  | 
|  | 2349 | return 0; | 
|  | 2350 | } | 
|  | 2351 |  | 
|  | 2352 | /** | 
|  | 2353 | * dwc2_core_host_init() - Initializes the DWC_otg controller registers for | 
|  | 2354 | * Host mode | 
|  | 2355 | * | 
|  | 2356 | * @hsotg: Programming view of DWC_otg controller | 
|  | 2357 | * | 
|  | 2358 | * This function flushes the Tx and Rx FIFOs and flushes any entries in the | 
|  | 2359 | * request queues. Host channels are reset to ensure that they are ready for | 
|  | 2360 | * performing transfers. | 
|  | 2361 | */ | 
|  | 2362 | static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) | 
|  | 2363 | { | 
|  | 2364 | u32 hcfg, hfir, otgctl, usbcfg; | 
|  | 2365 |  | 
|  | 2366 | dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); | 
|  | 2367 |  | 
|  | 2368 | /* Set HS/FS Timeout Calibration to 7 (max available value). | 
|  | 2369 | * The number of PHY clocks that the application programs in | 
|  | 2370 | * this field is added to the high/full speed interpacket timeout | 
|  | 2371 | * duration in the core to account for any additional delays | 
|  | 2372 | * introduced by the PHY. This can be required, because the delay | 
|  | 2373 | * introduced by the PHY in generating the linestate condition | 
|  | 2374 | * can vary from one PHY to another. | 
|  | 2375 | */ | 
|  | 2376 | usbcfg = dwc2_readl(hsotg, GUSBCFG); | 
|  | 2377 | usbcfg |= GUSBCFG_TOUTCAL(7); | 
|  | 2378 | dwc2_writel(hsotg, usbcfg, GUSBCFG); | 
|  | 2379 |  | 
|  | 2380 | /* Restart the Phy Clock */ | 
|  | 2381 | dwc2_writel(hsotg, 0, PCGCTL); | 
|  | 2382 |  | 
|  | 2383 | /* Initialize Host Configuration Register */ | 
|  | 2384 | dwc2_init_fs_ls_pclk_sel(hsotg); | 
|  | 2385 | if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || | 
|  | 2386 | hsotg->params.speed == DWC2_SPEED_PARAM_LOW) { | 
|  | 2387 | hcfg = dwc2_readl(hsotg, HCFG); | 
|  | 2388 | hcfg |= HCFG_FSLSSUPP; | 
|  | 2389 | dwc2_writel(hsotg, hcfg, HCFG); | 
|  | 2390 | } | 
|  | 2391 |  | 
|  | 2392 | /* | 
|  | 2393 | * The HFIR_RLDCTRL bit allows the HFIR frame interval to be reloaded | 
|  | 2394 | * dynamically at runtime. The bit itself must be programmed during | 
|  | 2395 | * initial configuration and must not be changed afterward. | 
|  | 2396 | */ | 
|  | 2397 | if (hsotg->params.reload_ctl) { | 
|  | 2398 | hfir = dwc2_readl(hsotg, HFIR); | 
|  | 2399 | hfir |= HFIR_RLDCTRL; | 
|  | 2400 | dwc2_writel(hsotg, hfir, HFIR); | 
|  | 2401 | } | 
|  | 2402 |  | 
|  | 2403 | if (hsotg->params.dma_desc_enable) { | 
|  | 2404 | u32 op_mode = hsotg->hw_params.op_mode; | 
|  | 2405 |  | 
|  | 2406 | if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || | 
|  | 2407 | !hsotg->hw_params.dma_desc_enable || | 
|  | 2408 | op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || | 
|  | 2409 | op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE || | 
|  | 2410 | op_mode == GHWCFG2_OP_MODE_UNDEFINED) { | 
|  | 2411 | dev_err(hsotg->dev, | 
|  | 2412 | "Hardware does not support descriptor DMA mode -\n"); | 
|  | 2413 | dev_err(hsotg->dev, | 
|  | 2414 | "falling back to buffer DMA mode.\n"); | 
|  | 2415 | hsotg->params.dma_desc_enable = false; | 
|  | 2416 | } else { | 
|  | 2417 | hcfg = dwc2_readl(hsotg, HCFG); | 
|  | 2418 | hcfg |= HCFG_DESCDMA; | 
|  | 2419 | dwc2_writel(hsotg, hcfg, HCFG); | 
|  | 2420 | } | 
|  | 2421 | } | 
|  | 2422 |  | 
|  | 2423 | /* Configure data FIFO sizes */ | 
|  | 2424 | dwc2_config_fifos(hsotg); | 
|  | 2425 |  | 
|  | 2426 | /* TODO - check this */ | 
|  | 2427 | /* Clear Host Set HNP Enable in the OTG Control Register */ | 
|  | 2428 | otgctl = dwc2_readl(hsotg, GOTGCTL); | 
|  | 2429 | otgctl &= ~GOTGCTL_HSTSETHNPEN; | 
|  | 2430 | dwc2_writel(hsotg, otgctl, GOTGCTL); | 
|  | 2431 |  | 
|  | 2432 | /* Make sure the FIFOs are flushed */ | 
|  | 2433 | dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */); | 
|  | 2434 | dwc2_flush_rx_fifo(hsotg); | 
|  | 2435 |  | 
|  | 2436 | /* Clear Host Set HNP Enable in the OTG Control Register */ | 
|  | 2437 | otgctl = dwc2_readl(hsotg, GOTGCTL); | 
|  | 2438 | otgctl &= ~GOTGCTL_HSTSETHNPEN; | 
|  | 2439 | dwc2_writel(hsotg, otgctl, GOTGCTL); | 
|  | 2440 |  | 
|  | 2441 | if (!hsotg->params.dma_desc_enable) { | 
|  | 2442 | int num_channels, i; | 
|  | 2443 | u32 hcchar; | 
|  | 2444 |  | 
|  | 2445 | /* Flush out any leftover queued requests */ | 
|  | 2446 | num_channels = hsotg->params.host_channels; | 
|  | 2447 | for (i = 0; i < num_channels; i++) { | 
|  | 2448 | hcchar = dwc2_readl(hsotg, HCCHAR(i)); | 
|  | 2449 | hcchar &= ~HCCHAR_CHENA; | 
|  | 2450 | hcchar |= HCCHAR_CHDIS; | 
|  | 2451 | hcchar &= ~HCCHAR_EPDIR; | 
|  | 2452 | dwc2_writel(hsotg, hcchar, HCCHAR(i)); | 
|  | 2453 | } | 
|  | 2454 |  | 
|  | 2455 | /* Halt all channels to put them into a known state */ | 
|  | 2456 | for (i = 0; i < num_channels; i++) { | 
|  | 2457 | hcchar = dwc2_readl(hsotg, HCCHAR(i)); | 
|  | 2458 | hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS; | 
|  | 2459 | hcchar &= ~HCCHAR_EPDIR; | 
|  | 2460 | dwc2_writel(hsotg, hcchar, HCCHAR(i)); | 
|  | 2461 | dev_dbg(hsotg->dev, "%s: Halt channel %d\n", | 
|  | 2462 | __func__, i); | 
|  | 2463 |  | 
|  | 2464 | if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i), | 
|  | 2465 | HCCHAR_CHENA, 1000)) { | 
|  | 2466 | dev_warn(hsotg->dev, "Unable to clear enable on channel %d\n", | 
|  | 2467 | i); | 
|  | 2468 | } | 
|  | 2469 | } | 
|  | 2470 | } | 
|  | 2471 |  | 
|  | 2472 | /* Enable ACG feature in host mode, if supported */ | 
|  | 2473 | dwc2_enable_acg(hsotg); | 
|  | 2474 |  | 
|  | 2475 | /* Turn on the vbus power */ | 
|  | 2476 | dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state); | 
|  | 2477 | if (hsotg->op_state == OTG_STATE_A_HOST) { | 
|  | 2478 | u32 hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 2479 |  | 
|  | 2480 | dev_dbg(hsotg->dev, "Init: Power Port (%d)\n", | 
|  | 2481 | !!(hprt0 & HPRT0_PWR)); | 
|  | 2482 | if (!(hprt0 & HPRT0_PWR)) { | 
|  | 2483 | hprt0 |= HPRT0_PWR; | 
|  | 2484 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 2485 | } | 
|  | 2486 | } | 
|  | 2487 |  | 
|  | 2488 | dwc2_enable_host_interrupts(hsotg); | 
|  | 2489 | } | 
|  | 2490 |  | 
|  | 2491 | /* | 
|  | 2492 | * Initializes dynamic portions of the DWC_otg HCD state | 
|  | 2493 | * | 
|  | 2494 | * Must be called with interrupt disabled and spinlock held | 
|  | 2495 | */ | 
|  | 2496 | static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) | 
|  | 2497 | { | 
|  | 2498 | struct dwc2_host_chan *chan, *chan_tmp; | 
|  | 2499 | int num_channels; | 
|  | 2500 | int i; | 
|  | 2501 |  | 
|  | 2502 | hsotg->flags.d32 = 0; | 
|  | 2503 | hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active; | 
|  | 2504 |  | 
|  | 2505 | if (hsotg->params.uframe_sched) { | 
|  | 2506 | hsotg->available_host_channels = | 
|  | 2507 | hsotg->params.host_channels; | 
|  | 2508 | } else { | 
|  | 2509 | hsotg->non_periodic_channels = 0; | 
|  | 2510 | hsotg->periodic_channels = 0; | 
|  | 2511 | } | 
|  | 2512 |  | 
|  | 2513 | /* | 
|  | 2514 | * Put all channels in the free channel list and clean up channel | 
|  | 2515 | * states | 
|  | 2516 | */ | 
|  | 2517 | list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list, | 
|  | 2518 | hc_list_entry) | 
|  | 2519 | list_del_init(&chan->hc_list_entry); | 
|  | 2520 |  | 
|  | 2521 | num_channels = hsotg->params.host_channels; | 
|  | 2522 | for (i = 0; i < num_channels; i++) { | 
|  | 2523 | chan = hsotg->hc_ptr_array[i]; | 
|  | 2524 | list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); | 
|  | 2525 | dwc2_hc_cleanup(hsotg, chan); | 
|  | 2526 | } | 
|  | 2527 |  | 
|  | 2528 | /* Initialize the DWC core for host mode operation */ | 
|  | 2529 | dwc2_core_host_init(hsotg); | 
|  | 2530 | } | 
|  | 2531 |  | 
|  | 2532 | static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg, | 
|  | 2533 | struct dwc2_host_chan *chan, | 
|  | 2534 | struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb) | 
|  | 2535 | { | 
|  | 2536 | int hub_addr, hub_port; | 
|  | 2537 |  | 
|  | 2538 | chan->do_split = 1; | 
|  | 2539 | chan->xact_pos = qtd->isoc_split_pos; | 
|  | 2540 | chan->complete_split = qtd->complete_split; | 
|  | 2541 | dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port); | 
|  | 2542 | chan->hub_addr = (u8)hub_addr; | 
|  | 2543 | chan->hub_port = (u8)hub_port; | 
|  | 2544 | } | 
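|  |  |  | 
|  |  | /* | 
|  |  | * hub_addr/hub_port identify the high-speed hub (and port) whose | 
|  |  | * transaction translator will carry out the full-/low-speed | 
|  |  | * transaction; they are later programmed into the HCSPLT register's | 
|  |  | * HubAddr/PrtAddr fields when the channel is set up. | 
|  |  | */ | 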
|  | 2545 |  | 
|  | 2546 | static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, | 
|  | 2547 | struct dwc2_host_chan *chan, | 
|  | 2548 | struct dwc2_qtd *qtd) | 
|  | 2549 | { | 
|  | 2550 | struct dwc2_hcd_urb *urb = qtd->urb; | 
|  | 2551 | struct dwc2_hcd_iso_packet_desc *frame_desc; | 
|  | 2552 |  | 
|  | 2553 | switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) { | 
|  | 2554 | case USB_ENDPOINT_XFER_CONTROL: | 
|  | 2555 | chan->ep_type = USB_ENDPOINT_XFER_CONTROL; | 
|  | 2556 |  | 
|  | 2557 | switch (qtd->control_phase) { | 
|  | 2558 | case DWC2_CONTROL_SETUP: | 
|  | 2559 | dev_vdbg(hsotg->dev, "  Control setup transaction\n"); | 
|  | 2560 | chan->do_ping = 0; | 
|  | 2561 | chan->ep_is_in = 0; | 
|  | 2562 | chan->data_pid_start = DWC2_HC_PID_SETUP; | 
|  | 2563 | if (hsotg->params.host_dma) | 
|  | 2564 | chan->xfer_dma = urb->setup_dma; | 
|  | 2565 | else | 
|  | 2566 | chan->xfer_buf = urb->setup_packet; | 
|  | 2567 | chan->xfer_len = 8; | 
|  | 2568 | break; | 
|  | 2569 |  | 
|  | 2570 | case DWC2_CONTROL_DATA: | 
|  | 2571 | dev_vdbg(hsotg->dev, "  Control data transaction\n"); | 
|  | 2572 | chan->data_pid_start = qtd->data_toggle; | 
|  | 2573 | break; | 
|  | 2574 |  | 
|  | 2575 | case DWC2_CONTROL_STATUS: | 
|  | 2576 | /* | 
|  | 2577 | * Direction is opposite of data direction or IN if no | 
|  | 2578 | * data | 
|  | 2579 | */ | 
|  | 2580 | dev_vdbg(hsotg->dev, "  Control status transaction\n"); | 
|  | 2581 | if (urb->length == 0) | 
|  | 2582 | chan->ep_is_in = 1; | 
|  | 2583 | else | 
|  | 2584 | chan->ep_is_in = | 
|  | 2585 | dwc2_hcd_is_pipe_out(&urb->pipe_info); | 
|  | 2586 | if (chan->ep_is_in) | 
|  | 2587 | chan->do_ping = 0; | 
|  | 2588 | chan->data_pid_start = DWC2_HC_PID_DATA1; | 
|  | 2589 | chan->xfer_len = 0; | 
|  | 2590 | if (hsotg->params.host_dma) | 
|  | 2591 | chan->xfer_dma = hsotg->status_buf_dma; | 
|  | 2592 | else | 
|  | 2593 | chan->xfer_buf = hsotg->status_buf; | 
|  | 2594 | break; | 
|  | 2595 | } | 
|  | 2596 | break; | 
|  | 2597 |  | 
|  | 2598 | case USB_ENDPOINT_XFER_BULK: | 
|  | 2599 | chan->ep_type = USB_ENDPOINT_XFER_BULK; | 
|  | 2600 | break; | 
|  | 2601 |  | 
|  | 2602 | case USB_ENDPOINT_XFER_INT: | 
|  | 2603 | chan->ep_type = USB_ENDPOINT_XFER_INT; | 
|  | 2604 | break; | 
|  | 2605 |  | 
|  | 2606 | case USB_ENDPOINT_XFER_ISOC: | 
|  | 2607 | chan->ep_type = USB_ENDPOINT_XFER_ISOC; | 
|  | 2608 | if (hsotg->params.dma_desc_enable) | 
|  | 2609 | break; | 
|  | 2610 |  | 
|  | 2611 | frame_desc = &urb->iso_descs[qtd->isoc_frame_index]; | 
|  | 2612 | frame_desc->status = 0; | 
|  | 2613 |  | 
|  | 2614 | if (hsotg->params.host_dma) { | 
|  | 2615 | chan->xfer_dma = urb->dma; | 
|  | 2616 | chan->xfer_dma += frame_desc->offset + | 
|  | 2617 | qtd->isoc_split_offset; | 
|  | 2618 | } else { | 
|  | 2619 | chan->xfer_buf = urb->buf; | 
|  | 2620 | chan->xfer_buf += frame_desc->offset + | 
|  | 2621 | qtd->isoc_split_offset; | 
|  | 2622 | } | 
|  | 2623 |  | 
|  | 2624 | chan->xfer_len = frame_desc->length - qtd->isoc_split_offset; | 
|  | 2625 |  | 
|  | 2626 | if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) { | 
|  | 2627 | if (chan->xfer_len <= 188) | 
|  | 2628 | chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL; | 
|  | 2629 | else | 
|  | 2630 | chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN; | 
|  | 2631 | } | 
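|  |  |  | 
|  |  | /* | 
|  |  | * XACTPOS_ALL means the whole payload fits in a single start-split; | 
|  |  | * larger isochronous OUT payloads begin with XACTPOS_BEGIN and are | 
|  |  | * continued as MID/END pieces on later start-splits as | 
|  |  | * isoc_split_offset advances (handled in the interrupt code). | 
|  |  | */ | 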
|  | 2632 | break; | 
|  | 2633 | } | 
|  | 2634 | } | 
|  | 2635 |  | 
|  | 2636 | static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg, | 
|  | 2637 | struct dwc2_qh *qh, | 
|  | 2638 | struct dwc2_host_chan *chan) | 
|  | 2639 | { | 
|  | 2640 | if (!hsotg->unaligned_cache || | 
|  | 2641 | chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE) | 
|  | 2642 | return -ENOMEM; | 
|  | 2643 |  | 
|  | 2644 | if (!qh->dw_align_buf) { | 
|  | 2645 | qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache, | 
|  | 2646 | GFP_ATOMIC | GFP_DMA); | 
|  | 2647 | if (!qh->dw_align_buf) | 
|  | 2648 | return -ENOMEM; | 
|  | 2649 | } | 
|  | 2650 |  | 
|  | 2651 | qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf, | 
|  | 2652 | DWC2_KMEM_UNALIGNED_BUF_SIZE, | 
|  | 2653 | DMA_FROM_DEVICE); | 
|  | 2654 |  | 
|  | 2655 | if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) { | 
|  | 2656 | dev_err(hsotg->dev, "can't map align_buf\n"); | 
|  | 2657 | chan->align_buf = 0; | 
|  | 2658 | return -EINVAL; | 
|  | 2659 | } | 
|  | 2660 |  | 
|  | 2661 | chan->align_buf = qh->dw_align_buf_dma; | 
|  | 2662 | return 0; | 
|  | 2663 | } | 
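|  |  |  | 
|  |  | /* | 
|  |  | * dw_align_buf is a bounce buffer used when a split IN transfer's URB | 
|  |  | * buffer is not DWORD aligned for the core's DMA; it is mapped | 
|  |  | * DMA_FROM_DEVICE because only received data lands in it, and the | 
|  |  | * completion path copies that data back into the original URB buffer. | 
|  |  | */ | 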
|  | 2664 |  | 
|  | 2665 | #define DWC2_USB_DMA_ALIGN 4 | 
|  | 2666 |  | 
|  | 2667 | static void dwc2_free_dma_aligned_buffer(struct urb *urb) | 
|  | 2668 | { | 
|  | 2669 | void *stored_xfer_buffer; | 
|  | 2670 | size_t length; | 
|  | 2671 |  | 
|  | 2672 | if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) | 
|  | 2673 | return; | 
|  | 2674 |  | 
|  | 2675 | /* Restore urb->transfer_buffer from the end of the allocated area */ | 
|  | 2676 | memcpy(&stored_xfer_buffer, | 
|  | 2677 | PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length, | 
|  | 2678 | dma_get_cache_alignment()), | 
|  | 2679 | sizeof(urb->transfer_buffer)); | 
|  | 2680 |  | 
|  | 2681 | if (usb_urb_dir_in(urb)) { | 
|  | 2682 | if (usb_pipeisoc(urb->pipe)) | 
|  | 2683 | length = urb->transfer_buffer_length; | 
|  | 2684 | else | 
|  | 2685 | length = urb->actual_length; | 
|  | 2686 |  | 
|  | 2687 | memcpy(stored_xfer_buffer, urb->transfer_buffer, length); | 
|  | 2688 | } | 
|  | 2689 | kfree(urb->transfer_buffer); | 
|  | 2690 | urb->transfer_buffer = stored_xfer_buffer; | 
|  | 2691 |  | 
|  | 2692 | urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; | 
|  | 2693 | } | 
|  | 2694 |  | 
|  | 2695 | static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) | 
|  | 2696 | { | 
|  | 2697 | void *kmalloc_ptr; | 
|  | 2698 | size_t kmalloc_size; | 
|  | 2699 |  | 
|  | 2700 | if (urb->num_sgs || urb->sg || | 
|  | 2701 | urb->transfer_buffer_length == 0 || | 
|  | 2702 | !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) | 
|  | 2703 | return 0; | 
|  | 2704 |  | 
|  | 2705 | /* | 
|  | 2706 | * Allocate a buffer big enough for the transfer data, for alignment | 
|  | 2707 | * padding, and for a copy of the original transfer_buffer pointer. | 
|  | 2708 | * kmalloc() returns memory that is aligned properly for DMA | 
|  | 2709 | */ | 
|  | 2710 | kmalloc_size = urb->transfer_buffer_length + | 
|  | 2711 | (dma_get_cache_alignment() - 1) + | 
|  | 2712 | sizeof(urb->transfer_buffer); | 
|  | 2713 |  | 
|  | 2714 | kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); | 
|  | 2715 | if (!kmalloc_ptr) | 
|  | 2716 | return -ENOMEM; | 
|  | 2717 |  | 
|  | 2718 | /* | 
|  | 2719 | * Store the value of the original urb->transfer_buffer pointer at the | 
|  | 2720 | * end of the allocation so it can be retrieved later | 
|  | 2721 | */ | 
|  | 2722 | memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length, | 
|  | 2723 | dma_get_cache_alignment()), | 
|  | 2724 | &urb->transfer_buffer, sizeof(urb->transfer_buffer)); | 
|  | 2725 |  | 
|  | 2726 | if (usb_urb_dir_out(urb)) | 
|  | 2727 | memcpy(kmalloc_ptr, urb->transfer_buffer, | 
|  | 2728 | urb->transfer_buffer_length); | 
|  | 2729 | urb->transfer_buffer = kmalloc_ptr; | 
|  | 2730 |  | 
|  | 2731 | urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; | 
|  | 2732 |  | 
|  | 2733 | return 0; | 
|  | 2734 | } | 
|  | 2735 |  | 
|  | 2736 | static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, | 
|  | 2737 | gfp_t mem_flags) | 
|  | 2738 | { | 
|  | 2739 | int ret; | 
|  | 2740 |  | 
|  | 2741 | /* We assume setup_dma is always aligned; warn if not */ | 
|  | 2742 | WARN_ON_ONCE(urb->setup_dma && | 
|  | 2743 | (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1))); | 
|  | 2744 |  | 
|  | 2745 | ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags); | 
|  | 2746 | if (ret) | 
|  | 2747 | return ret; | 
|  | 2748 |  | 
|  | 2749 | ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); | 
|  | 2750 | if (ret) | 
|  | 2751 | dwc2_free_dma_aligned_buffer(urb); | 
|  | 2752 |  | 
|  | 2753 | return ret; | 
|  | 2754 | } | 
|  | 2755 |  | 
|  | 2756 | static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) | 
|  | 2757 | { | 
|  | 2758 | usb_hcd_unmap_urb_for_dma(hcd, urb); | 
|  | 2759 | dwc2_free_dma_aligned_buffer(urb); | 
|  | 2760 | } | 
|  | 2761 |  | 
|  | 2762 | /** | 
|  | 2763 | * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host | 
|  | 2764 | * channel and initializes the host channel to perform the transactions. The | 
|  | 2765 | * host channel is removed from the free list. | 
|  | 2766 | * | 
|  | 2767 | * @hsotg: The HCD state structure | 
|  | 2768 | * @qh:    Transactions from the first QTD for this QH are selected and assigned | 
|  | 2769 | *         to a free host channel | 
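|  |  | * | 
|  |  | * Return: 0 on success, or -ENOMEM if there is no queued QTD, no free host | 
|  |  | *         channel, or a DMA bounce buffer could not be set up | 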
|  | 2770 | */ | 
|  | 2771 | static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) | 
|  | 2772 | { | 
|  | 2773 | struct dwc2_host_chan *chan; | 
|  | 2774 | struct dwc2_hcd_urb *urb; | 
|  | 2775 | struct dwc2_qtd *qtd; | 
|  | 2776 |  | 
|  | 2777 | if (dbg_qh(qh)) | 
|  | 2778 | dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh); | 
|  | 2779 |  | 
|  | 2780 | if (list_empty(&qh->qtd_list)) { | 
|  | 2781 | dev_dbg(hsotg->dev, "No QTDs in QH list\n"); | 
|  | 2782 | return -ENOMEM; | 
|  | 2783 | } | 
|  | 2784 |  | 
|  | 2785 | if (list_empty(&hsotg->free_hc_list)) { | 
|  | 2786 | dev_dbg(hsotg->dev, "No free channel to assign\n"); | 
|  | 2787 | return -ENOMEM; | 
|  | 2788 | } | 
|  | 2789 |  | 
|  | 2790 | chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan, | 
|  | 2791 | hc_list_entry); | 
|  | 2792 |  | 
|  | 2793 | /* Remove host channel from free list */ | 
|  | 2794 | list_del_init(&chan->hc_list_entry); | 
|  | 2795 |  | 
|  | 2796 | qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry); | 
|  | 2797 | urb = qtd->urb; | 
|  | 2798 | qh->channel = chan; | 
|  | 2799 | qtd->in_process = 1; | 
|  | 2800 |  | 
|  | 2801 | /* | 
|  | 2802 | * The device address comes from the URB's pipe info. This address is | 
|  | 2803 | * 0 before the SET_ADDRESS command and the correct address afterward. | 
|  | 2804 | */ | 
|  | 2805 | chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info); | 
|  | 2806 | chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info); | 
|  | 2807 | chan->speed = qh->dev_speed; | 
|  | 2808 | chan->max_packet = qh->maxp; | 
|  | 2809 |  | 
|  | 2810 | chan->xfer_started = 0; | 
|  | 2811 | chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS; | 
|  | 2812 | chan->error_state = (qtd->error_count > 0); | 
|  | 2813 | chan->halt_on_queue = 0; | 
|  | 2814 | chan->halt_pending = 0; | 
|  | 2815 | chan->requests = 0; | 
|  | 2816 |  | 
|  | 2817 | /* | 
|  | 2818 | * The following values may be modified in the transfer type section | 
|  | 2819 | * below. The xfer_len value may be reduced when the transfer is | 
|  | 2820 | * started to accommodate the max widths of the XferSize and PktCnt | 
|  | 2821 | * fields in the HCTSIZn register. | 
|  | 2822 | */ | 
|  | 2823 |  | 
|  | 2824 | chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0); | 
|  | 2825 | if (chan->ep_is_in) | 
|  | 2826 | chan->do_ping = 0; | 
|  | 2827 | else | 
|  | 2828 | chan->do_ping = qh->ping_state; | 
|  | 2829 |  | 
|  | 2830 | chan->data_pid_start = qh->data_toggle; | 
|  | 2831 | chan->multi_count = 1; | 
|  | 2832 |  | 
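|  |  | /* For OUT transfers, never let actual_length exceed the requested length */ | 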
|  | 2833 | if (urb->actual_length > urb->length && | 
|  | 2834 | !dwc2_hcd_is_pipe_in(&urb->pipe_info)) | 
|  | 2835 | urb->actual_length = urb->length; | 
|  | 2836 |  | 
|  | 2837 | if (hsotg->params.host_dma) | 
|  | 2838 | chan->xfer_dma = urb->dma + urb->actual_length; | 
|  | 2839 | else | 
|  | 2840 | chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; | 
|  | 2841 |  | 
|  | 2842 | chan->xfer_len = urb->length - urb->actual_length; | 
|  | 2843 | chan->xfer_count = 0; | 
|  | 2844 |  | 
|  | 2845 | /* Set the split attributes if required */ | 
|  | 2846 | if (qh->do_split) | 
|  | 2847 | dwc2_hc_init_split(hsotg, chan, qtd, urb); | 
|  | 2848 | else | 
|  | 2849 | chan->do_split = 0; | 
|  | 2850 |  | 
|  | 2851 | /* Set the transfer attributes */ | 
|  | 2852 | dwc2_hc_init_xfer(hsotg, chan, qtd); | 
|  | 2853 |  | 
|  | 2854 | /* For non-dword aligned buffers */ | 
|  | 2855 | if (hsotg->params.host_dma && qh->do_split && | 
|  | 2856 | chan->ep_is_in && (chan->xfer_dma & 0x3)) { | 
|  | 2857 | dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); | 
|  | 2858 | if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) { | 
|  | 2859 | dev_err(hsotg->dev, | 
|  | 2860 | "Failed to allocate memory to handle non-aligned buffer\n"); | 
|  | 2861 | /* Add channel back to free list */ | 
|  | 2862 | chan->align_buf = 0; | 
|  | 2863 | chan->multi_count = 0; | 
|  | 2864 | list_add_tail(&chan->hc_list_entry, | 
|  | 2865 | &hsotg->free_hc_list); | 
|  | 2866 | qtd->in_process = 0; | 
|  | 2867 | qh->channel = NULL; | 
|  | 2868 | return -ENOMEM; | 
|  | 2869 | } | 
|  | 2870 | } else { | 
|  | 2871 | /* | 
|  | 2872 | * We assume that DMA is always aligned in the non-split | 
|  | 2873 | * case and the split OUT case. Warn if not. | 
|  | 2874 | */ | 
|  | 2875 | WARN_ON_ONCE(hsotg->params.host_dma && | 
|  | 2876 | (chan->xfer_dma & 0x3)); | 
|  | 2877 | chan->align_buf = 0; | 
|  | 2878 | } | 
|  | 2879 |  | 
|  | 2880 | if (chan->ep_type == USB_ENDPOINT_XFER_INT || | 
|  | 2881 | chan->ep_type == USB_ENDPOINT_XFER_ISOC) | 
|  | 2882 | /* | 
|  | 2883 | * This value may be modified when the transfer is started | 
|  | 2884 | * to reflect the actual transfer length | 
|  | 2885 | */ | 
|  | 2886 | chan->multi_count = qh->maxp_mult; | 
|  | 2887 |  | 
|  | 2888 | if (hsotg->params.dma_desc_enable) { | 
|  | 2889 | chan->desc_list_addr = qh->desc_list_dma; | 
|  | 2890 | chan->desc_list_sz = qh->desc_list_sz; | 
|  | 2891 | } | 
|  | 2892 |  | 
|  | 2893 | dwc2_hc_init(hsotg, chan); | 
|  | 2894 | chan->qh = qh; | 
|  | 2895 |  | 
|  | 2896 | return 0; | 
|  | 2897 | } | 
|  | 2898 |  | 
|  | 2899 | /** | 
|  | 2900 | * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer | 
|  | 2901 | * schedule and assigns them to available host channels. Called from the HCD | 
|  | 2902 | * interrupt handler functions. | 
|  | 2903 | * | 
|  | 2904 | * @hsotg: The HCD state structure | 
|  | 2905 | * | 
|  | 2906 | * Return: The types of new transactions that were assigned to host channels | 
|  | 2907 | */ | 
|  | 2908 | enum dwc2_transaction_type dwc2_hcd_select_transactions( | 
|  | 2909 | struct dwc2_hsotg *hsotg) | 
|  | 2910 | { | 
|  | 2911 | enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE; | 
|  | 2912 | struct list_head *qh_ptr; | 
|  | 2913 | struct dwc2_qh *qh; | 
|  | 2914 | int num_channels; | 
|  | 2915 |  | 
|  | 2916 | #ifdef DWC2_DEBUG_SOF | 
|  | 2917 | dev_vdbg(hsotg->dev, "  Select Transactions\n"); | 
|  | 2918 | #endif | 
|  | 2919 |  | 
|  | 2920 | /* Process entries in the periodic ready list */ | 
|  | 2921 | qh_ptr = hsotg->periodic_sched_ready.next; | 
|  | 2922 | while (qh_ptr != &hsotg->periodic_sched_ready) { | 
|  | 2923 | if (list_empty(&hsotg->free_hc_list)) | 
|  | 2924 | break; | 
|  | 2925 | if (hsotg->params.uframe_sched) { | 
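|  |  | /* | 
|  |  | * Stop while more than one tracked channel remains; in effect | 
|  |  | * the last available channel is left for the non-periodic pass | 
|  |  | * below, which only requires one | 
|  |  | */ | 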
|  | 2926 | if (hsotg->available_host_channels <= 1) | 
|  | 2927 | break; | 
|  | 2928 | hsotg->available_host_channels--; | 
|  | 2929 | } | 
|  | 2930 | qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); | 
|  | 2931 | if (dwc2_assign_and_init_hc(hsotg, qh)) | 
|  | 2932 | break; | 
|  | 2933 |  | 
|  | 2934 | /* | 
|  | 2935 | * Move the QH from the periodic ready schedule to the | 
|  | 2936 | * periodic assigned schedule | 
|  | 2937 | */ | 
|  | 2938 | qh_ptr = qh_ptr->next; | 
|  | 2939 | list_move_tail(&qh->qh_list_entry, | 
|  | 2940 | &hsotg->periodic_sched_assigned); | 
|  | 2941 | ret_val = DWC2_TRANSACTION_PERIODIC; | 
|  | 2942 | } | 
|  | 2943 |  | 
|  | 2944 | /* | 
|  | 2945 | * Process entries in the inactive portion of the non-periodic | 
|  | 2946 | * schedule. Some free host channels may not be used if they are | 
|  | 2947 | * reserved for periodic transfers. | 
|  | 2948 | */ | 
|  | 2949 | num_channels = hsotg->params.host_channels; | 
|  | 2950 | qh_ptr = hsotg->non_periodic_sched_inactive.next; | 
|  | 2951 | while (qh_ptr != &hsotg->non_periodic_sched_inactive) { | 
|  | 2952 | if (!hsotg->params.uframe_sched && | 
|  | 2953 | hsotg->non_periodic_channels >= num_channels - | 
|  | 2954 | hsotg->periodic_channels) | 
|  | 2955 | break; | 
|  | 2956 | if (list_empty(&hsotg->free_hc_list)) | 
|  | 2957 | break; | 
|  | 2958 | qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); | 
|  | 2959 | if (hsotg->params.uframe_sched) { | 
|  | 2960 | if (hsotg->available_host_channels < 1) | 
|  | 2961 | break; | 
|  | 2962 | hsotg->available_host_channels--; | 
|  | 2963 | } | 
|  | 2964 |  | 
|  | 2965 | if (dwc2_assign_and_init_hc(hsotg, qh)) | 
|  | 2966 | break; | 
|  | 2967 |  | 
|  | 2968 | /* | 
|  | 2969 | * Move the QH from the non-periodic inactive schedule to the | 
|  | 2970 | * non-periodic active schedule | 
|  | 2971 | */ | 
|  | 2972 | qh_ptr = qh_ptr->next; | 
|  | 2973 | list_move_tail(&qh->qh_list_entry, | 
|  | 2974 | &hsotg->non_periodic_sched_active); | 
|  | 2975 |  | 
|  | 2976 | if (ret_val == DWC2_TRANSACTION_NONE) | 
|  | 2977 | ret_val = DWC2_TRANSACTION_NON_PERIODIC; | 
|  | 2978 | else | 
|  | 2979 | ret_val = DWC2_TRANSACTION_ALL; | 
|  | 2980 |  | 
|  | 2981 | if (!hsotg->params.uframe_sched) | 
|  | 2982 | hsotg->non_periodic_channels++; | 
|  | 2983 | } | 
|  | 2984 |  | 
|  | 2985 | return ret_val; | 
|  | 2986 | } | 
|  | 2987 |  | 
|  | 2988 | /** | 
|  | 2989 | * dwc2_queue_transaction() - Attempts to queue a single transaction request for | 
|  | 2990 | * a host channel associated with either a periodic or non-periodic transfer | 
|  | 2991 | * | 
|  | 2992 | * @hsotg: The HCD state structure | 
|  | 2993 | * @chan:  Host channel descriptor associated with either a periodic or | 
|  | 2994 | *         non-periodic transfer | 
|  | 2995 | * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO | 
|  | 2996 | *                     for periodic transfers or the non-periodic Tx FIFO | 
|  | 2997 | *                     for non-periodic transfers | 
|  | 2998 | * | 
|  | 2999 | * Return: 1 if a request is queued and more requests may be needed to | 
|  | 3000 | * complete the transfer, 0 if no more requests are required for this | 
|  | 3001 | * transfer, -1 if there is insufficient space in the Tx FIFO | 
|  | 3002 | * | 
|  | 3003 | * This function assumes that there is space available in the appropriate | 
|  | 3004 | * request queue. For an OUT transfer or SETUP transaction in Slave mode, | 
|  | 3005 | * it checks whether space is available in the appropriate Tx FIFO. | 
|  | 3006 | * | 
|  | 3007 | * Must be called with interrupt disabled and spinlock held | 
|  | 3008 | */ | 
|  | 3009 | static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg, | 
|  | 3010 | struct dwc2_host_chan *chan, | 
|  | 3011 | u16 fifo_dwords_avail) | 
|  | 3012 | { | 
|  | 3013 | int retval = 0; | 
|  | 3014 |  | 
|  | 3015 | if (chan->do_split) | 
|  | 3016 | /* Put ourselves on the list to keep order straight */ | 
|  | 3017 | list_move_tail(&chan->split_order_list_entry, | 
|  | 3018 | &hsotg->split_order); | 
|  | 3019 |  | 
|  | 3020 | if (hsotg->params.host_dma) { | 
|  | 3021 | if (hsotg->params.dma_desc_enable) { | 
|  | 3022 | if (!chan->xfer_started || | 
|  | 3023 | chan->ep_type == USB_ENDPOINT_XFER_ISOC) { | 
|  | 3024 | dwc2_hcd_start_xfer_ddma(hsotg, chan->qh); | 
|  | 3025 | chan->qh->ping_state = 0; | 
|  | 3026 | } | 
|  | 3027 | } else if (!chan->xfer_started) { | 
|  | 3028 | dwc2_hc_start_transfer(hsotg, chan); | 
|  | 3029 | chan->qh->ping_state = 0; | 
|  | 3030 | } | 
|  | 3031 | } else if (chan->halt_pending) { | 
|  | 3032 | /* Don't queue a request if the channel has been halted */ | 
|  | 3033 | } else if (chan->halt_on_queue) { | 
|  | 3034 | dwc2_hc_halt(hsotg, chan, chan->halt_status); | 
|  | 3035 | } else if (chan->do_ping) { | 
|  | 3036 | if (!chan->xfer_started) | 
|  | 3037 | dwc2_hc_start_transfer(hsotg, chan); | 
|  | 3038 | } else if (!chan->ep_is_in || | 
|  | 3039 | chan->data_pid_start == DWC2_HC_PID_SETUP) { | 
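|  |  | /* | 
|  |  | * fifo_dwords_avail counts 32-bit words, so convert to bytes | 
|  |  | * before comparing against max_packet | 
|  |  | */ | 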
|  | 3040 | if ((fifo_dwords_avail * 4) >= chan->max_packet) { | 
|  | 3041 | if (!chan->xfer_started) { | 
|  | 3042 | dwc2_hc_start_transfer(hsotg, chan); | 
|  | 3043 | retval = 1; | 
|  | 3044 | } else { | 
|  | 3045 | retval = dwc2_hc_continue_transfer(hsotg, chan); | 
|  | 3046 | } | 
|  | 3047 | } else { | 
|  | 3048 | retval = -1; | 
|  | 3049 | } | 
|  | 3050 | } else { | 
|  | 3051 | if (!chan->xfer_started) { | 
|  | 3052 | dwc2_hc_start_transfer(hsotg, chan); | 
|  | 3053 | retval = 1; | 
|  | 3054 | } else { | 
|  | 3055 | retval = dwc2_hc_continue_transfer(hsotg, chan); | 
|  | 3056 | } | 
|  | 3057 | } | 
|  | 3058 |  | 
|  | 3059 | return retval; | 
|  | 3060 | } | 
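|  |  |  | 
|  |  | /* | 
|  |  | * The callers below (dwc2_process_periodic_channels() and | 
|  |  | * dwc2_process_non_periodic_channels()) act on the return value roughly | 
|  |  | * as follows: | 
|  |  | * | 
|  |  | *   > 0  more requests are needed, keep working on the current channel | 
|  |  | *   == 0 the transfer is fully queued, move the QH along its schedule | 
|  |  | *   < 0  the Tx FIFO is full, stop and wait for the FIFO empty interrupt | 
|  |  | */ | 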
|  | 3061 |  | 
|  | 3062 | /* | 
|  | 3063 | * Processes periodic channels for the next frame and queues transactions for | 
|  | 3064 | * these channels to the DWC_otg controller. After queueing transactions, the | 
|  | 3065 | * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions | 
|  | 3066 | * to queue as Periodic Tx FIFO or request queue space becomes available. | 
|  | 3067 | * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. | 
|  | 3068 | * | 
|  | 3069 | * Must be called with interrupt disabled and spinlock held | 
|  | 3070 | */ | 
|  | 3071 | static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) | 
|  | 3072 | { | 
|  | 3073 | struct list_head *qh_ptr; | 
|  | 3074 | struct dwc2_qh *qh; | 
|  | 3075 | u32 tx_status; | 
|  | 3076 | u32 fspcavail; | 
|  | 3077 | u32 gintmsk; | 
|  | 3078 | int status; | 
|  | 3079 | bool no_queue_space = false; | 
|  | 3080 | bool no_fifo_space = false; | 
|  | 3081 | u32 qspcavail; | 
|  | 3082 |  | 
|  | 3083 | /* If the list is empty, just adjust the interrupt enables */ | 
|  | 3084 | if (list_empty(&hsotg->periodic_sched_assigned)) | 
|  | 3085 | goto exit; | 
|  | 3086 |  | 
|  | 3087 | if (dbg_perio()) | 
|  | 3088 | dev_vdbg(hsotg->dev, "Queue periodic transactions\n"); | 
|  | 3089 |  | 
|  | 3090 | tx_status = dwc2_readl(hsotg, HPTXSTS); | 
|  | 3091 | qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> | 
|  | 3092 | TXSTS_QSPCAVAIL_SHIFT; | 
|  | 3093 | fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> | 
|  | 3094 | TXSTS_FSPCAVAIL_SHIFT; | 
|  | 3095 |  | 
|  | 3096 | if (dbg_perio()) { | 
|  | 3097 | dev_vdbg(hsotg->dev, "  P Tx Req Queue Space Avail (before queue): %d\n", | 
|  | 3098 | qspcavail); | 
|  | 3099 | dev_vdbg(hsotg->dev, "  P Tx FIFO Space Avail (before queue): %d\n", | 
|  | 3100 | fspcavail); | 
|  | 3101 | } | 
|  | 3102 |  | 
|  | 3103 | qh_ptr = hsotg->periodic_sched_assigned.next; | 
|  | 3104 | while (qh_ptr != &hsotg->periodic_sched_assigned) { | 
|  | 3105 | tx_status = dwc2_readl(hsotg, HPTXSTS); | 
|  | 3106 | qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> | 
|  | 3107 | TXSTS_QSPCAVAIL_SHIFT; | 
|  | 3108 | if (qspcavail == 0) { | 
|  | 3109 | no_queue_space = true; | 
|  | 3110 | break; | 
|  | 3111 | } | 
|  | 3112 |  | 
|  | 3113 | qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); | 
|  | 3114 | if (!qh->channel) { | 
|  | 3115 | qh_ptr = qh_ptr->next; | 
|  | 3116 | continue; | 
|  | 3117 | } | 
|  | 3118 |  | 
|  | 3119 | /* Make sure EP's TT buffer is clean before queueing qtds */ | 
|  | 3120 | if (qh->tt_buffer_dirty) { | 
|  | 3121 | qh_ptr = qh_ptr->next; | 
|  | 3122 | continue; | 
|  | 3123 | } | 
|  | 3124 |  | 
|  | 3125 | /* | 
|  | 3126 | * Set a flag if we're queuing high-bandwidth in slave mode. | 
|  | 3127 | * The flag prevents any halts from getting into the request queue | 
|  | 3128 | * while multiple high-bandwidth packets are being queued. | 
|  | 3129 | */ | 
|  | 3130 | if (!hsotg->params.host_dma && | 
|  | 3131 | qh->channel->multi_count > 1) | 
|  | 3132 | hsotg->queuing_high_bandwidth = 1; | 
|  | 3133 |  | 
|  | 3134 | fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> | 
|  | 3135 | TXSTS_FSPCAVAIL_SHIFT; | 
|  | 3136 | status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail); | 
|  | 3137 | if (status < 0) { | 
|  | 3138 | no_fifo_space = true; | 
|  | 3139 | break; | 
|  | 3140 | } | 
|  | 3141 |  | 
|  | 3142 | /* | 
|  | 3143 | * In Slave mode, stay on the current transfer until there is | 
|  | 3144 | * nothing more to do or the high-bandwidth request count is | 
|  | 3145 | * reached. In DMA mode, only one request needs to be queued. The | 
|  | 3146 | * controller automatically handles multiple packets for | 
|  | 3147 | * high-bandwidth transfers. | 
|  | 3148 | */ | 
|  | 3149 | if (hsotg->params.host_dma || status == 0 || | 
|  | 3150 | qh->channel->requests == qh->channel->multi_count) { | 
|  | 3151 | qh_ptr = qh_ptr->next; | 
|  | 3152 | /* | 
|  | 3153 | * Move the QH from the periodic assigned schedule to | 
|  | 3154 | * the periodic queued schedule | 
|  | 3155 | */ | 
|  | 3156 | list_move_tail(&qh->qh_list_entry, | 
|  | 3157 | &hsotg->periodic_sched_queued); | 
|  | 3158 |  | 
|  | 3159 | /* done queuing high bandwidth */ | 
|  | 3160 | hsotg->queuing_high_bandwidth = 0; | 
|  | 3161 | } | 
|  | 3162 | } | 
|  | 3163 |  | 
|  | 3164 | exit: | 
|  | 3165 | if (no_queue_space || no_fifo_space || | 
|  | 3166 | (!hsotg->params.host_dma && | 
|  | 3167 | !list_empty(&hsotg->periodic_sched_assigned))) { | 
|  | 3168 | /* | 
|  | 3169 | * May need to queue more transactions as the request | 
|  | 3170 | * queue or Tx FIFO empties. Enable the periodic Tx | 
|  | 3171 | * FIFO empty interrupt. (Always use the half-empty | 
|  | 3172 | * level to ensure that new requests are loaded as | 
|  | 3173 | * soon as possible.) | 
|  | 3174 | */ | 
|  | 3175 | gintmsk = dwc2_readl(hsotg, GINTMSK); | 
|  | 3176 | if (!(gintmsk & GINTSTS_PTXFEMP)) { | 
|  | 3177 | gintmsk |= GINTSTS_PTXFEMP; | 
|  | 3178 | dwc2_writel(hsotg, gintmsk, GINTMSK); | 
|  | 3179 | } | 
|  | 3180 | } else { | 
|  | 3181 | /* | 
|  | 3182 | * Disable the Tx FIFO empty interrupt since there are | 
|  | 3183 | * no more transactions that need to be queued right | 
|  | 3184 | * now. This function is called from interrupt | 
|  | 3185 | * handlers to queue more transactions as transfer | 
|  | 3186 | * states change. | 
|  | 3187 | */ | 
|  | 3188 | gintmsk = dwc2_readl(hsotg, GINTMSK); | 
|  | 3189 | if (gintmsk & GINTSTS_PTXFEMP) { | 
|  | 3190 | gintmsk &= ~GINTSTS_PTXFEMP; | 
|  | 3191 | dwc2_writel(hsotg, gintmsk, GINTMSK); | 
|  | 3192 | } | 
|  | 3193 | } | 
|  | 3194 | } | 
|  | 3195 |  | 
|  | 3196 | /* | 
|  | 3197 | * Processes active non-periodic channels and queues transactions for these | 
|  | 3198 | * channels to the DWC_otg controller. After queueing transactions, the NP Tx | 
|  | 3199 | * FIFO Empty interrupt is enabled if there are more transactions to queue as | 
|  | 3200 | * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx | 
|  | 3201 | * FIFO Empty interrupt is disabled. | 
|  | 3202 | * | 
|  | 3203 | * Must be called with interrupt disabled and spinlock held | 
|  | 3204 | */ | 
|  | 3205 | static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg) | 
|  | 3206 | { | 
|  | 3207 | struct list_head *orig_qh_ptr; | 
|  | 3208 | struct dwc2_qh *qh; | 
|  | 3209 | u32 tx_status; | 
|  | 3210 | u32 qspcavail; | 
|  | 3211 | u32 fspcavail; | 
|  | 3212 | u32 gintmsk; | 
|  | 3213 | int status; | 
|  | 3214 | int no_queue_space = 0; | 
|  | 3215 | int no_fifo_space = 0; | 
|  | 3216 | int more_to_do = 0; | 
|  | 3217 |  | 
|  | 3218 | dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n"); | 
|  | 3219 |  | 
|  | 3220 | tx_status = dwc2_readl(hsotg, GNPTXSTS); | 
|  | 3221 | qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> | 
|  | 3222 | TXSTS_QSPCAVAIL_SHIFT; | 
|  | 3223 | fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> | 
|  | 3224 | TXSTS_FSPCAVAIL_SHIFT; | 
|  | 3225 | dev_vdbg(hsotg->dev, "  NP Tx Req Queue Space Avail (before queue): %d\n", | 
|  | 3226 | qspcavail); | 
|  | 3227 | dev_vdbg(hsotg->dev, "  NP Tx FIFO Space Avail (before queue): %d\n", | 
|  | 3228 | fspcavail); | 
|  | 3229 |  | 
|  | 3230 | /* | 
|  | 3231 | * Keep track of the starting point. Skip over the start-of-list | 
|  | 3232 | * entry. | 
|  | 3233 | */ | 
|  | 3234 | if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active) | 
|  | 3235 | hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next; | 
|  | 3236 | orig_qh_ptr = hsotg->non_periodic_qh_ptr; | 
|  | 3237 |  | 
|  | 3238 | /* | 
|  | 3239 | * Process once through the active list or until no more space is | 
|  | 3240 | * available in the request queue or the Tx FIFO | 
|  | 3241 | */ | 
|  | 3242 | do { | 
|  | 3243 | tx_status = dwc2_readl(hsotg, GNPTXSTS); | 
|  | 3244 | qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> | 
|  | 3245 | TXSTS_QSPCAVAIL_SHIFT; | 
|  | 3246 | if (!hsotg->params.host_dma && qspcavail == 0) { | 
|  | 3247 | no_queue_space = 1; | 
|  | 3248 | break; | 
|  | 3249 | } | 
|  | 3250 |  | 
|  | 3251 | qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh, | 
|  | 3252 | qh_list_entry); | 
|  | 3253 | if (!qh->channel) | 
|  | 3254 | goto next; | 
|  | 3255 |  | 
|  | 3256 | /* Make sure EP's TT buffer is clean before queueing qtds */ | 
|  | 3257 | if (qh->tt_buffer_dirty) | 
|  | 3258 | goto next; | 
|  | 3259 |  | 
|  | 3260 | fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> | 
|  | 3261 | TXSTS_FSPCAVAIL_SHIFT; | 
|  | 3262 | status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail); | 
|  | 3263 |  | 
|  | 3264 | if (status > 0) { | 
|  | 3265 | more_to_do = 1; | 
|  | 3266 | } else if (status < 0) { | 
|  | 3267 | no_fifo_space = 1; | 
|  | 3268 | break; | 
|  | 3269 | } | 
|  | 3270 | next: | 
|  | 3271 | /* Advance to next QH, skipping start-of-list entry */ | 
|  | 3272 | hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next; | 
|  | 3273 | if (hsotg->non_periodic_qh_ptr == | 
|  | 3274 | &hsotg->non_periodic_sched_active) | 
|  | 3275 | hsotg->non_periodic_qh_ptr = | 
|  | 3276 | hsotg->non_periodic_qh_ptr->next; | 
|  | 3277 | } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr); | 
|  | 3278 |  | 
|  | 3279 | if (!hsotg->params.host_dma) { | 
|  | 3280 | tx_status = dwc2_readl(hsotg, GNPTXSTS); | 
|  | 3281 | qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> | 
|  | 3282 | TXSTS_QSPCAVAIL_SHIFT; | 
|  | 3283 | fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> | 
|  | 3284 | TXSTS_FSPCAVAIL_SHIFT; | 
|  | 3285 | dev_vdbg(hsotg->dev, | 
|  | 3286 | "  NP Tx Req Queue Space Avail (after queue): %d\n", | 
|  | 3287 | qspcavail); | 
|  | 3288 | dev_vdbg(hsotg->dev, | 
|  | 3289 | "  NP Tx FIFO Space Avail (after queue): %d\n", | 
|  | 3290 | fspcavail); | 
|  | 3291 |  | 
|  | 3292 | if (more_to_do || no_queue_space || no_fifo_space) { | 
|  | 3293 | /* | 
|  | 3294 | * May need to queue more transactions as the request | 
|  | 3295 | * queue or Tx FIFO empties. Enable the non-periodic | 
|  | 3296 | * Tx FIFO empty interrupt. (Always use the half-empty | 
|  | 3297 | * level to ensure that new requests are loaded as | 
|  | 3298 | * soon as possible.) | 
|  | 3299 | */ | 
|  | 3300 | gintmsk = dwc2_readl(hsotg, GINTMSK); | 
|  | 3301 | gintmsk |= GINTSTS_NPTXFEMP; | 
|  | 3302 | dwc2_writel(hsotg, gintmsk, GINTMSK); | 
|  | 3303 | } else { | 
|  | 3304 | /* | 
|  | 3305 | * Disable the Tx FIFO empty interrupt since there are | 
|  | 3306 | * no more transactions that need to be queued right | 
|  | 3307 | * now. This function is called from interrupt | 
|  | 3308 | * handlers to queue more transactions as transfer | 
|  | 3309 | * states change. | 
|  | 3310 | */ | 
|  | 3311 | gintmsk = dwc2_readl(hsotg, GINTMSK); | 
|  | 3312 | gintmsk &= ~GINTSTS_NPTXFEMP; | 
|  | 3313 | dwc2_writel(hsotg, gintmsk, GINTMSK); | 
|  | 3314 | } | 
|  | 3315 | } | 
|  | 3316 | } | 
|  | 3317 |  | 
|  | 3318 | /** | 
|  | 3319 | * dwc2_hcd_queue_transactions() - Processes the currently active host channels | 
|  | 3320 | * and queues transactions for these channels to the DWC_otg controller. Called | 
|  | 3321 | * from the HCD interrupt handler functions. | 
|  | 3322 | * | 
|  | 3323 | * @hsotg:   The HCD state structure | 
|  | 3324 | * @tr_type: The type(s) of transactions to queue (non-periodic, periodic, | 
|  | 3325 | *           or both) | 
|  | 3326 | * | 
|  | 3327 | * Must be called with interrupt disabled and spinlock held | 
|  | 3328 | */ | 
|  | 3329 | void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, | 
|  | 3330 | enum dwc2_transaction_type tr_type) | 
|  | 3331 | { | 
|  | 3332 | #ifdef DWC2_DEBUG_SOF | 
|  | 3333 | dev_vdbg(hsotg->dev, "Queue Transactions\n"); | 
|  | 3334 | #endif | 
|  | 3335 | /* Process host channels associated with periodic transfers */ | 
|  | 3336 | if (tr_type == DWC2_TRANSACTION_PERIODIC || | 
|  | 3337 | tr_type == DWC2_TRANSACTION_ALL) | 
|  | 3338 | dwc2_process_periodic_channels(hsotg); | 
|  | 3339 |  | 
|  | 3340 | /* Process host channels associated with non-periodic transfers */ | 
|  | 3341 | if (tr_type == DWC2_TRANSACTION_NON_PERIODIC || | 
|  | 3342 | tr_type == DWC2_TRANSACTION_ALL) { | 
|  | 3343 | if (!list_empty(&hsotg->non_periodic_sched_active)) { | 
|  | 3344 | dwc2_process_non_periodic_channels(hsotg); | 
|  | 3345 | } else { | 
|  | 3346 | /* | 
|  | 3347 | * Ensure NP Tx FIFO empty interrupt is disabled when | 
|  | 3348 | * there are no non-periodic transfers to process | 
|  | 3349 | */ | 
|  | 3350 | u32 gintmsk = dwc2_readl(hsotg, GINTMSK); | 
|  | 3351 |  | 
|  | 3352 | gintmsk &= ~GINTSTS_NPTXFEMP; | 
|  | 3353 | dwc2_writel(hsotg, gintmsk, GINTMSK); | 
|  | 3354 | } | 
|  | 3355 | } | 
|  | 3356 | } | 
|  | 3357 |  | 
|  | 3358 | static void dwc2_conn_id_status_change(struct work_struct *work) | 
|  | 3359 | { | 
|  | 3360 | struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, | 
|  | 3361 | wf_otg); | 
|  | 3362 | u32 count = 0; | 
|  | 3363 | u32 gotgctl; | 
|  | 3364 | unsigned long flags; | 
|  | 3365 |  | 
|  | 3366 | dev_dbg(hsotg->dev, "%s()\n", __func__); | 
|  | 3367 |  | 
|  | 3368 | gotgctl = dwc2_readl(hsotg, GOTGCTL); | 
|  | 3369 | dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl); | 
|  | 3370 | dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n", | 
|  | 3371 | !!(gotgctl & GOTGCTL_CONID_B)); | 
|  | 3372 |  | 
|  | 3373 | /* B-Device connector (Device Mode) */ | 
|  | 3374 | if (gotgctl & GOTGCTL_CONID_B) { | 
|  | 3375 | dwc2_vbus_supply_exit(hsotg); | 
|  | 3376 | /* Wait for switch to device mode */ | 
|  | 3377 | dev_dbg(hsotg->dev, "connId B\n"); | 
|  | 3378 | if (hsotg->bus_suspended) { | 
|  | 3379 | dev_info(hsotg->dev, | 
|  | 3380 | "Do port resume before switching to device mode\n"); | 
|  | 3381 | dwc2_port_resume(hsotg); | 
|  | 3382 | } | 
|  | 3383 | while (!dwc2_is_device_mode(hsotg)) { | 
|  | 3384 | dev_info(hsotg->dev, | 
|  | 3385 | "Waiting for Peripheral Mode, Mode=%s\n", | 
|  | 3386 | dwc2_is_host_mode(hsotg) ? "Host" : | 
|  | 3387 | "Peripheral"); | 
|  | 3388 | msleep(20); | 
|  | 3389 | /* | 
|  | 3390 | * Sometimes the initial GOTGCTL read is wrong, so | 
|  | 3391 | * check it again and jump to host mode if that was | 
|  | 3392 | * the case. | 
|  | 3393 | */ | 
|  | 3394 | gotgctl = dwc2_readl(hsotg, GOTGCTL); | 
|  | 3395 | if (!(gotgctl & GOTGCTL_CONID_B)) | 
|  | 3396 | goto host; | 
|  | 3397 | if (++count > 250) | 
|  | 3398 | break; | 
|  | 3399 | } | 
|  | 3400 | if (count > 250) | 
|  | 3401 | dev_err(hsotg->dev, | 
|  | 3402 | "Connection id status change timed out\n"); | 
|  | 3403 | hsotg->op_state = OTG_STATE_B_PERIPHERAL; | 
|  | 3404 | dwc2_core_init(hsotg, false); | 
|  | 3405 | dwc2_enable_global_interrupts(hsotg); | 
|  | 3406 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 3407 | dwc2_hsotg_core_init_disconnected(hsotg, false); | 
|  | 3408 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3409 | /* Enable ACG feature in device mode, if supported */ | 
|  | 3410 | dwc2_enable_acg(hsotg); | 
|  | 3411 | dwc2_hsotg_core_connect(hsotg); | 
|  | 3412 | } else { | 
|  | 3413 | host: | 
|  | 3414 | /* A-Device connector (Host Mode) */ | 
|  | 3415 | dev_dbg(hsotg->dev, "connId A\n"); | 
|  | 3416 | while (!dwc2_is_host_mode(hsotg)) { | 
|  | 3417 | dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n", | 
|  | 3418 | dwc2_is_host_mode(hsotg) ? | 
|  | 3419 | "Host" : "Peripheral"); | 
|  | 3420 | msleep(20); | 
|  | 3421 | if (++count > 250) | 
|  | 3422 | break; | 
|  | 3423 | } | 
|  | 3424 | if (count > 250) | 
|  | 3425 | dev_err(hsotg->dev, | 
|  | 3426 | "Connection id status change timed out\n"); | 
|  | 3427 |  | 
|  | 3428 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 3429 | dwc2_hsotg_disconnect(hsotg); | 
|  | 3430 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3431 |  | 
|  | 3432 | hsotg->op_state = OTG_STATE_A_HOST; | 
|  | 3433 | /* Initialize the Core for Host mode */ | 
|  | 3434 | dwc2_core_init(hsotg, false); | 
|  | 3435 | dwc2_enable_global_interrupts(hsotg); | 
|  | 3436 | dwc2_hcd_start(hsotg); | 
|  | 3437 | } | 
|  | 3438 | } | 
|  | 3439 |  | 
|  | 3440 | static void dwc2_wakeup_detected(struct timer_list *t) | 
|  | 3441 | { | 
|  | 3442 | struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer); | 
|  | 3443 | u32 hprt0; | 
|  | 3444 |  | 
|  | 3445 | dev_dbg(hsotg->dev, "%s()\n", __func__); | 
|  | 3446 |  | 
|  | 3447 | /* | 
|  | 3448 | * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms | 
|  | 3449 | * so that OPT tests pass with all PHYs.) | 
|  | 3450 | */ | 
|  | 3451 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3452 | dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0); | 
|  | 3453 | hprt0 &= ~HPRT0_RES; | 
|  | 3454 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3455 | dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", | 
|  | 3456 | dwc2_readl(hsotg, HPRT0)); | 
|  | 3457 |  | 
|  | 3458 | dwc2_hcd_rem_wakeup(hsotg); | 
|  | 3459 | hsotg->bus_suspended = false; | 
|  | 3460 |  | 
|  | 3461 | /* Change to L0 state */ | 
|  | 3462 | hsotg->lx_state = DWC2_L0; | 
|  | 3463 | } | 
|  | 3464 |  | 
|  | 3465 | static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg) | 
|  | 3466 | { | 
|  | 3467 | struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); | 
|  | 3468 |  | 
|  | 3469 | return hcd->self.b_hnp_enable; | 
|  | 3470 | } | 
|  | 3471 |  | 
|  | 3472 | /* Must NOT be called with interrupt disabled or spinlock held */ | 
|  | 3473 | static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) | 
|  | 3474 | { | 
|  | 3475 | unsigned long flags; | 
|  | 3476 | u32 hprt0; | 
|  | 3477 | u32 pcgctl; | 
|  | 3478 | u32 gotgctl; | 
|  | 3479 |  | 
|  | 3480 | dev_dbg(hsotg->dev, "%s()\n", __func__); | 
|  | 3481 |  | 
|  | 3482 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 3483 |  | 
|  | 3484 | if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) { | 
|  | 3485 | gotgctl = dwc2_readl(hsotg, GOTGCTL); | 
|  | 3486 | gotgctl |= GOTGCTL_HSTSETHNPEN; | 
|  | 3487 | dwc2_writel(hsotg, gotgctl, GOTGCTL); | 
|  | 3488 | hsotg->op_state = OTG_STATE_A_SUSPEND; | 
|  | 3489 | } | 
|  | 3490 |  | 
|  | 3491 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3492 | hprt0 |= HPRT0_SUSP; | 
|  | 3493 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3494 |  | 
|  | 3495 | hsotg->bus_suspended = true; | 
|  | 3496 |  | 
|  | 3497 | /* | 
|  | 3498 | * If power_down is supported, the PHY clock will be suspended | 
|  | 3499 | * after the registers are backed up. | 
|  | 3500 | */ | 
|  | 3501 | if (!hsotg->params.power_down) { | 
|  | 3502 | /* Suspend the Phy Clock */ | 
|  | 3503 | pcgctl = dwc2_readl(hsotg, PCGCTL); | 
|  | 3504 | pcgctl |= PCGCTL_STOPPCLK; | 
|  | 3505 | dwc2_writel(hsotg, pcgctl, PCGCTL); | 
|  | 3506 | udelay(10); | 
|  | 3507 | } | 
|  | 3508 |  | 
|  | 3509 | /* For HNP the bus must be suspended for at least 200ms */ | 
|  | 3510 | if (dwc2_host_is_b_hnp_enabled(hsotg)) { | 
|  | 3511 | pcgctl = dwc2_readl(hsotg, PCGCTL); | 
|  | 3512 | pcgctl &= ~PCGCTL_STOPPCLK; | 
|  | 3513 | dwc2_writel(hsotg, pcgctl, PCGCTL); | 
|  | 3514 |  | 
|  | 3515 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3516 |  | 
|  | 3517 | msleep(200); | 
|  | 3518 | } else { | 
|  | 3519 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3520 | } | 
|  | 3521 | } | 
|  | 3522 |  | 
|  | 3523 | /* Must NOT be called with interrupt disabled or spinlock held */ | 
|  | 3524 | static void dwc2_port_resume(struct dwc2_hsotg *hsotg) | 
|  | 3525 | { | 
|  | 3526 | unsigned long flags; | 
|  | 3527 | u32 hprt0; | 
|  | 3528 | u32 pcgctl; | 
|  | 3529 |  | 
|  | 3530 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 3531 |  | 
|  | 3532 | /* | 
|  | 3533 | * If power_down is supported, the PHY clock is already resumed | 
|  | 3534 | * after the registers are restored. | 
|  | 3535 | */ | 
|  | 3536 | if (!hsotg->params.power_down) { | 
|  | 3537 | pcgctl = dwc2_readl(hsotg, PCGCTL); | 
|  | 3538 | pcgctl &= ~PCGCTL_STOPPCLK; | 
|  | 3539 | dwc2_writel(hsotg, pcgctl, PCGCTL); | 
|  | 3540 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3541 | msleep(20); | 
|  | 3542 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 3543 | } | 
|  | 3544 |  | 
|  | 3545 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3546 | hprt0 |= HPRT0_RES; | 
|  | 3547 | hprt0 &= ~HPRT0_SUSP; | 
|  | 3548 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3549 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3550 |  | 
|  | 3551 | msleep(USB_RESUME_TIMEOUT); | 
|  | 3552 |  | 
|  | 3553 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 3554 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3555 | hprt0 &= ~(HPRT0_RES | HPRT0_SUSP); | 
|  | 3556 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3557 | hsotg->bus_suspended = false; | 
|  | 3558 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 3559 | } | 
|  | 3560 |  | 
|  | 3561 | /* Handles hub class-specific requests */ | 
|  | 3562 | static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, | 
|  | 3563 | u16 wvalue, u16 windex, char *buf, u16 wlength) | 
|  | 3564 | { | 
|  | 3565 | struct usb_hub_descriptor *hub_desc; | 
|  | 3566 | int retval = 0; | 
|  | 3567 | u32 hprt0; | 
|  | 3568 | u32 port_status; | 
|  | 3569 | u32 speed; | 
|  | 3570 | u32 pcgctl; | 
|  | 3571 | u32 pwr; | 
|  | 3572 |  | 
|  | 3573 | switch (typereq) { | 
|  | 3574 | case ClearHubFeature: | 
|  | 3575 | dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue); | 
|  | 3576 |  | 
|  | 3577 | switch (wvalue) { | 
|  | 3578 | case C_HUB_LOCAL_POWER: | 
|  | 3579 | case C_HUB_OVER_CURRENT: | 
|  | 3580 | /* Nothing required here */ | 
|  | 3581 | break; | 
|  | 3582 |  | 
|  | 3583 | default: | 
|  | 3584 | retval = -EINVAL; | 
|  | 3585 | dev_err(hsotg->dev, | 
|  | 3586 | "ClearHubFeature request %1xh unknown\n", | 
|  | 3587 | wvalue); | 
|  | 3588 | } | 
|  | 3589 | break; | 
|  | 3590 |  | 
|  | 3591 | case ClearPortFeature: | 
|  | 3592 | if (wvalue != USB_PORT_FEAT_L1) | 
|  | 3593 | if (!windex || windex > 1) | 
|  | 3594 | goto error; | 
|  | 3595 | switch (wvalue) { | 
|  | 3596 | case USB_PORT_FEAT_ENABLE: | 
|  | 3597 | dev_dbg(hsotg->dev, | 
|  | 3598 | "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); | 
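|  |  | /* | 
|  |  | * Writing 1 to HPRT0_ENA disables the port; the enable bit | 
|  |  | * acts as write-1-to-clear in this core | 
|  |  | */ | 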
|  | 3599 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3600 | hprt0 |= HPRT0_ENA; | 
|  | 3601 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3602 | break; | 
|  | 3603 |  | 
|  | 3604 | case USB_PORT_FEAT_SUSPEND: | 
|  | 3605 | dev_dbg(hsotg->dev, | 
|  | 3606 | "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); | 
|  | 3607 |  | 
|  | 3608 | if (hsotg->bus_suspended) { | 
|  | 3609 | if (hsotg->hibernated) | 
|  | 3610 | dwc2_exit_hibernation(hsotg, 0, 0, 1); | 
|  | 3611 | else | 
|  | 3612 | dwc2_port_resume(hsotg); | 
|  | 3613 | } | 
|  | 3614 | break; | 
|  | 3615 |  | 
|  | 3616 | case USB_PORT_FEAT_POWER: | 
|  | 3617 | dev_dbg(hsotg->dev, | 
|  | 3618 | "ClearPortFeature USB_PORT_FEAT_POWER\n"); | 
|  | 3619 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3620 | pwr = hprt0 & HPRT0_PWR; | 
|  | 3621 | hprt0 &= ~HPRT0_PWR; | 
|  | 3622 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3623 | if (pwr) | 
|  | 3624 | dwc2_vbus_supply_exit(hsotg); | 
|  | 3625 | break; | 
|  | 3626 |  | 
|  | 3627 | case USB_PORT_FEAT_INDICATOR: | 
|  | 3628 | dev_dbg(hsotg->dev, | 
|  | 3629 | "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); | 
|  | 3630 | /* Port indicator not supported */ | 
|  | 3631 | break; | 
|  | 3632 |  | 
|  | 3633 | case USB_PORT_FEAT_C_CONNECTION: | 
|  | 3634 | /* | 
|  | 3635 | * Clears driver's internal Connect Status Change flag | 
|  | 3636 | */ | 
|  | 3637 | dev_dbg(hsotg->dev, | 
|  | 3638 | "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); | 
|  | 3639 | hsotg->flags.b.port_connect_status_change = 0; | 
|  | 3640 | break; | 
|  | 3641 |  | 
|  | 3642 | case USB_PORT_FEAT_C_RESET: | 
|  | 3643 | /* Clears driver's internal Port Reset Change flag */ | 
|  | 3644 | dev_dbg(hsotg->dev, | 
|  | 3645 | "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); | 
|  | 3646 | hsotg->flags.b.port_reset_change = 0; | 
|  | 3647 | break; | 
|  | 3648 |  | 
|  | 3649 | case USB_PORT_FEAT_C_ENABLE: | 
|  | 3650 | /* | 
|  | 3651 | * Clears the driver's internal Port Enable/Disable | 
|  | 3652 | * Change flag | 
|  | 3653 | */ | 
|  | 3654 | dev_dbg(hsotg->dev, | 
|  | 3655 | "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); | 
|  | 3656 | hsotg->flags.b.port_enable_change = 0; | 
|  | 3657 | break; | 
|  | 3658 |  | 
|  | 3659 | case USB_PORT_FEAT_C_SUSPEND: | 
|  | 3660 | /* | 
|  | 3661 | * Clears the driver's internal Port Suspend Change | 
|  | 3662 | * flag, which is set when resume signaling on the host | 
|  | 3663 | * port is complete | 
|  | 3664 | */ | 
|  | 3665 | dev_dbg(hsotg->dev, | 
|  | 3666 | "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); | 
|  | 3667 | hsotg->flags.b.port_suspend_change = 0; | 
|  | 3668 | break; | 
|  | 3669 |  | 
|  | 3670 | case USB_PORT_FEAT_C_PORT_L1: | 
|  | 3671 | dev_dbg(hsotg->dev, | 
|  | 3672 | "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n"); | 
|  | 3673 | hsotg->flags.b.port_l1_change = 0; | 
|  | 3674 | break; | 
|  | 3675 |  | 
|  | 3676 | case USB_PORT_FEAT_C_OVER_CURRENT: | 
|  | 3677 | dev_dbg(hsotg->dev, | 
|  | 3678 | "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); | 
|  | 3679 | hsotg->flags.b.port_over_current_change = 0; | 
|  | 3680 | break; | 
|  | 3681 |  | 
|  | 3682 | default: | 
|  | 3683 | retval = -EINVAL; | 
|  | 3684 | dev_err(hsotg->dev, | 
|  | 3685 | "ClearPortFeature request %1xh unknown or unsupported\n", | 
|  | 3686 | wvalue); | 
|  | 3687 | } | 
|  | 3688 | break; | 
|  | 3689 |  | 
|  | 3690 | case GetHubDescriptor: | 
|  | 3691 | dev_dbg(hsotg->dev, "GetHubDescriptor\n"); | 
|  | 3692 | hub_desc = (struct usb_hub_descriptor *)buf; | 
|  | 3693 | hub_desc->bDescLength = 9; | 
|  | 3694 | hub_desc->bDescriptorType = USB_DT_HUB; | 
|  | 3695 | hub_desc->bNbrPorts = 1; | 
|  | 3696 | hub_desc->wHubCharacteristics = | 
|  | 3697 | cpu_to_le16(HUB_CHAR_COMMON_LPSM | | 
|  | 3698 | HUB_CHAR_INDV_PORT_OCPM); | 
|  | 3699 | hub_desc->bPwrOn2PwrGood = 1; | 
|  | 3700 | hub_desc->bHubContrCurrent = 0; | 
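|  |  | /* | 
|  |  | * DeviceRemovable[1] doubles as the hub descriptor's | 
|  |  | * PortPwrCtrlMask field, which should read as all ones | 
|  |  | */ | 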
|  | 3701 | hub_desc->u.hs.DeviceRemovable[0] = 0; | 
|  | 3702 | hub_desc->u.hs.DeviceRemovable[1] = 0xff; | 
|  | 3703 | break; | 
|  | 3704 |  | 
|  | 3705 | case GetHubStatus: | 
|  | 3706 | dev_dbg(hsotg->dev, "GetHubStatus\n"); | 
|  | 3707 | memset(buf, 0, 4); | 
|  | 3708 | break; | 
|  | 3709 |  | 
|  | 3710 | case GetPortStatus: | 
|  | 3711 | dev_vdbg(hsotg->dev, | 
|  | 3712 | "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex, | 
|  | 3713 | hsotg->flags.d32); | 
|  | 3714 | if (!windex || windex > 1) | 
|  | 3715 | goto error; | 
|  | 3716 |  | 
|  | 3717 | port_status = 0; | 
|  | 3718 | if (hsotg->flags.b.port_connect_status_change) | 
|  | 3719 | port_status |= USB_PORT_STAT_C_CONNECTION << 16; | 
|  | 3720 | if (hsotg->flags.b.port_enable_change) | 
|  | 3721 | port_status |= USB_PORT_STAT_C_ENABLE << 16; | 
|  | 3722 | if (hsotg->flags.b.port_suspend_change) | 
|  | 3723 | port_status |= USB_PORT_STAT_C_SUSPEND << 16; | 
|  | 3724 | if (hsotg->flags.b.port_l1_change) | 
|  | 3725 | port_status |= USB_PORT_STAT_C_L1 << 16; | 
|  | 3726 | if (hsotg->flags.b.port_reset_change) | 
|  | 3727 | port_status |= USB_PORT_STAT_C_RESET << 16; | 
|  | 3728 | if (hsotg->flags.b.port_over_current_change) { | 
|  | 3729 | dev_warn(hsotg->dev, "Overcurrent change detected\n"); | 
|  | 3730 | port_status |= USB_PORT_STAT_C_OVERCURRENT << 16; | 
|  | 3731 | } | 
|  | 3732 |  | 
|  | 3733 | if (!hsotg->flags.b.port_connect_status) { | 
|  | 3734 | /* | 
|  | 3735 | * The port is disconnected, which means the core is | 
|  | 3736 | * either in device mode or it soon will be. Just | 
|  | 3737 | * return 0's for the remainder of the port status | 
|  | 3738 | * since the port register can't be read if the core | 
|  | 3739 | * is in device mode. | 
|  | 3740 | */ | 
|  | 3741 | *(__le32 *)buf = cpu_to_le32(port_status); | 
|  | 3742 | break; | 
|  | 3743 | } | 
|  | 3744 |  | 
|  | 3745 | hprt0 = dwc2_readl(hsotg, HPRT0); | 
|  | 3746 | dev_vdbg(hsotg->dev, "  HPRT0: 0x%08x\n", hprt0); | 
|  | 3747 |  | 
|  | 3748 | if (hprt0 & HPRT0_CONNSTS) | 
|  | 3749 | port_status |= USB_PORT_STAT_CONNECTION; | 
|  | 3750 | if (hprt0 & HPRT0_ENA) | 
|  | 3751 | port_status |= USB_PORT_STAT_ENABLE; | 
|  | 3752 | if (hprt0 & HPRT0_SUSP) | 
|  | 3753 | port_status |= USB_PORT_STAT_SUSPEND; | 
|  | 3754 | if (hprt0 & HPRT0_OVRCURRACT) | 
|  | 3755 | port_status |= USB_PORT_STAT_OVERCURRENT; | 
|  | 3756 | if (hprt0 & HPRT0_RST) | 
|  | 3757 | port_status |= USB_PORT_STAT_RESET; | 
|  | 3758 | if (hprt0 & HPRT0_PWR) | 
|  | 3759 | port_status |= USB_PORT_STAT_POWER; | 
|  | 3760 |  | 
|  | 3761 | speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT; | 
|  | 3762 | if (speed == HPRT0_SPD_HIGH_SPEED) | 
|  | 3763 | port_status |= USB_PORT_STAT_HIGH_SPEED; | 
|  | 3764 | else if (speed == HPRT0_SPD_LOW_SPEED) | 
|  | 3765 | port_status |= USB_PORT_STAT_LOW_SPEED; | 
|  | 3766 |  | 
|  | 3767 | if (hprt0 & HPRT0_TSTCTL_MASK) | 
|  | 3768 | port_status |= USB_PORT_STAT_TEST; | 
|  | 3769 | /* USB_PORT_FEAT_INDICATOR unsupported, always 0 */ | 
|  | 3770 |  | 
|  | 3771 | if (hsotg->params.dma_desc_fs_enable) { | 
|  | 3772 | /* | 
|  | 3773 | * Enable descriptor DMA only if a full speed | 
|  | 3774 | * device is connected. | 
|  | 3775 | */ | 
|  | 3776 | if (hsotg->new_connection && | 
|  | 3777 | ((port_status & | 
|  | 3778 | (USB_PORT_STAT_CONNECTION | | 
|  | 3779 | USB_PORT_STAT_HIGH_SPEED | | 
|  | 3780 | USB_PORT_STAT_LOW_SPEED)) == | 
|  | 3781 | USB_PORT_STAT_CONNECTION)) { | 
|  | 3782 | u32 hcfg; | 
|  | 3783 |  | 
|  | 3784 | dev_info(hsotg->dev, "Enabling descriptor DMA mode\n"); | 
|  | 3785 | hsotg->params.dma_desc_enable = true; | 
|  | 3786 | hcfg = dwc2_readl(hsotg, HCFG); | 
|  | 3787 | hcfg |= HCFG_DESCDMA; | 
|  | 3788 | dwc2_writel(hsotg, hcfg, HCFG); | 
|  | 3789 | hsotg->new_connection = false; | 
|  | 3790 | } | 
|  | 3791 | } | 
|  | 3792 |  | 
|  | 3793 | dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status); | 
|  | 3794 | *(__le32 *)buf = cpu_to_le32(port_status); | 
|  | 3795 | break; | 
|  | 3796 |  | 
|  | 3797 | case SetHubFeature: | 
|  | 3798 | dev_dbg(hsotg->dev, "SetHubFeature\n"); | 
|  | 3799 | /* No HUB features supported */ | 
|  | 3800 | break; | 
|  | 3801 |  | 
|  | 3802 | case SetPortFeature: | 
|  | 3803 | dev_dbg(hsotg->dev, "SetPortFeature\n"); | 
|  | 3804 | if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1)) | 
|  | 3805 | goto error; | 
|  | 3806 |  | 
|  | 3807 | if (!hsotg->flags.b.port_connect_status) { | 
|  | 3808 | /* | 
|  | 3809 | * The port is disconnected, which means the core is | 
|  | 3810 | * either in device mode or it soon will be. Just | 
|  | 3811 | * return without doing anything since the port | 
|  | 3812 | * register can't be written if the core is in device | 
|  | 3813 | * mode. | 
|  | 3814 | */ | 
|  | 3815 | break; | 
|  | 3816 | } | 
|  | 3817 |  | 
|  | 3818 | switch (wvalue) { | 
|  | 3819 | case USB_PORT_FEAT_SUSPEND: | 
|  | 3820 | dev_dbg(hsotg->dev, | 
|  | 3821 | "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); | 
|  | 3822 | if (windex != hsotg->otg_port) | 
|  | 3823 | goto error; | 
|  | 3824 | if (hsotg->params.power_down == 2) | 
|  | 3825 | dwc2_enter_hibernation(hsotg, 1); | 
|  | 3826 | else | 
|  | 3827 | dwc2_port_suspend(hsotg, windex); | 
|  | 3828 | break; | 
|  | 3829 |  | 
|  | 3830 | case USB_PORT_FEAT_POWER: | 
|  | 3831 | dev_dbg(hsotg->dev, | 
|  | 3832 | "SetPortFeature - USB_PORT_FEAT_POWER\n"); | 
|  | 3833 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3834 | pwr = hprt0 & HPRT0_PWR; | 
|  | 3835 | hprt0 |= HPRT0_PWR; | 
|  | 3836 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3837 | if (!pwr) | 
|  | 3838 | dwc2_vbus_supply_init(hsotg); | 
|  | 3839 | break; | 
|  | 3840 |  | 
|  | 3841 | case USB_PORT_FEAT_RESET: | 
|  | 3842 | if (hsotg->params.power_down == 2 && | 
|  | 3843 | hsotg->hibernated) | 
|  | 3844 | dwc2_exit_hibernation(hsotg, 0, 1, 1); | 
|  | 3845 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3846 | dev_dbg(hsotg->dev, | 
|  | 3847 | "SetPortFeature - USB_PORT_FEAT_RESET\n"); | 
|  | 3848 | pcgctl = dwc2_readl(hsotg, PCGCTL); | 
|  | 3849 | pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK); | 
|  | 3850 | dwc2_writel(hsotg, pcgctl, PCGCTL); | 
|  | 3851 | /* ??? Original driver does this */ | 
|  | 3852 | dwc2_writel(hsotg, 0, PCGCTL); | 
|  | 3853 |  | 
|  | 3854 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3855 | pwr = hprt0 & HPRT0_PWR; | 
|  | 3856 | /* Clear suspend bit if resetting from suspend state */ | 
|  | 3857 | hprt0 &= ~HPRT0_SUSP; | 
|  | 3858 |  | 
|  | 3859 | /* | 
|  | 3860 | * When acting as B-Host, the port reset bit is set in the | 
|  | 3861 | * Start HCD callback function so that the reset is started | 
|  | 3862 | * within 1 ms of the HNP success interrupt | 
|  | 3863 | */ | 
|  | 3864 | if (!dwc2_hcd_is_b_host(hsotg)) { | 
|  | 3865 | hprt0 |= HPRT0_PWR | HPRT0_RST; | 
|  | 3866 | dev_dbg(hsotg->dev, | 
|  | 3867 | "In host mode, hprt0=%08x\n", hprt0); | 
|  | 3868 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3869 | if (!pwr) | 
|  | 3870 | dwc2_vbus_supply_init(hsotg); | 
|  | 3871 | } | 
|  | 3872 |  | 
|  | 3873 | /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ | 
|  | 3874 | msleep(50); | 
|  | 3875 | hprt0 &= ~HPRT0_RST; | 
|  | 3876 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3877 | hsotg->lx_state = DWC2_L0; /* Now back to On state */ | 
|  | 3878 | break; | 
|  | 3879 |  | 
|  | 3880 | case USB_PORT_FEAT_INDICATOR: | 
|  | 3881 | dev_dbg(hsotg->dev, | 
|  | 3882 | "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); | 
|  | 3883 | /* Not supported */ | 
|  | 3884 | break; | 
|  | 3885 |  | 
|  | 3886 | case USB_PORT_FEAT_TEST: | 
|  | 3887 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 3888 | dev_dbg(hsotg->dev, | 
|  | 3889 | "SetPortFeature - USB_PORT_FEAT_TEST\n"); | 
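|  |  | /* | 
|  |  | * The test selector is carried in the upper byte of wIndex, | 
|  |  | * per the USB 2.0 SetPortFeature(PORT_TEST) request | 
|  |  | */ | 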
|  | 3890 | hprt0 &= ~HPRT0_TSTCTL_MASK; | 
|  | 3891 | hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT; | 
|  | 3892 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 3893 | break; | 
|  | 3894 |  | 
|  | 3895 | default: | 
|  | 3896 | retval = -EINVAL; | 
|  | 3897 | dev_err(hsotg->dev, | 
|  | 3898 | "SetPortFeature %1xh unknown or unsupported\n", | 
|  | 3899 | wvalue); | 
|  | 3900 | break; | 
|  | 3901 | } | 
|  | 3902 | break; | 
|  | 3903 |  | 
|  | 3904 | default: | 
|  | 3905 | error: | 
|  | 3906 | retval = -EINVAL; | 
|  | 3907 | dev_dbg(hsotg->dev, | 
|  | 3908 | "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n", | 
|  | 3909 | typereq, windex, wvalue); | 
|  | 3910 | break; | 
|  | 3911 | } | 
|  | 3912 |  | 
|  | 3913 | return retval; | 
|  | 3914 | } | 
|  | 3915 |  | 
|  | 3916 | static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port) | 
|  | 3917 | { | 
|  | 3918 | int retval; | 
|  | 3919 |  | 
|  | 3920 | if (port != 1) | 
|  | 3921 | return -EINVAL; | 
|  | 3922 |  | 
|  | 3923 | retval = (hsotg->flags.b.port_connect_status_change || | 
|  | 3924 | hsotg->flags.b.port_reset_change || | 
|  | 3925 | hsotg->flags.b.port_enable_change || | 
|  | 3926 | hsotg->flags.b.port_suspend_change || | 
|  | 3927 | hsotg->flags.b.port_over_current_change); | 
|  | 3928 |  | 
|  | 3929 | if (retval) { | 
|  | 3930 | dev_dbg(hsotg->dev, | 
|  | 3931 | "DWC OTG HCD HUB STATUS DATA: Root port status changed\n"); | 
|  | 3932 | dev_dbg(hsotg->dev, "  port_connect_status_change: %d\n", | 
|  | 3933 | hsotg->flags.b.port_connect_status_change); | 
|  | 3934 | dev_dbg(hsotg->dev, "  port_reset_change: %d\n", | 
|  | 3935 | hsotg->flags.b.port_reset_change); | 
|  | 3936 | dev_dbg(hsotg->dev, "  port_enable_change: %d\n", | 
|  | 3937 | hsotg->flags.b.port_enable_change); | 
|  | 3938 | dev_dbg(hsotg->dev, "  port_suspend_change: %d\n", | 
|  | 3939 | hsotg->flags.b.port_suspend_change); | 
|  | 3940 | dev_dbg(hsotg->dev, "  port_over_current_change: %d\n", | 
|  | 3941 | hsotg->flags.b.port_over_current_change); | 
|  | 3942 | } | 
|  | 3943 |  | 
|  | 3944 | return retval; | 
|  | 3945 | } | 
|  | 3946 |  | 
|  | 3947 | int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg) | 
|  | 3948 | { | 
|  | 3949 | u32 hfnum = dwc2_readl(hsotg, HFNUM); | 
|  | 3950 |  | 
|  | 3951 | #ifdef DWC2_DEBUG_SOF | 
|  | 3952 | dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n", | 
|  | 3953 | (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT); | 
|  | 3954 | #endif | 
|  | 3955 | return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; | 
|  | 3956 | } | 
|  | 3957 |  | 
|  | 3958 | int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us) | 
|  | 3959 | { | 
|  | 3960 | u32 hprt = dwc2_readl(hsotg, HPRT0); | 
|  | 3961 | u32 hfir = dwc2_readl(hsotg, HFIR); | 
|  | 3962 | u32 hfnum = dwc2_readl(hsotg, HFNUM); | 
|  | 3963 | unsigned int us_per_frame; | 
|  | 3964 | unsigned int frame_number; | 
|  | 3965 | unsigned int remaining; | 
|  | 3966 | unsigned int interval; | 
|  | 3967 | unsigned int phy_clks; | 
|  | 3968 |  | 
|  | 3969 | /* High speed has 125 us per (micro) frame; others have 1 ms per frame */ | 
|  | 3970 | us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125; | 
|  | 3971 |  | 
|  | 3972 | /* Extract fields */ | 
|  | 3973 | frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT; | 
|  | 3974 | remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT; | 
|  | 3975 | interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT; | 
|  | 3976 |  | 
|  | 3977 | /* | 
|  | 3978 | * Number of PHY clocks that will have elapsed since the last tick of | 
|  | 3979 | * the frame number once "us" microseconds have passed. | 
|  | 3980 | */ | 
|  | 3981 | phy_clks = (interval - remaining) + | 
|  | 3982 | DIV_ROUND_UP(interval * us, us_per_frame); | 
|  | 3983 |  | 
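|  |  | /* | 
|  |  | * Worked example (illustrative values only): on a high-speed port | 
|  |  | * driven by a 60 MHz PHY clock, interval is 7500 clocks per 125 us | 
|  |  | * microframe. With remaining = 2500 and us = 100: | 
|  |  | * | 
|  |  | *   phy_clks = (7500 - 2500) + DIV_ROUND_UP(7500 * 100, 125) | 
|  |  | *            = 5000 + 6000 = 11000 | 
|  |  | * | 
|  |  | * 11000 / 7500 = 1, so the frame number returned is one ahead of | 
|  |  | * the current one. | 
|  |  | */ | 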
|  | 3984 | return dwc2_frame_num_inc(frame_number, phy_clks / interval); | 
|  | 3985 | } | 
|  | 3986 |  | 
|  | 3987 | int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg) | 
|  | 3988 | { | 
|  | 3989 | return hsotg->op_state == OTG_STATE_B_HOST; | 
|  | 3990 | } | 
|  | 3991 |  | 
|  | 3992 | static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg, | 
|  | 3993 | int iso_desc_count, | 
|  | 3994 | gfp_t mem_flags) | 
|  | 3995 | { | 
|  | 3996 | struct dwc2_hcd_urb *urb; | 
|  | 3997 | u32 size = sizeof(*urb) + iso_desc_count * | 
|  | 3998 | sizeof(struct dwc2_hcd_iso_packet_desc); | 
|  | 3999 |  | 
|  | 4000 | urb = kzalloc(size, mem_flags); | 
|  | 4001 | if (urb) | 
|  | 4002 | urb->packet_count = iso_desc_count; | 
|  | 4003 | return urb; | 
|  | 4004 | } | 
|  | 4005 |  | 
|  | 4006 | static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg, | 
|  | 4007 | struct dwc2_hcd_urb *urb, u8 dev_addr, | 
|  | 4008 | u8 ep_num, u8 ep_type, u8 ep_dir, | 
|  | 4009 | u16 maxp, u16 maxp_mult) | 
|  | 4010 | { | 
|  | 4011 | if (dbg_perio() || | 
|  | 4012 | ep_type == USB_ENDPOINT_XFER_BULK || | 
|  | 4013 | ep_type == USB_ENDPOINT_XFER_CONTROL) | 
|  | 4014 | dev_vdbg(hsotg->dev, | 
|  | 4015 | "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n", | 
|  | 4016 | dev_addr, ep_num, ep_dir, ep_type, maxp, maxp_mult); | 
|  | 4017 | urb->pipe_info.dev_addr = dev_addr; | 
|  | 4018 | urb->pipe_info.ep_num = ep_num; | 
|  | 4019 | urb->pipe_info.pipe_type = ep_type; | 
|  | 4020 | urb->pipe_info.pipe_dir = ep_dir; | 
|  | 4021 | urb->pipe_info.maxp = maxp; | 
|  | 4022 | urb->pipe_info.maxp_mult = maxp_mult; | 
|  | 4023 | } | 
|  | 4024 |  | 
|  | 4025 | /* | 
|  | 4026 | * NOTE: This function will be removed once the peripheral controller code | 
|  | 4027 | * is integrated and the driver is stable | 
|  | 4028 | */ | 
|  | 4029 | void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) | 
|  | 4030 | { | 
|  | 4031 | #ifdef DEBUG | 
|  | 4032 | struct dwc2_host_chan *chan; | 
|  | 4033 | struct dwc2_hcd_urb *urb; | 
|  | 4034 | struct dwc2_qtd *qtd; | 
|  | 4035 | int num_channels; | 
|  | 4036 | u32 np_tx_status; | 
|  | 4037 | u32 p_tx_status; | 
|  | 4038 | int i; | 
|  | 4039 |  | 
|  | 4040 | num_channels = hsotg->params.host_channels; | 
|  | 4041 | dev_dbg(hsotg->dev, "\n"); | 
|  | 4042 | dev_dbg(hsotg->dev, | 
|  | 4043 | "************************************************************\n"); | 
|  | 4044 | dev_dbg(hsotg->dev, "HCD State:\n"); | 
|  | 4045 | dev_dbg(hsotg->dev, "  Num channels: %d\n", num_channels); | 
|  | 4046 |  | 
|  | 4047 | for (i = 0; i < num_channels; i++) { | 
|  | 4048 | chan = hsotg->hc_ptr_array[i]; | 
|  | 4049 | dev_dbg(hsotg->dev, "  Channel %d:\n", i); | 
|  | 4050 | dev_dbg(hsotg->dev, | 
|  | 4051 | "    dev_addr: %d, ep_num: %d, ep_is_in: %d\n", | 
|  | 4052 | chan->dev_addr, chan->ep_num, chan->ep_is_in); | 
|  | 4053 | dev_dbg(hsotg->dev, "    speed: %d\n", chan->speed); | 
|  | 4054 | dev_dbg(hsotg->dev, "    ep_type: %d\n", chan->ep_type); | 
|  | 4055 | dev_dbg(hsotg->dev, "    max_packet: %d\n", chan->max_packet); | 
|  | 4056 | dev_dbg(hsotg->dev, "    data_pid_start: %d\n", | 
|  | 4057 | chan->data_pid_start); | 
|  | 4058 | dev_dbg(hsotg->dev, "    multi_count: %d\n", chan->multi_count); | 
|  | 4059 | dev_dbg(hsotg->dev, "    xfer_started: %d\n", | 
|  | 4060 | chan->xfer_started); | 
|  | 4061 | dev_dbg(hsotg->dev, "    xfer_buf: %p\n", chan->xfer_buf); | 
|  | 4062 | dev_dbg(hsotg->dev, "    xfer_dma: %08lx\n", | 
|  | 4063 | (unsigned long)chan->xfer_dma); | 
|  | 4064 | dev_dbg(hsotg->dev, "    xfer_len: %d\n", chan->xfer_len); | 
|  | 4065 | dev_dbg(hsotg->dev, "    xfer_count: %d\n", chan->xfer_count); | 
|  | 4066 | dev_dbg(hsotg->dev, "    halt_on_queue: %d\n", | 
|  | 4067 | chan->halt_on_queue); | 
|  | 4068 | dev_dbg(hsotg->dev, "    halt_pending: %d\n", | 
|  | 4069 | chan->halt_pending); | 
|  | 4070 | dev_dbg(hsotg->dev, "    halt_status: %d\n", chan->halt_status); | 
|  | 4071 | dev_dbg(hsotg->dev, "    do_split: %d\n", chan->do_split); | 
|  | 4072 | dev_dbg(hsotg->dev, "    complete_split: %d\n", | 
|  | 4073 | chan->complete_split); | 
|  | 4074 | dev_dbg(hsotg->dev, "    hub_addr: %d\n", chan->hub_addr); | 
|  | 4075 | dev_dbg(hsotg->dev, "    hub_port: %d\n", chan->hub_port); | 
|  | 4076 | dev_dbg(hsotg->dev, "    xact_pos: %d\n", chan->xact_pos); | 
|  | 4077 | dev_dbg(hsotg->dev, "    requests: %d\n", chan->requests); | 
|  | 4078 | dev_dbg(hsotg->dev, "    qh: %p\n", chan->qh); | 
|  | 4079 |  | 
|  | 4080 | if (chan->xfer_started) { | 
|  | 4081 | u32 hfnum, hcchar, hctsiz, hcint, hcintmsk; | 
|  | 4082 |  | 
|  | 4083 | hfnum = dwc2_readl(hsotg, HFNUM); | 
|  | 4084 | hcchar = dwc2_readl(hsotg, HCCHAR(i)); | 
|  | 4085 | hctsiz = dwc2_readl(hsotg, HCTSIZ(i)); | 
|  | 4086 | hcint = dwc2_readl(hsotg, HCINT(i)); | 
|  | 4087 | hcintmsk = dwc2_readl(hsotg, HCINTMSK(i)); | 
|  | 4088 | dev_dbg(hsotg->dev, "    hfnum: 0x%08x\n", hfnum); | 
|  | 4089 | dev_dbg(hsotg->dev, "    hcchar: 0x%08x\n", hcchar); | 
|  | 4090 | dev_dbg(hsotg->dev, "    hctsiz: 0x%08x\n", hctsiz); | 
|  | 4091 | dev_dbg(hsotg->dev, "    hcint: 0x%08x\n", hcint); | 
|  | 4092 | dev_dbg(hsotg->dev, "    hcintmsk: 0x%08x\n", hcintmsk); | 
|  | 4093 | } | 
|  | 4094 |  | 
|  | 4095 | if (!(chan->xfer_started && chan->qh)) | 
|  | 4096 | continue; | 
|  | 4097 |  | 
|  | 4098 | list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) { | 
|  | 4099 | if (!qtd->in_process) | 
|  | 4100 | break; | 
|  | 4101 | urb = qtd->urb; | 
|  | 4102 | dev_dbg(hsotg->dev, "    URB Info:\n"); | 
|  | 4103 | dev_dbg(hsotg->dev, "      qtd: %p, urb: %p\n", | 
|  | 4104 | qtd, urb); | 
|  | 4105 | if (urb) { | 
|  | 4106 | dev_dbg(hsotg->dev, | 
|  | 4107 | "      Dev: %d, EP: %d %s\n", | 
|  | 4108 | dwc2_hcd_get_dev_addr(&urb->pipe_info), | 
|  | 4109 | dwc2_hcd_get_ep_num(&urb->pipe_info), | 
|  | 4110 | dwc2_hcd_is_pipe_in(&urb->pipe_info) ? | 
|  | 4111 | "IN" : "OUT"); | 
|  | 4112 | dev_dbg(hsotg->dev, | 
|  | 4113 | "      Max packet size: %d (%d mult)\n", | 
|  | 4114 | dwc2_hcd_get_maxp(&urb->pipe_info), | 
|  | 4115 | dwc2_hcd_get_maxp_mult(&urb->pipe_info)); | 
|  | 4116 | dev_dbg(hsotg->dev, | 
|  | 4117 | "      transfer_buffer: %p\n", | 
|  | 4118 | urb->buf); | 
|  | 4119 | dev_dbg(hsotg->dev, | 
|  | 4120 | "      transfer_dma: %08lx\n", | 
|  | 4121 | (unsigned long)urb->dma); | 
|  | 4122 | dev_dbg(hsotg->dev, | 
|  | 4123 | "      transfer_buffer_length: %d\n", | 
|  | 4124 | urb->length); | 
|  | 4125 | dev_dbg(hsotg->dev, "      actual_length: %d\n", | 
|  | 4126 | urb->actual_length); | 
|  | 4127 | } | 
|  | 4128 | } | 
|  | 4129 | } | 
|  | 4130 |  | 
|  | 4131 | dev_dbg(hsotg->dev, "  non_periodic_channels: %d\n", | 
|  | 4132 | hsotg->non_periodic_channels); | 
|  | 4133 | dev_dbg(hsotg->dev, "  periodic_channels: %d\n", | 
|  | 4134 | hsotg->periodic_channels); | 
|  | 4135 | dev_dbg(hsotg->dev, "  periodic_usecs: %d\n", hsotg->periodic_usecs); | 
|  | 4136 | np_tx_status = dwc2_readl(hsotg, GNPTXSTS); | 
|  | 4137 | dev_dbg(hsotg->dev, "  NP Tx Req Queue Space Avail: %d\n", | 
|  | 4138 | (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); | 
|  | 4139 | dev_dbg(hsotg->dev, "  NP Tx FIFO Space Avail: %d\n", | 
|  | 4140 | (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); | 
|  | 4141 | p_tx_status = dwc2_readl(hsotg, HPTXSTS); | 
|  | 4142 | dev_dbg(hsotg->dev, "  P Tx Req Queue Space Avail: %d\n", | 
|  | 4143 | (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT); | 
|  | 4144 | dev_dbg(hsotg->dev, "  P Tx FIFO Space Avail: %d\n", | 
|  | 4145 | (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT); | 
|  | 4146 | dwc2_dump_global_registers(hsotg); | 
|  | 4147 | dwc2_dump_host_registers(hsotg); | 
|  | 4148 | dev_dbg(hsotg->dev, | 
|  | 4149 | "************************************************************\n"); | 
|  | 4150 | dev_dbg(hsotg->dev, "\n"); | 
|  | 4151 | #endif | 
|  | 4152 | } | 
|  | 4153 |  | 
|  | 4154 | struct wrapper_priv_data { | 
|  | 4155 | struct dwc2_hsotg *hsotg; | 
|  | 4156 | }; | 
|  | 4157 |  | 
|  | 4158 | /* Gets the dwc2_hsotg from a usb_hcd */ | 
|  | 4159 | static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd) | 
|  | 4160 | { | 
|  | 4161 | struct wrapper_priv_data *p; | 
|  | 4162 |  | 
|  | 4163 | p = (struct wrapper_priv_data *)&hcd->hcd_priv; | 
|  | 4164 | return p->hsotg; | 
|  | 4165 | } | 
|  | 4166 |  | 
|  | 4167 | /** | 
|  | 4168 | * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context | 
|  | 4169 | * | 
|  | 4170 | * This will get the dwc2_tt structure (and ttport) associated with the given | 
|  | 4171 | * context (which is really just a struct urb pointer). | 
|  | 4172 | * | 
|  | 4173 | * The first time this is called for a given TT we allocate memory for our | 
|  | 4174 | * structure.  When everyone is done and has called dwc2_host_put_tt_info() | 
|  | 4175 | * then the refcount for the structure will go to 0 and we'll free it. | 
|  | 4176 | * | 
|  | 4177 | * @hsotg:     The HCD state structure for the DWC OTG controller. | 
|  | 4178 | * @context:   The priv pointer from a struct dwc2_hcd_urb. | 
|  | 4179 | * @mem_flags: Flags for allocating memory. | 
|  | 4180 | * @ttport:    We'll return this device's port number here.  That's used to | 
|  | 4181 | *             reference into the bitmap if we're on a multi_tt hub. | 
|  | 4182 | * | 
|  | 4183 | * Return: a pointer to a struct dwc2_tt.  Don't forget to call | 
|  | 4184 | *         dwc2_host_put_tt_info()!  Returns NULL upon memory alloc failure. | 
|  | 4185 | */ | 
|  | 4187 | struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context, | 
|  | 4188 | gfp_t mem_flags, int *ttport) | 
|  | 4189 | { | 
|  | 4190 | struct urb *urb = context; | 
|  | 4191 | struct dwc2_tt *dwc_tt = NULL; | 
|  | 4192 |  | 
|  | 4193 | if (urb->dev->tt) { | 
|  | 4194 | *ttport = urb->dev->ttport; | 
|  | 4195 |  | 
|  | 4196 | dwc_tt = urb->dev->tt->hcpriv; | 
|  | 4197 | if (!dwc_tt) { | 
|  | 4198 | size_t bitmap_size; | 
|  | 4199 |  | 
|  | 4200 | /* | 
|  | 4201 | * For single_tt we need one schedule.  For multi_tt | 
|  | 4202 | * we need one per port. | 
|  | 4203 | */ | 
|  | 4204 | bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP * | 
|  | 4205 | sizeof(dwc_tt->periodic_bitmaps[0]); | 
|  | 4206 | if (urb->dev->tt->multi) | 
|  | 4207 | bitmap_size *= urb->dev->tt->hub->maxchild; | 
|  | 4208 |  | 
|  | 4209 | dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size, | 
|  | 4210 | mem_flags); | 
|  | 4211 | if (!dwc_tt) | 
|  | 4212 | return NULL; | 
|  | 4213 |  | 
|  | 4214 | dwc_tt->usb_tt = urb->dev->tt; | 
|  | 4215 | dwc_tt->usb_tt->hcpriv = dwc_tt; | 
|  | 4216 | } | 
|  | 4217 |  | 
|  | 4218 | dwc_tt->refcount++; | 
|  | 4219 | } | 
|  | 4220 |  | 
|  | 4221 | return dwc_tt; | 
|  | 4222 | } | 
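|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch only (an assumption, not called anywhere in the | 
|  |  | * driver): a caller that needs the low-speed scheduling bitmap for an urb | 
|  |  | * behind a transaction translator would pair the get/put like this, | 
|  |  | * assuming urb and hsotg are already in hand: | 
|  |  | * | 
|  |  | *	int ttport; | 
|  |  | *	struct dwc2_tt *dwc_tt; | 
|  |  | * | 
|  |  | *	dwc_tt = dwc2_host_get_tt_info(hsotg, urb, GFP_ATOMIC, &ttport); | 
|  |  | *	if (dwc_tt) { | 
|  |  | *		... use dwc_tt->periodic_bitmaps / ttport ... | 
|  |  | *		dwc2_host_put_tt_info(hsotg, dwc_tt); | 
|  |  | *	} | 
|  |  | */ | 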
|  | 4223 |  | 
|  | 4224 | /** | 
|  | 4225 | * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info() | 
|  | 4226 | * | 
|  | 4227 | * Frees resources allocated by dwc2_host_get_tt_info() if all current holders | 
|  | 4228 | * of the structure are done. | 
|  | 4229 | * | 
|  | 4230 | * It's OK to call this with NULL. | 
|  | 4231 | * | 
|  | 4232 | * @hsotg:     The HCD state structure for the DWC OTG controller. | 
|  | 4233 | * @dwc_tt:    The pointer returned by dwc2_host_get_tt_info. | 
|  | 4234 | */ | 
|  | 4235 | void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt) | 
|  | 4236 | { | 
|  | 4237 | /* Model kfree and make put of NULL a no-op */ | 
|  | 4238 | if (!dwc_tt) | 
|  | 4239 | return; | 
|  | 4240 |  | 
|  | 4241 | WARN_ON(dwc_tt->refcount < 1); | 
|  | 4242 |  | 
|  | 4243 | dwc_tt->refcount--; | 
|  | 4244 | if (!dwc_tt->refcount) { | 
|  | 4245 | dwc_tt->usb_tt->hcpriv = NULL; | 
|  | 4246 | kfree(dwc_tt); | 
|  | 4247 | } | 
|  | 4248 | } | 
|  | 4249 |  | 
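|  |  | /* | 
|  |  | * Returns the enumerated speed of the USB device for the given context, | 
|  |  | * which is the struct urb pointer stored in the dwc2_hcd_urb priv field. | 
|  |  | */ | 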
|  | 4250 | int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) | 
|  | 4251 | { | 
|  | 4252 | struct urb *urb = context; | 
|  | 4253 |  | 
|  | 4254 | return urb->dev->speed; | 
|  | 4255 | } | 
|  | 4256 |  | 
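|  |  | /* | 
|  |  | * Adds the periodic bandwidth consumed by an interrupt or isochronous URB | 
|  |  | * to the usb_bus accounting; dwc2_free_bus_bandwidth() below is the | 
|  |  | * inverse operation. | 
|  |  | */ | 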
|  | 4257 | static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw, | 
|  | 4258 | struct urb *urb) | 
|  | 4259 | { | 
|  | 4260 | struct usb_bus *bus = hcd_to_bus(hcd); | 
|  | 4261 |  | 
|  | 4262 | if (urb->interval) | 
|  | 4263 | bus->bandwidth_allocated += bw / urb->interval; | 
|  | 4264 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) | 
|  | 4265 | bus->bandwidth_isoc_reqs++; | 
|  | 4266 | else | 
|  | 4267 | bus->bandwidth_int_reqs++; | 
|  | 4268 | } | 
|  | 4269 |  | 
|  | 4270 | static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw, | 
|  | 4271 | struct urb *urb) | 
|  | 4272 | { | 
|  | 4273 | struct usb_bus *bus = hcd_to_bus(hcd); | 
|  | 4274 |  | 
|  | 4275 | if (urb->interval) | 
|  | 4276 | bus->bandwidth_allocated -= bw / urb->interval; | 
|  | 4277 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) | 
|  | 4278 | bus->bandwidth_isoc_reqs--; | 
|  | 4279 | else | 
|  | 4280 | bus->bandwidth_int_reqs--; | 
|  | 4281 | } | 
|  | 4282 |  | 
|  | 4283 | /* | 
|  | 4284 | * Sets the final status of an URB and returns it to the upper layer. Any | 
|  | 4285 | * required cleanup of the URB is performed. | 
|  | 4286 | * | 
|  | 4287 | * Must be called with interrupts disabled and the spinlock held | 
|  | 4288 | */ | 
|  | 4289 | void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, | 
|  | 4290 | int status) | 
|  | 4291 | { | 
|  | 4292 | struct urb *urb; | 
|  | 4293 | int i; | 
|  | 4294 |  | 
|  | 4295 | if (!qtd) { | 
|  | 4296 | dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__); | 
|  | 4297 | return; | 
|  | 4298 | } | 
|  | 4299 |  | 
|  | 4300 | if (!qtd->urb) { | 
|  | 4301 | dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__); | 
|  | 4302 | return; | 
|  | 4303 | } | 
|  | 4304 |  | 
|  | 4305 | urb = qtd->urb->priv; | 
|  | 4306 | if (!urb) { | 
|  | 4307 | dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__); | 
|  | 4308 | return; | 
|  | 4309 | } | 
|  | 4310 |  | 
|  | 4311 | urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb); | 
|  | 4312 |  | 
|  | 4313 | if (dbg_urb(urb)) | 
|  | 4314 | dev_vdbg(hsotg->dev, | 
|  | 4315 | "%s: urb %p device %d ep %d-%s status %d actual %d\n", | 
|  | 4316 | __func__, urb, usb_pipedevice(urb->pipe), | 
|  | 4317 | usb_pipeendpoint(urb->pipe), | 
|  | 4318 | usb_pipein(urb->pipe) ? "IN" : "OUT", status, | 
|  | 4319 | urb->actual_length); | 
|  | 4320 |  | 
|  | 4321 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { | 
|  | 4322 | urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb); | 
|  | 4323 | for (i = 0; i < urb->number_of_packets; ++i) { | 
|  | 4324 | urb->iso_frame_desc[i].actual_length = | 
|  | 4325 | dwc2_hcd_urb_get_iso_desc_actual_length( | 
|  | 4326 | qtd->urb, i); | 
|  | 4327 | urb->iso_frame_desc[i].status = | 
|  | 4328 | dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i); | 
|  | 4329 | } | 
|  | 4330 | } | 
|  | 4331 |  | 
|  | 4332 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) { | 
|  | 4333 | for (i = 0; i < urb->number_of_packets; i++) | 
|  | 4334 | dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n", | 
|  | 4335 | i, urb->iso_frame_desc[i].status); | 
|  | 4336 | } | 
|  | 4337 |  | 
|  | 4338 | urb->status = status; | 
|  | 4339 | if (!status) { | 
|  | 4340 | if ((urb->transfer_flags & URB_SHORT_NOT_OK) && | 
|  | 4341 | urb->actual_length < urb->transfer_buffer_length) | 
|  | 4342 | urb->status = -EREMOTEIO; | 
|  | 4343 | } | 
|  | 4344 |  | 
|  | 4345 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || | 
|  | 4346 | usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { | 
|  | 4347 | struct usb_host_endpoint *ep = urb->ep; | 
|  | 4348 |  | 
|  | 4349 | if (ep) | 
|  | 4350 | dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg), | 
|  | 4351 | dwc2_hcd_get_ep_bandwidth(hsotg, ep), | 
|  | 4352 | urb); | 
|  | 4353 | } | 
|  | 4354 |  | 
|  | 4355 | usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb); | 
|  | 4356 | urb->hcpriv = NULL; | 
|  | 4357 | kfree(qtd->urb); | 
|  | 4358 | qtd->urb = NULL; | 
|  | 4359 |  | 
|  | 4360 | usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status); | 
|  | 4361 | } | 
|  | 4362 |  | 
|  | 4363 | /* | 
|  | 4364 | * Work queue function for starting the HCD when A-Cable is connected | 
|  | 4365 | */ | 
|  | 4366 | static void dwc2_hcd_start_func(struct work_struct *work) | 
|  | 4367 | { | 
|  | 4368 | struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, | 
|  | 4369 | start_work.work); | 
|  | 4370 |  | 
|  | 4371 | dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg); | 
|  | 4372 | dwc2_host_start(hsotg); | 
|  | 4373 | } | 
|  | 4374 |  | 
|  | 4375 | /* | 
|  | 4376 | * Reset work queue function | 
|  | 4377 | */ | 
|  | 4378 | static void dwc2_hcd_reset_func(struct work_struct *work) | 
|  | 4379 | { | 
|  | 4380 | struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg, | 
|  | 4381 | reset_work.work); | 
|  | 4382 | unsigned long flags; | 
|  | 4383 | u32 hprt0; | 
|  | 4384 |  | 
|  | 4385 | dev_dbg(hsotg->dev, "USB RESET function called\n"); | 
|  | 4386 |  | 
|  | 4387 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4388 |  | 
|  | 4389 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 4390 | hprt0 &= ~HPRT0_RST; | 
|  | 4391 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 4392 | hsotg->flags.b.port_reset_change = 1; | 
|  | 4393 |  | 
|  | 4394 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4395 | } | 
|  | 4396 |  | 
|  | 4397 | /* | 
|  | 4398 | * ========================================================================= | 
|  | 4399 | *  Linux HC Driver Functions | 
|  | 4400 | * ========================================================================= | 
|  | 4401 | */ | 
|  | 4402 |  | 
|  | 4403 | /* | 
|  | 4404 | * Initializes the DWC_otg controller and its root hub and prepares it for host | 
|  | 4405 | * mode operation. Activates the root port. Returns 0 on success and a negative | 
|  | 4406 | * error code on failure. | 
|  | 4407 | */ | 
|  | 4408 | static int _dwc2_hcd_start(struct usb_hcd *hcd) | 
|  | 4409 | { | 
|  | 4410 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4411 | struct usb_bus *bus = hcd_to_bus(hcd); | 
|  | 4412 | unsigned long flags; | 
|  | 4413 | u32 hprt0; | 
|  | 4414 | int ret; | 
|  | 4415 |  | 
|  | 4416 | dev_dbg(hsotg->dev, "DWC OTG HCD START\n"); | 
|  | 4417 |  | 
|  | 4418 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4419 | hsotg->lx_state = DWC2_L0; | 
|  | 4420 | hcd->state = HC_STATE_RUNNING; | 
|  | 4421 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 
|  | 4422 |  | 
|  | 4423 | if (dwc2_is_device_mode(hsotg)) { | 
|  | 4424 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4425 | return 0;	/* why 0 ?? */ | 
|  | 4426 | } | 
|  | 4427 |  | 
|  | 4428 | dwc2_hcd_reinit(hsotg); | 
|  | 4429 |  | 
|  | 4430 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 4431 | /* Has vbus power been turned on in dwc2_core_host_init()? */ | 
|  | 4432 | if (hprt0 & HPRT0_PWR) { | 
|  | 4433 | /* Enable external vbus supply before resuming root hub */ | 
|  | 4434 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4435 | ret = dwc2_vbus_supply_init(hsotg); | 
|  | 4436 | if (ret) | 
|  | 4437 | return ret; | 
|  | 4438 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4439 | } | 
|  | 4440 |  | 
|  | 4441 | /* If a root hub is already attached, ask the hub driver to resume it */ | 
|  | 4442 | if (bus->root_hub) { | 
|  | 4443 | dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n"); | 
|  | 4444 | /* Inform the HUB driver to resume */ | 
|  | 4445 | usb_hcd_resume_root_hub(hcd); | 
|  | 4446 | } | 
|  | 4447 |  | 
|  | 4448 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4449 |  | 
|  | 4450 | return 0; | 
|  | 4451 | } | 
|  | 4452 |  | 
|  | 4453 | /* | 
|  | 4454 | * Halts the DWC_otg host mode operations in a clean manner. USB transfers are | 
|  | 4455 | * stopped. | 
|  | 4456 | */ | 
|  | 4457 | static void _dwc2_hcd_stop(struct usb_hcd *hcd) | 
|  | 4458 | { | 
|  | 4459 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4460 | unsigned long flags; | 
|  | 4461 | u32 hprt0; | 
|  | 4462 |  | 
|  | 4463 | /* Turn off all host-specific interrupts */ | 
|  | 4464 | dwc2_disable_host_interrupts(hsotg); | 
|  | 4465 |  | 
|  | 4466 | /* Wait for interrupt processing to finish */ | 
|  | 4467 | synchronize_irq(hcd->irq); | 
|  | 4468 |  | 
|  | 4469 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4470 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 4471 | /* Ensure hcd is disconnected */ | 
|  | 4472 | dwc2_hcd_disconnect(hsotg, true); | 
|  | 4473 | dwc2_hcd_stop(hsotg); | 
|  | 4474 | hsotg->lx_state = DWC2_L3; | 
|  | 4475 | hcd->state = HC_STATE_HALT; | 
|  | 4476 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 
|  | 4477 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4478 |  | 
|  | 4479 | /* keep balanced supply init/exit by checking HPRT0_PWR */ | 
|  | 4480 | if (hprt0 & HPRT0_PWR) | 
|  | 4481 | dwc2_vbus_supply_exit(hsotg); | 
|  | 4482 |  | 
|  | 4483 | usleep_range(1000, 3000); | 
|  | 4484 | } | 
|  | 4485 |  | 
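|  |  | /* | 
|  |  | * Implements the hc_driver bus_suspend operation. When partial power down | 
|  |  | * is the configured power-down mode this drives port suspend, removes port | 
|  |  | * power, enters partial power down and suspends the PHY; otherwise it only | 
|  |  | * records the L2 (suspended) state. | 
|  |  | */ | 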
|  | 4486 | static int _dwc2_hcd_suspend(struct usb_hcd *hcd) | 
|  | 4487 | { | 
|  | 4488 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4489 | unsigned long flags; | 
|  | 4490 | int ret = 0; | 
|  | 4491 | u32 hprt0; | 
|  | 4492 |  | 
|  | 4493 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4494 |  | 
|  | 4495 | if (dwc2_is_device_mode(hsotg)) | 
|  | 4496 | goto unlock; | 
|  | 4497 |  | 
|  | 4498 | if (hsotg->lx_state != DWC2_L0) | 
|  | 4499 | goto unlock; | 
|  | 4500 |  | 
|  | 4501 | if (!HCD_HW_ACCESSIBLE(hcd)) | 
|  | 4502 | goto unlock; | 
|  | 4503 |  | 
|  | 4504 | if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) | 
|  | 4505 | goto unlock; | 
|  | 4506 |  | 
|  | 4507 | if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL) | 
|  | 4508 | goto skip_power_saving; | 
|  | 4509 |  | 
|  | 4510 | /* | 
|  | 4511 | * Drive USB suspend and disable port power | 
|  | 4512 | * if the USB bus is not already suspended. | 
|  | 4513 | */ | 
|  | 4514 | if (!hsotg->bus_suspended) { | 
|  | 4515 | hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 4516 | hprt0 |= HPRT0_SUSP; | 
|  | 4517 | hprt0 &= ~HPRT0_PWR; | 
|  | 4518 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 4519 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4520 | dwc2_vbus_supply_exit(hsotg); | 
|  | 4521 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4522 | } | 
|  | 4523 |  | 
|  | 4524 | /* Enter partial_power_down */ | 
|  | 4525 | ret = dwc2_enter_partial_power_down(hsotg); | 
|  | 4526 | if (ret) { | 
|  | 4527 | if (ret != -ENOTSUPP) | 
|  | 4528 | dev_err(hsotg->dev, | 
|  | 4529 | "enter partial_power_down failed\n"); | 
|  | 4530 | goto skip_power_saving; | 
|  | 4531 | } | 
|  | 4532 |  | 
|  | 4533 | /* Ask phy to be suspended */ | 
|  | 4534 | if (!IS_ERR_OR_NULL(hsotg->uphy)) { | 
|  | 4535 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4536 | usb_phy_set_suspend(hsotg->uphy, true); | 
|  | 4537 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4538 | } | 
|  | 4539 |  | 
|  | 4540 | /* After entering partial_power_down, hardware is no longer accessible */ | 
|  | 4541 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 
|  | 4542 |  | 
|  | 4543 | skip_power_saving: | 
|  | 4544 | hsotg->lx_state = DWC2_L2; | 
|  | 4545 | unlock: | 
|  | 4546 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4547 |  | 
|  | 4548 | return ret; | 
|  | 4549 | } | 
|  | 4550 |  | 
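|  |  | /* | 
|  |  | * Implements the hc_driver bus_resume operation: the inverse of | 
|  |  | * _dwc2_hcd_suspend(). Exits partial power down when that mode is in use, | 
|  |  | * resumes the PHY, and then either resumes the suspended root port or | 
|  |  | * re-powers the port and waits for a new connect. | 
|  |  | */ | 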
|  | 4551 | static int _dwc2_hcd_resume(struct usb_hcd *hcd) | 
|  | 4552 | { | 
|  | 4553 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4554 | unsigned long flags; | 
|  | 4555 | int ret = 0; | 
|  | 4556 |  | 
|  | 4557 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4558 |  | 
|  | 4559 | if (dwc2_is_device_mode(hsotg)) | 
|  | 4560 | goto unlock; | 
|  | 4561 |  | 
|  | 4562 | if (hsotg->lx_state != DWC2_L2) | 
|  | 4563 | goto unlock; | 
|  | 4564 |  | 
|  | 4565 | if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL) { | 
|  | 4566 | hsotg->lx_state = DWC2_L0; | 
|  | 4567 | goto unlock; | 
|  | 4568 | } | 
|  | 4569 |  | 
|  | 4570 | /* | 
|  | 4571 | * Set HW accessible bit before powering on the controller | 
|  | 4572 | * since an interrupt may be raised. | 
|  | 4573 | */ | 
|  | 4574 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 
|  | 4575 |  | 
|  | 4576 | /* | 
|  | 4577 | * Enable power if not already done. | 
|  | 4578 | * This must not be done with the spinlock held since | 
|  | 4579 | * the duration of this call is unknown. | 
|  | 4580 | */ | 
|  | 4581 | if (!IS_ERR_OR_NULL(hsotg->uphy)) { | 
|  | 4582 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4583 | usb_phy_set_suspend(hsotg->uphy, false); | 
|  | 4584 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4585 | } | 
|  | 4586 |  | 
|  | 4587 | /* Exit partial_power_down */ | 
|  | 4588 | ret = dwc2_exit_partial_power_down(hsotg, true); | 
|  | 4589 | if (ret && (ret != -ENOTSUPP)) | 
|  | 4590 | dev_err(hsotg->dev, "exit partial_power_down failed\n"); | 
|  | 4591 |  | 
|  | 4592 | hsotg->lx_state = DWC2_L0; | 
|  | 4593 |  | 
|  | 4594 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4595 |  | 
|  | 4596 | if (hsotg->bus_suspended) { | 
|  | 4597 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4598 | hsotg->flags.b.port_suspend_change = 1; | 
|  | 4599 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4600 | dwc2_port_resume(hsotg); | 
|  | 4601 | } else { | 
|  | 4602 | dwc2_vbus_supply_init(hsotg); | 
|  | 4603 |  | 
|  | 4604 | /* Wait for controller to correctly update D+/D- level */ | 
|  | 4605 | usleep_range(3000, 5000); | 
|  | 4606 |  | 
|  | 4607 | /* | 
|  | 4608 | * Clear Port Enable and Port Status changes. | 
|  | 4609 | * Enable Port Power. | 
|  | 4610 | */ | 
|  | 4611 | dwc2_writel(hsotg, HPRT0_PWR | HPRT0_CONNDET | | 
|  | 4612 | HPRT0_ENACHG, HPRT0); | 
|  | 4613 | /* Wait for controller to detect Port Connect */ | 
|  | 4614 | usleep_range(5000, 7000); | 
|  | 4615 | } | 
|  | 4616 |  | 
|  | 4617 | return ret; | 
|  | 4618 | unlock: | 
|  | 4619 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4620 |  | 
|  | 4621 | return ret; | 
|  | 4622 | } | 
|  | 4623 |  | 
|  | 4624 | /* Returns the current frame number */ | 
|  | 4625 | static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd) | 
|  | 4626 | { | 
|  | 4627 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4628 |  | 
|  | 4629 | return dwc2_hcd_get_frame_number(hsotg); | 
|  | 4630 | } | 
|  | 4631 |  | 
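|  |  | /* | 
|  |  | * Dumps the pipe, speed, buffer and isochronous descriptor details of an | 
|  |  | * URB for debugging. The body is compiled in only when VERBOSE_DEBUG is | 
|  |  | * defined. | 
|  |  | */ | 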
|  | 4632 | static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, | 
|  | 4633 | char *fn_name) | 
|  | 4634 | { | 
|  | 4635 | #ifdef VERBOSE_DEBUG | 
|  | 4636 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4637 | char *pipetype = NULL; | 
|  | 4638 | char *speed = NULL; | 
|  | 4639 |  | 
|  | 4640 | dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); | 
|  | 4641 | dev_vdbg(hsotg->dev, "  Device address: %d\n", | 
|  | 4642 | usb_pipedevice(urb->pipe)); | 
|  | 4643 | dev_vdbg(hsotg->dev, "  Endpoint: %d, %s\n", | 
|  | 4644 | usb_pipeendpoint(urb->pipe), | 
|  | 4645 | usb_pipein(urb->pipe) ? "IN" : "OUT"); | 
|  | 4646 |  | 
|  | 4647 | switch (usb_pipetype(urb->pipe)) { | 
|  | 4648 | case PIPE_CONTROL: | 
|  | 4649 | pipetype = "CONTROL"; | 
|  | 4650 | break; | 
|  | 4651 | case PIPE_BULK: | 
|  | 4652 | pipetype = "BULK"; | 
|  | 4653 | break; | 
|  | 4654 | case PIPE_INTERRUPT: | 
|  | 4655 | pipetype = "INTERRUPT"; | 
|  | 4656 | break; | 
|  | 4657 | case PIPE_ISOCHRONOUS: | 
|  | 4658 | pipetype = "ISOCHRONOUS"; | 
|  | 4659 | break; | 
|  | 4660 | } | 
|  | 4661 |  | 
|  | 4662 | dev_vdbg(hsotg->dev, "  Endpoint type: %s %s (%s)\n", pipetype, | 
|  | 4663 | usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ? | 
|  | 4664 | "IN" : "OUT"); | 
|  | 4665 |  | 
|  | 4666 | switch (urb->dev->speed) { | 
|  | 4667 | case USB_SPEED_HIGH: | 
|  | 4668 | speed = "HIGH"; | 
|  | 4669 | break; | 
|  | 4670 | case USB_SPEED_FULL: | 
|  | 4671 | speed = "FULL"; | 
|  | 4672 | break; | 
|  | 4673 | case USB_SPEED_LOW: | 
|  | 4674 | speed = "LOW"; | 
|  | 4675 | break; | 
|  | 4676 | default: | 
|  | 4677 | speed = "UNKNOWN"; | 
|  | 4678 | break; | 
|  | 4679 | } | 
|  | 4680 |  | 
|  | 4681 | dev_vdbg(hsotg->dev, "  Speed: %s\n", speed); | 
|  | 4682 | dev_vdbg(hsotg->dev, "  Max packet size: %d (%d mult)\n", | 
|  | 4683 | usb_endpoint_maxp(&urb->ep->desc), | 
|  | 4684 | usb_endpoint_maxp_mult(&urb->ep->desc)); | 
|  | 4685 |  | 
|  | 4686 | dev_vdbg(hsotg->dev, "  Data buffer length: %d\n", | 
|  | 4687 | urb->transfer_buffer_length); | 
|  | 4688 | dev_vdbg(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n", | 
|  | 4689 | urb->transfer_buffer, (unsigned long)urb->transfer_dma); | 
|  | 4690 | dev_vdbg(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n", | 
|  | 4691 | urb->setup_packet, (unsigned long)urb->setup_dma); | 
|  | 4692 | dev_vdbg(hsotg->dev, "  Interval: %d\n", urb->interval); | 
|  | 4693 |  | 
|  | 4694 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { | 
|  | 4695 | int i; | 
|  | 4696 |  | 
|  | 4697 | for (i = 0; i < urb->number_of_packets; i++) { | 
|  | 4698 | dev_vdbg(hsotg->dev, "  ISO Desc %d:\n", i); | 
|  | 4699 | dev_vdbg(hsotg->dev, "    offset: %d, length %d\n", | 
|  | 4700 | urb->iso_frame_desc[i].offset, | 
|  | 4701 | urb->iso_frame_desc[i].length); | 
|  | 4702 | } | 
|  | 4703 | } | 
|  | 4704 | #endif | 
|  | 4705 | } | 
|  | 4706 |  | 
|  | 4707 | /* | 
|  | 4708 | * Starts processing a USB transfer request specified by a USB Request Block | 
|  | 4709 | * (URB). mem_flags indicates the type of memory allocation to use while | 
|  | 4710 | * processing this URB. | 
|  | 4711 | */ | 
|  | 4712 | static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | 
|  | 4713 | gfp_t mem_flags) | 
|  | 4714 | { | 
|  | 4715 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4716 | struct usb_host_endpoint *ep = urb->ep; | 
|  | 4717 | struct dwc2_hcd_urb *dwc2_urb; | 
|  | 4718 | int i; | 
|  | 4719 | int retval; | 
|  | 4720 | int alloc_bandwidth = 0; | 
|  | 4721 | u8 ep_type = 0; | 
|  | 4722 | u32 tflags = 0; | 
|  | 4723 | void *buf; | 
|  | 4724 | unsigned long flags; | 
|  | 4725 | struct dwc2_qh *qh; | 
|  | 4726 | bool qh_allocated = false; | 
|  | 4727 | struct dwc2_qtd *qtd; | 
|  | 4728 |  | 
|  | 4729 | if (dbg_urb(urb)) { | 
|  | 4730 | dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); | 
|  | 4731 | dwc2_dump_urb_info(hcd, urb, "urb_enqueue"); | 
|  | 4732 | } | 
|  | 4733 |  | 
|  | 4734 | if (!ep) | 
|  | 4735 | return -EINVAL; | 
|  | 4736 |  | 
|  | 4737 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || | 
|  | 4738 | usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { | 
|  | 4739 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4740 | if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep)) | 
|  | 4741 | alloc_bandwidth = 1; | 
|  | 4742 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4743 | } | 
|  | 4744 |  | 
|  | 4745 | switch (usb_pipetype(urb->pipe)) { | 
|  | 4746 | case PIPE_CONTROL: | 
|  | 4747 | ep_type = USB_ENDPOINT_XFER_CONTROL; | 
|  | 4748 | break; | 
|  | 4749 | case PIPE_ISOCHRONOUS: | 
|  | 4750 | ep_type = USB_ENDPOINT_XFER_ISOC; | 
|  | 4751 | break; | 
|  | 4752 | case PIPE_BULK: | 
|  | 4753 | ep_type = USB_ENDPOINT_XFER_BULK; | 
|  | 4754 | break; | 
|  | 4755 | case PIPE_INTERRUPT: | 
|  | 4756 | ep_type = USB_ENDPOINT_XFER_INT; | 
|  | 4757 | break; | 
|  | 4758 | } | 
|  | 4759 |  | 
|  | 4760 | dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets, | 
|  | 4761 | mem_flags); | 
|  | 4762 | if (!dwc2_urb) | 
|  | 4763 | return -ENOMEM; | 
|  | 4764 |  | 
|  | 4765 | dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe), | 
|  | 4766 | usb_pipeendpoint(urb->pipe), ep_type, | 
|  | 4767 | usb_pipein(urb->pipe), | 
|  | 4768 | usb_endpoint_maxp(&ep->desc), | 
|  | 4769 | usb_endpoint_maxp_mult(&ep->desc)); | 
|  | 4770 |  | 
|  | 4771 | buf = urb->transfer_buffer; | 
|  | 4772 |  | 
|  | 4773 | if (hcd->self.uses_dma) { | 
|  | 4774 | if (!buf && (urb->transfer_dma & 3)) { | 
|  | 4775 | dev_err(hsotg->dev, | 
|  | 4776 | "%s: unaligned transfer with no transfer_buffer", | 
|  | 4777 | __func__); | 
|  | 4778 | retval = -EINVAL; | 
|  | 4779 | goto fail0; | 
|  | 4780 | } | 
|  | 4781 | } | 
|  | 4782 |  | 
|  | 4783 | if (!(urb->transfer_flags & URB_NO_INTERRUPT)) | 
|  | 4784 | tflags |= URB_GIVEBACK_ASAP; | 
|  | 4785 | if (urb->transfer_flags & URB_ZERO_PACKET) | 
|  | 4786 | tflags |= URB_SEND_ZERO_PACKET; | 
|  | 4787 |  | 
|  | 4788 | dwc2_urb->priv = urb; | 
|  | 4789 | dwc2_urb->buf = buf; | 
|  | 4790 | dwc2_urb->dma = urb->transfer_dma; | 
|  | 4791 | dwc2_urb->length = urb->transfer_buffer_length; | 
|  | 4792 | dwc2_urb->setup_packet = urb->setup_packet; | 
|  | 4793 | dwc2_urb->setup_dma = urb->setup_dma; | 
|  | 4794 | dwc2_urb->flags = tflags; | 
|  | 4795 | dwc2_urb->interval = urb->interval; | 
|  | 4796 | dwc2_urb->status = -EINPROGRESS; | 
|  | 4797 |  | 
|  | 4798 | for (i = 0; i < urb->number_of_packets; ++i) | 
|  | 4799 | dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, | 
|  | 4800 | urb->iso_frame_desc[i].offset, | 
|  | 4801 | urb->iso_frame_desc[i].length); | 
|  | 4802 |  | 
|  | 4803 | urb->hcpriv = dwc2_urb; | 
|  | 4804 | qh = (struct dwc2_qh *)ep->hcpriv; | 
|  | 4805 | /* Create QH for the endpoint if it doesn't exist */ | 
|  | 4806 | if (!qh) { | 
|  | 4807 | qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags); | 
|  | 4808 | if (!qh) { | 
|  | 4809 | retval = -ENOMEM; | 
|  | 4810 | goto fail0; | 
|  | 4811 | } | 
|  | 4812 | ep->hcpriv = qh; | 
|  | 4813 | qh_allocated = true; | 
|  | 4814 | } | 
|  | 4815 |  | 
|  | 4816 | qtd = kzalloc(sizeof(*qtd), mem_flags); | 
|  | 4817 | if (!qtd) { | 
|  | 4818 | retval = -ENOMEM; | 
|  | 4819 | goto fail1; | 
|  | 4820 | } | 
|  | 4821 |  | 
|  | 4822 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4823 | retval = usb_hcd_link_urb_to_ep(hcd, urb); | 
|  | 4824 | if (retval) | 
|  | 4825 | goto fail2; | 
|  | 4826 |  | 
|  | 4827 | retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd); | 
|  | 4828 | if (retval) | 
|  | 4829 | goto fail3; | 
|  | 4830 |  | 
|  | 4831 | if (alloc_bandwidth) { | 
|  | 4832 | dwc2_allocate_bus_bandwidth(hcd, | 
|  | 4833 | dwc2_hcd_get_ep_bandwidth(hsotg, ep), | 
|  | 4834 | urb); | 
|  | 4835 | } | 
|  | 4836 |  | 
|  | 4837 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4838 |  | 
|  | 4839 | return 0; | 
|  | 4840 |  | 
|  | 4841 | fail3: | 
|  | 4842 | dwc2_urb->priv = NULL; | 
|  | 4843 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 
|  | 4844 | if (qh_allocated && qh->channel && qh->channel->qh == qh) | 
|  | 4845 | qh->channel->qh = NULL; | 
|  | 4846 | fail2: | 
|  | 4847 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4848 | urb->hcpriv = NULL; | 
|  | 4849 | kfree(qtd); | 
|  | 4850 | qtd = NULL; | 
|  | 4851 | fail1: | 
|  | 4852 | if (qh_allocated) { | 
|  | 4853 | struct dwc2_qtd *qtd2, *qtd2_tmp; | 
|  | 4854 |  | 
|  | 4855 | ep->hcpriv = NULL; | 
|  | 4856 | dwc2_hcd_qh_unlink(hsotg, qh); | 
|  | 4857 | /* Free each QTD in the QH's QTD list */ | 
|  | 4858 | list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list, | 
|  | 4859 | qtd_list_entry) | 
|  | 4860 | dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh); | 
|  | 4861 | dwc2_hcd_qh_free(hsotg, qh); | 
|  | 4862 | } | 
|  | 4863 | fail0: | 
|  | 4864 | kfree(dwc2_urb); | 
|  | 4865 |  | 
|  | 4866 | return retval; | 
|  | 4867 | } | 
|  | 4868 |  | 
|  | 4869 | /* | 
|  | 4870 | * Aborts/cancels a USB transfer request. Returns 0 on success or a negative error code. | 
|  | 4871 | */ | 
|  | 4872 | static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, | 
|  | 4873 | int status) | 
|  | 4874 | { | 
|  | 4875 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4876 | int rc; | 
|  | 4877 | unsigned long flags; | 
|  | 4878 |  | 
|  | 4879 | dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n"); | 
|  | 4880 | dwc2_dump_urb_info(hcd, urb, "urb_dequeue"); | 
|  | 4881 |  | 
|  | 4882 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4883 |  | 
|  | 4884 | rc = usb_hcd_check_unlink_urb(hcd, urb, status); | 
|  | 4885 | if (rc) | 
|  | 4886 | goto out; | 
|  | 4887 |  | 
|  | 4888 | if (!urb->hcpriv) { | 
|  | 4889 | dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n"); | 
|  | 4890 | goto out; | 
|  | 4891 | } | 
|  | 4892 |  | 
|  | 4893 | rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv); | 
|  | 4894 |  | 
|  | 4895 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 
|  | 4896 |  | 
|  | 4897 | kfree(urb->hcpriv); | 
|  | 4898 | urb->hcpriv = NULL; | 
|  | 4899 |  | 
|  | 4900 | /* Higher layer software sets URB status */ | 
|  | 4901 | spin_unlock(&hsotg->lock); | 
|  | 4902 | usb_hcd_giveback_urb(hcd, urb, status); | 
|  | 4903 | spin_lock(&hsotg->lock); | 
|  | 4904 |  | 
|  | 4905 | dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n"); | 
|  | 4906 | dev_dbg(hsotg->dev, "  urb->status = %d\n", urb->status); | 
|  | 4907 | out: | 
|  | 4908 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4909 |  | 
|  | 4910 | return rc; | 
|  | 4911 | } | 
|  | 4912 |  | 
|  | 4913 | /* | 
|  | 4914 | * Frees resources in the DWC_otg controller related to a given endpoint. Also | 
|  | 4915 | * clears state in the HCD related to the endpoint. Any URBs for the endpoint | 
|  | 4916 | * must already be dequeued. | 
|  | 4917 | */ | 
|  | 4918 | static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd, | 
|  | 4919 | struct usb_host_endpoint *ep) | 
|  | 4920 | { | 
|  | 4921 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4922 |  | 
|  | 4923 | dev_dbg(hsotg->dev, | 
|  | 4924 | "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n", | 
|  | 4925 | ep->desc.bEndpointAddress, ep->hcpriv); | 
|  | 4926 | dwc2_hcd_endpoint_disable(hsotg, ep, 250); | 
|  | 4927 | } | 
|  | 4928 |  | 
|  | 4929 | /* | 
|  | 4930 | * Resets endpoint-specific parameter values. In the current version this is | 
|  | 4931 | * used to reset the data toggle (as a workaround). This function can be | 
|  | 4932 | * called from the usb_clear_halt() routine. | 
|  | 4933 | */ | 
|  | 4934 | static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, | 
|  | 4935 | struct usb_host_endpoint *ep) | 
|  | 4936 | { | 
|  | 4937 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4938 | unsigned long flags; | 
|  | 4939 |  | 
|  | 4940 | dev_dbg(hsotg->dev, | 
|  | 4941 | "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", | 
|  | 4942 | ep->desc.bEndpointAddress); | 
|  | 4943 |  | 
|  | 4944 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4945 | dwc2_hcd_endpoint_reset(hsotg, ep); | 
|  | 4946 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 4947 | } | 
|  | 4948 |  | 
|  | 4949 | /* | 
|  | 4950 | * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if | 
|  | 4951 | * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid | 
|  | 4952 | * interrupt. | 
|  | 4953 | * | 
|  | 4954 | * This function is called by the USB core when an interrupt occurs | 
|  | 4955 | */ | 
|  | 4956 | static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd) | 
|  | 4957 | { | 
|  | 4958 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4959 |  | 
|  | 4960 | return dwc2_handle_hcd_intr(hsotg); | 
|  | 4961 | } | 
|  | 4962 |  | 
|  | 4963 | /* | 
|  | 4964 | * Creates Status Change bitmap for the root hub and root port. The bitmap is | 
|  | 4965 | * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 | 
|  | 4966 | * is the status change indicator for the single root port. Returns 1 if either | 
|  | 4967 | * change indicator is 1, otherwise returns 0. | 
|  | 4968 | */ | 
|  | 4969 | static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) | 
|  | 4970 | { | 
|  | 4971 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4972 |  | 
|  | 4973 | buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1; | 
|  | 4974 | return buf[0] != 0; | 
|  | 4975 | } | 
|  | 4976 |  | 
|  | 4977 | /* Handles hub class-specific requests */ | 
|  | 4978 | static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue, | 
|  | 4979 | u16 windex, char *buf, u16 wlength) | 
|  | 4980 | { | 
|  | 4981 | int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq, | 
|  | 4982 | wvalue, windex, buf, wlength); | 
|  | 4983 | return retval; | 
|  | 4984 | } | 
|  | 4985 |  | 
|  | 4986 | /* Handles hub TT buffer clear completions */ | 
|  | 4987 | static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd, | 
|  | 4988 | struct usb_host_endpoint *ep) | 
|  | 4989 | { | 
|  | 4990 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 4991 | struct dwc2_qh *qh; | 
|  | 4992 | unsigned long flags; | 
|  | 4993 |  | 
|  | 4994 | qh = ep->hcpriv; | 
|  | 4995 | if (!qh) | 
|  | 4996 | return; | 
|  | 4997 |  | 
|  | 4998 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 4999 | qh->tt_buffer_dirty = 0; | 
|  | 5000 |  | 
|  | 5001 | if (hsotg->flags.b.port_connect_status) | 
|  | 5002 | dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL); | 
|  | 5003 |  | 
|  | 5004 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 5005 | } | 
|  | 5006 |  | 
|  | 5007 | /* | 
|  | 5008 | * Sets the bus speed parameter and schedules the connection ID status work | 
|  |  | * so that the new setting takes effect. Valid values for speed are: | 
|  |  | * HPRT0_SPD_HIGH_SPEED: high speed | 
|  |  | * HPRT0_SPD_FULL_SPEED: full speed | 
|  | 5010 | */ | 
|  | 5011 | static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed) | 
|  | 5012 | { | 
|  | 5013 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 5014 |  | 
|  | 5015 | if (hsotg->params.speed == speed) | 
|  | 5016 | return; | 
|  | 5017 |  | 
|  | 5018 | hsotg->params.speed = speed; | 
|  | 5019 | queue_work(hsotg->wq_otg, &hsotg->wf_otg); | 
|  | 5020 | } | 
|  | 5021 |  | 
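|  |  | /* | 
|  |  | * hc_driver free_dev callback, installed only when the change_speed_quirk | 
|  |  | * parameter is set. Restores the default high-speed setting when a device | 
|  |  | * attached through a full- or low-speed parent is removed. | 
|  |  | */ | 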
|  | 5022 | static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | 
|  | 5023 | { | 
|  | 5024 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 5025 |  | 
|  | 5026 | if (!hsotg->params.change_speed_quirk) | 
|  | 5027 | return; | 
|  | 5028 |  | 
|  | 5029 | /* | 
|  | 5030 | * On removal, set speed to default high-speed. | 
|  | 5031 | */ | 
|  | 5032 | if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN && | 
|  | 5033 | udev->parent->speed < USB_SPEED_HIGH) { | 
|  | 5034 | dev_info(hsotg->dev, "Set speed to default high-speed\n"); | 
|  | 5035 | dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED); | 
|  | 5036 | } | 
|  | 5037 | } | 
|  | 5038 |  | 
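|  |  | /* | 
|  |  | * hc_driver reset_device callback, installed only when the | 
|  |  | * change_speed_quirk parameter is set. Selects the high-speed or | 
|  |  | * full-speed port setting to match the speed of the device being reset. | 
|  |  | */ | 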
|  | 5039 | static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | 
|  | 5040 | { | 
|  | 5041 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 
|  | 5042 |  | 
|  | 5043 | if (!hsotg->params.change_speed_quirk) | 
|  | 5044 | return 0; | 
|  | 5045 |  | 
|  | 5046 | if (udev->speed == USB_SPEED_HIGH) { | 
|  | 5047 | dev_info(hsotg->dev, "Set speed to high-speed\n"); | 
|  | 5048 | dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED); | 
|  | 5049 | } else if ((udev->speed == USB_SPEED_FULL || | 
|  | 5050 | udev->speed == USB_SPEED_LOW)) { | 
|  | 5051 | /* | 
|  | 5052 | * Change speed setting to full-speed if there's | 
|  | 5053 | * a full-speed or low-speed device plugged in. | 
|  | 5054 | */ | 
|  | 5055 | dev_info(hsotg->dev, "Set speed to full-speed\n"); | 
|  | 5056 | dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED); | 
|  | 5057 | } | 
|  | 5058 |  | 
|  | 5059 | return 0; | 
|  | 5060 | } | 
|  | 5061 |  | 
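|  |  | /* Host-mode hc_driver operations registered with the USB core */ | 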
|  | 5062 | static struct hc_driver dwc2_hc_driver = { | 
|  | 5063 | .description = "dwc2_hsotg", | 
|  | 5064 | .product_desc = "DWC OTG Controller", | 
|  | 5065 | .hcd_priv_size = sizeof(struct wrapper_priv_data), | 
|  | 5066 |  | 
|  | 5067 | .irq = _dwc2_hcd_irq, | 
|  | 5068 | .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, | 
|  | 5069 |  | 
|  | 5070 | .start = _dwc2_hcd_start, | 
|  | 5071 | .stop = _dwc2_hcd_stop, | 
|  | 5072 | .urb_enqueue = _dwc2_hcd_urb_enqueue, | 
|  | 5073 | .urb_dequeue = _dwc2_hcd_urb_dequeue, | 
|  | 5074 | .endpoint_disable = _dwc2_hcd_endpoint_disable, | 
|  | 5075 | .endpoint_reset = _dwc2_hcd_endpoint_reset, | 
|  | 5076 | .get_frame_number = _dwc2_hcd_get_frame_number, | 
|  | 5077 |  | 
|  | 5078 | .hub_status_data = _dwc2_hcd_hub_status_data, | 
|  | 5079 | .hub_control = _dwc2_hcd_hub_control, | 
|  | 5080 | .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete, | 
|  | 5081 |  | 
|  | 5082 | .bus_suspend = _dwc2_hcd_suspend, | 
|  | 5083 | .bus_resume = _dwc2_hcd_resume, | 
|  | 5084 |  | 
|  | 5085 | .map_urb_for_dma	= dwc2_map_urb_for_dma, | 
|  | 5086 | .unmap_urb_for_dma	= dwc2_unmap_urb_for_dma, | 
|  | 5087 | }; | 
|  | 5088 |  | 
|  | 5089 | /* | 
|  | 5090 | * Frees secondary storage associated with the dwc2_hsotg structure contained | 
|  | 5091 | * in the struct usb_hcd field | 
|  | 5092 | */ | 
|  | 5093 | static void dwc2_hcd_free(struct dwc2_hsotg *hsotg) | 
|  | 5094 | { | 
|  | 5095 | u32 ahbcfg; | 
|  | 5096 | u32 dctl; | 
|  | 5097 | int i; | 
|  | 5098 |  | 
|  | 5099 | dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n"); | 
|  | 5100 |  | 
|  | 5101 | /* Free memory for QH/QTD lists */ | 
|  | 5102 | dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive); | 
|  | 5103 | dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting); | 
|  | 5104 | dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active); | 
|  | 5105 | dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive); | 
|  | 5106 | dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready); | 
|  | 5107 | dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned); | 
|  | 5108 | dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued); | 
|  | 5109 |  | 
|  | 5110 | /* Free memory for the host channels */ | 
|  | 5111 | for (i = 0; i < MAX_EPS_CHANNELS; i++) { | 
|  | 5112 | struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; | 
|  | 5113 |  | 
|  | 5114 | if (chan) { | 
|  | 5115 | dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n", | 
|  | 5116 | i, chan); | 
|  | 5117 | hsotg->hc_ptr_array[i] = NULL; | 
|  | 5118 | kfree(chan); | 
|  | 5119 | } | 
|  | 5120 | } | 
|  | 5121 |  | 
|  | 5122 | if (hsotg->params.host_dma) { | 
|  | 5123 | if (hsotg->status_buf) { | 
|  | 5124 | dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, | 
|  | 5125 | hsotg->status_buf, | 
|  | 5126 | hsotg->status_buf_dma); | 
|  | 5127 | hsotg->status_buf = NULL; | 
|  | 5128 | } | 
|  | 5129 | } else { | 
|  | 5130 | kfree(hsotg->status_buf); | 
|  | 5131 | hsotg->status_buf = NULL; | 
|  | 5132 | } | 
|  | 5133 |  | 
|  | 5134 | ahbcfg = dwc2_readl(hsotg, GAHBCFG); | 
|  | 5135 |  | 
|  | 5136 | /* Disable all interrupts */ | 
|  | 5137 | ahbcfg &= ~GAHBCFG_GLBL_INTR_EN; | 
|  | 5138 | dwc2_writel(hsotg, ahbcfg, GAHBCFG); | 
|  | 5139 | dwc2_writel(hsotg, 0, GINTMSK); | 
|  | 5140 |  | 
|  | 5141 | if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) { | 
|  | 5142 | dctl = dwc2_readl(hsotg, DCTL); | 
|  | 5143 | dctl |= DCTL_SFTDISCON; | 
|  | 5144 | dwc2_writel(hsotg, dctl, DCTL); | 
|  | 5145 | } | 
|  | 5146 |  | 
|  | 5147 | if (hsotg->wq_otg) { | 
|  | 5148 | if (!cancel_work_sync(&hsotg->wf_otg)) | 
|  | 5149 | flush_workqueue(hsotg->wq_otg); | 
|  | 5150 | destroy_workqueue(hsotg->wq_otg); | 
|  | 5151 | } | 
|  | 5152 |  | 
|  | 5153 | del_timer(&hsotg->wkp_timer); | 
|  | 5154 | } | 
|  | 5155 |  | 
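|  |  | /* | 
|  |  | * Releases the HCD: turns off the host-specific interrupts and frees the | 
|  |  | * HCD data structures via dwc2_hcd_free(). | 
|  |  | */ | 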
|  | 5156 | static void dwc2_hcd_release(struct dwc2_hsotg *hsotg) | 
|  | 5157 | { | 
|  | 5158 | /* Turn off all host-specific interrupts */ | 
|  | 5159 | dwc2_disable_host_interrupts(hsotg); | 
|  | 5160 |  | 
|  | 5161 | dwc2_hcd_free(hsotg); | 
|  | 5162 | } | 
|  | 5163 |  | 
|  | 5164 | /* | 
|  | 5165 | * Initializes the HCD. This function allocates memory for and initializes the | 
|  | 5166 | * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the | 
|  | 5167 | * USB bus with the core and calls the hc_driver->start() function. It returns | 
|  | 5168 | * 0 on success and a negative error code on failure. | 
|  | 5169 | */ | 
|  | 5170 | int dwc2_hcd_init(struct dwc2_hsotg *hsotg) | 
|  | 5171 | { | 
|  | 5172 | struct platform_device *pdev = to_platform_device(hsotg->dev); | 
|  | 5173 | struct resource *res; | 
|  | 5174 | struct usb_hcd *hcd; | 
|  | 5175 | struct dwc2_host_chan *channel; | 
|  | 5176 | u32 hcfg; | 
|  | 5177 | int i, num_channels; | 
|  | 5178 | int retval; | 
|  | 5179 |  | 
|  | 5180 | if (usb_disabled()) | 
|  | 5181 | return -ENODEV; | 
|  | 5182 |  | 
|  | 5183 | dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n"); | 
|  | 5184 |  | 
|  | 5185 | retval = -ENOMEM; | 
|  | 5186 |  | 
|  | 5187 | hcfg = dwc2_readl(hsotg, HCFG); | 
|  | 5188 | dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg); | 
|  | 5189 |  | 
|  | 5190 | #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS | 
|  | 5191 | hsotg->frame_num_array = kcalloc(FRAME_NUM_ARRAY_SIZE, | 
|  | 5192 | sizeof(*hsotg->frame_num_array), | 
|  | 5193 | GFP_KERNEL); | 
|  | 5194 | if (!hsotg->frame_num_array) | 
|  | 5195 | goto error1; | 
|  | 5196 | hsotg->last_frame_num_array = | 
|  | 5197 | kcalloc(FRAME_NUM_ARRAY_SIZE, | 
|  | 5198 | sizeof(*hsotg->last_frame_num_array), GFP_KERNEL); | 
|  | 5199 | if (!hsotg->last_frame_num_array) | 
|  | 5200 | goto error1; | 
|  | 5201 | #endif | 
|  | 5202 | hsotg->last_frame_num = HFNUM_MAX_FRNUM; | 
|  | 5203 |  | 
|  | 5204 | /* Check if the bus driver or platform code has set up a dma_mask */ | 
|  | 5205 | if (hsotg->params.host_dma && | 
|  | 5206 | !hsotg->dev->dma_mask) { | 
|  | 5207 | dev_warn(hsotg->dev, | 
|  | 5208 | "dma_mask not set, disabling DMA\n"); | 
|  | 5209 | hsotg->params.host_dma = false; | 
|  | 5210 | hsotg->params.dma_desc_enable = false; | 
|  | 5211 | } | 
|  | 5212 |  | 
|  | 5213 | /* Set device flags indicating whether the HCD supports DMA */ | 
|  | 5214 | if (hsotg->params.host_dma) { | 
|  | 5215 | if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) | 
|  | 5216 | dev_warn(hsotg->dev, "can't set DMA mask\n"); | 
|  | 5217 | if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) | 
|  | 5218 | dev_warn(hsotg->dev, "can't set coherent DMA mask\n"); | 
|  | 5219 | } | 
|  | 5220 |  | 
|  | 5221 | if (hsotg->params.change_speed_quirk) { | 
|  | 5222 | dwc2_hc_driver.free_dev = dwc2_free_dev; | 
|  | 5223 | dwc2_hc_driver.reset_device = dwc2_reset_device; | 
|  | 5224 | } | 
|  | 5225 |  | 
|  | 5226 | hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev)); | 
|  | 5227 | if (!hcd) | 
|  | 5228 | goto error1; | 
|  | 5229 |  | 
|  | 5230 | if (!hsotg->params.host_dma) | 
|  | 5231 | hcd->self.uses_dma = 0; | 
|  | 5232 |  | 
|  | 5233 | hcd->has_tt = 1; | 
|  | 5234 |  | 
|  | 5235 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
|  | 5236 | hcd->rsrc_start = res->start; | 
|  | 5237 | hcd->rsrc_len = resource_size(res); | 
|  | 5238 |  | 
|  | 5239 | ((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg; | 
|  | 5240 | hsotg->priv = hcd; | 
|  | 5241 |  | 
|  | 5242 | /* | 
|  | 5243 | * Disable the global interrupt until all the interrupt handlers are | 
|  | 5244 | * installed | 
|  | 5245 | */ | 
|  | 5246 | dwc2_disable_global_interrupts(hsotg); | 
|  | 5247 |  | 
|  | 5248 | /* Initialize the DWC_otg core, and select the Phy type */ | 
|  | 5249 | retval = dwc2_core_init(hsotg, true); | 
|  | 5250 | if (retval) | 
|  | 5251 | goto error2; | 
|  | 5252 |  | 
|  | 5253 | /* Create new workqueue and init work */ | 
|  | 5254 | retval = -ENOMEM; | 
|  | 5255 | hsotg->wq_otg = alloc_ordered_workqueue("dwc2", 0); | 
|  | 5256 | if (!hsotg->wq_otg) { | 
|  | 5257 | dev_err(hsotg->dev, "Failed to create workqueue\n"); | 
|  | 5258 | goto error2; | 
|  | 5259 | } | 
|  | 5260 | INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change); | 
|  | 5261 |  | 
|  | 5262 | timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0); | 
|  | 5263 |  | 
|  | 5264 | /* Initialize the non-periodic schedule */ | 
|  | 5265 | INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive); | 
|  | 5266 | INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting); | 
|  | 5267 | INIT_LIST_HEAD(&hsotg->non_periodic_sched_active); | 
|  | 5268 |  | 
|  | 5269 | /* Initialize the periodic schedule */ | 
|  | 5270 | INIT_LIST_HEAD(&hsotg->periodic_sched_inactive); | 
|  | 5271 | INIT_LIST_HEAD(&hsotg->periodic_sched_ready); | 
|  | 5272 | INIT_LIST_HEAD(&hsotg->periodic_sched_assigned); | 
|  | 5273 | INIT_LIST_HEAD(&hsotg->periodic_sched_queued); | 
|  | 5274 |  | 
|  | 5275 | INIT_LIST_HEAD(&hsotg->split_order); | 
|  | 5276 |  | 
|  | 5277 | /* | 
|  | 5278 | * Create a host channel descriptor for each host channel implemented | 
|  | 5279 | * in the controller. Initialize the channel descriptor array. | 
|  | 5280 | */ | 
|  | 5281 | INIT_LIST_HEAD(&hsotg->free_hc_list); | 
|  | 5282 | num_channels = hsotg->params.host_channels; | 
|  | 5283 | memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array)); | 
|  | 5284 |  | 
|  | 5285 | for (i = 0; i < num_channels; i++) { | 
|  | 5286 | channel = kzalloc(sizeof(*channel), GFP_KERNEL); | 
|  | 5287 | if (!channel) | 
|  | 5288 | goto error3; | 
|  | 5289 | channel->hc_num = i; | 
|  | 5290 | INIT_LIST_HEAD(&channel->split_order_list_entry); | 
|  | 5291 | hsotg->hc_ptr_array[i] = channel; | 
|  | 5292 | } | 
|  | 5293 |  | 
|  | 5294 | /* Initialize hsotg start work */ | 
|  | 5295 | INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func); | 
|  | 5296 |  | 
|  | 5297 | /* Initialize port reset work */ | 
|  | 5298 | INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func); | 
|  | 5299 |  | 
|  | 5300 | /* | 
|  | 5301 | * Allocate space for storing data on status transactions. Normally no | 
|  | 5302 | * data is sent, but this space acts as a bit bucket. | 
|  | 5305 | */ | 
|  | 5306 | if (hsotg->params.host_dma) | 
|  | 5307 | hsotg->status_buf = dma_alloc_coherent(hsotg->dev, | 
|  | 5308 | DWC2_HCD_STATUS_BUF_SIZE, | 
|  | 5309 | &hsotg->status_buf_dma, GFP_KERNEL); | 
|  | 5310 | else | 
|  | 5311 | hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE, | 
|  | 5312 | GFP_KERNEL); | 
|  | 5313 |  | 
|  | 5314 | if (!hsotg->status_buf) | 
|  | 5315 | goto error3; | 
|  | 5316 |  | 
|  | 5317 | /* | 
|  | 5318 | * Create kmem caches to handle descriptor buffers in descriptor | 
|  | 5319 | * DMA mode. | 
|  | 5320 | * Alignment must be set to 512 bytes. | 
|  | 5321 | */ | 
|  | 5322 | if (hsotg->params.dma_desc_enable || | 
|  | 5323 | hsotg->params.dma_desc_fs_enable) { | 
|  | 5324 | hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc", | 
|  | 5325 | sizeof(struct dwc2_dma_desc) * | 
|  | 5326 | MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA, | 
|  | 5327 | NULL); | 
|  | 5328 | if (!hsotg->desc_gen_cache) { | 
|  | 5329 | dev_err(hsotg->dev, | 
|  | 5330 | "unable to create dwc2 generic desc cache\n"); | 
|  | 5331 |  | 
|  | 5332 | /* | 
|  | 5333 | * Disable descriptor dma mode since it will not be | 
|  | 5334 | * usable. | 
|  | 5335 | */ | 
|  | 5336 | hsotg->params.dma_desc_enable = false; | 
|  | 5337 | hsotg->params.dma_desc_fs_enable = false; | 
|  | 5338 | } | 
|  | 5339 |  | 
|  | 5340 | hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc", | 
|  | 5341 | sizeof(struct dwc2_dma_desc) * | 
|  | 5342 | MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL); | 
|  | 5343 | if (!hsotg->desc_hsisoc_cache) { | 
|  | 5344 | dev_err(hsotg->dev, | 
|  | 5345 | "unable to create dwc2 hs isoc desc cache\n"); | 
|  | 5346 |  | 
|  | 5347 | kmem_cache_destroy(hsotg->desc_gen_cache); | 
|  | 5348 |  | 
|  | 5349 | /* | 
|  | 5350 | * Disable descriptor dma mode since it will not be | 
|  | 5351 | * usable. | 
|  | 5352 | */ | 
|  | 5353 | hsotg->params.dma_desc_enable = false; | 
|  | 5354 | hsotg->params.dma_desc_fs_enable = false; | 
|  | 5355 | } | 
|  | 5356 | } | 
|  | 5357 |  | 
|  | 5358 | if (hsotg->params.host_dma) { | 
|  | 5359 | /* | 
|  | 5360 | * Create a kmem cache to handle non-aligned buffers | 
|  | 5361 | * in Buffer DMA mode. | 
|  | 5362 | */ | 
|  | 5363 | hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma", | 
|  | 5364 | DWC2_KMEM_UNALIGNED_BUF_SIZE, 4, | 
|  | 5365 | SLAB_CACHE_DMA, NULL); | 
|  | 5366 | if (!hsotg->unaligned_cache) | 
|  | 5367 | dev_err(hsotg->dev, | 
|  | 5368 | "unable to create dwc2 unaligned cache\n"); | 
|  | 5369 | } | 
|  | 5370 |  | 
|  | 5371 | hsotg->otg_port = 1; | 
|  | 5372 | hsotg->frame_list = NULL; | 
|  | 5373 | hsotg->frame_list_dma = 0; | 
|  | 5374 | hsotg->periodic_qh_count = 0; | 
|  | 5375 |  | 
|  | 5376 | /* Initialize lx_state to the L3 disconnected state */ | 
|  | 5377 | hsotg->lx_state = DWC2_L3; | 
|  | 5378 |  | 
|  | 5379 | hcd->self.otg_port = hsotg->otg_port; | 
|  | 5380 |  | 
|  | 5381 | /* Don't support SG list at this point */ | 
|  | 5382 | hcd->self.sg_tablesize = 0; | 
|  | 5383 |  | 
|  | 5384 | if (!IS_ERR_OR_NULL(hsotg->uphy)) | 
|  | 5385 | otg_set_host(hsotg->uphy->otg, &hcd->self); | 
|  | 5386 |  | 
|  | 5387 | /* | 
|  | 5388 | * Finish generic HCD initialization and start the HCD. This function | 
|  | 5389 | * allocates the DMA buffer pool, registers the USB bus, requests the | 
|  | 5390 | * IRQ line, and calls hcd_start method. | 
|  | 5391 | */ | 
|  | 5392 | retval = usb_add_hcd(hcd, hsotg->irq, IRQF_SHARED); | 
|  | 5393 | if (retval < 0) | 
|  | 5394 | goto error4; | 
|  | 5395 |  | 
|  | 5396 | device_wakeup_enable(hcd->self.controller); | 
|  | 5397 |  | 
|  | 5398 | dwc2_hcd_dump_state(hsotg); | 
|  | 5399 |  | 
|  | 5400 | dwc2_enable_global_interrupts(hsotg); | 
|  | 5401 |  | 
|  | 5402 | return 0; | 
|  | 5403 |  | 
|  | 5404 | error4: | 
|  | 5405 | kmem_cache_destroy(hsotg->unaligned_cache); | 
|  | 5406 | kmem_cache_destroy(hsotg->desc_hsisoc_cache); | 
|  | 5407 | kmem_cache_destroy(hsotg->desc_gen_cache); | 
|  | 5408 | error3: | 
|  | 5409 | dwc2_hcd_release(hsotg); | 
|  | 5410 | error2: | 
|  | 5411 | usb_put_hcd(hcd); | 
|  | 5412 | error1: | 
|  | 5413 |  | 
|  | 5414 | #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS | 
|  | 5415 | kfree(hsotg->last_frame_num_array); | 
|  | 5416 | kfree(hsotg->frame_num_array); | 
|  | 5417 | #endif | 
|  | 5418 |  | 
|  | 5419 | dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval); | 
|  | 5420 | return retval; | 
|  | 5421 | } | 
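|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (an assumption, not shown in this file): glue code | 
|  |  | * that embeds this HCD is expected to pair the calls roughly as | 
|  |  | * | 
|  |  | *	retval = dwc2_hcd_init(hsotg); | 
|  |  | *	if (retval) | 
|  |  | *		goto error; | 
|  |  | *	... | 
|  |  | *	dwc2_hcd_remove(hsotg); | 
|  |  | * | 
|  |  | * with hsotg->dev, hsotg->irq and the core parameters initialized first. | 
|  |  | */ | 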
|  | 5422 |  | 
|  | 5423 | /* | 
|  | 5424 | * Removes the HCD. | 
|  | 5425 | * Frees memory and resources associated with the HCD and deregisters the bus. | 
|  | 5426 | */ | 
|  | 5427 | void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) | 
|  | 5428 | { | 
|  | 5429 | struct usb_hcd *hcd; | 
|  | 5430 |  | 
|  | 5431 | dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n"); | 
|  | 5432 |  | 
|  | 5433 | hcd = dwc2_hsotg_to_hcd(hsotg); | 
|  | 5434 | dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd); | 
|  | 5435 |  | 
|  | 5436 | if (!hcd) { | 
|  | 5437 | dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n", | 
|  | 5438 | __func__); | 
|  | 5439 | return; | 
|  | 5440 | } | 
|  | 5441 |  | 
|  | 5442 | if (!IS_ERR_OR_NULL(hsotg->uphy)) | 
|  | 5443 | otg_set_host(hsotg->uphy->otg, NULL); | 
|  | 5444 |  | 
|  | 5445 | usb_remove_hcd(hcd); | 
|  | 5446 | hsotg->priv = NULL; | 
|  | 5447 |  | 
|  | 5448 | kmem_cache_destroy(hsotg->unaligned_cache); | 
|  | 5449 | kmem_cache_destroy(hsotg->desc_hsisoc_cache); | 
|  | 5450 | kmem_cache_destroy(hsotg->desc_gen_cache); | 
|  | 5451 |  | 
|  | 5452 | dwc2_hcd_release(hsotg); | 
|  | 5453 | usb_put_hcd(hcd); | 
|  | 5454 |  | 
|  | 5455 | #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS | 
|  | 5456 | kfree(hsotg->last_frame_num_array); | 
|  | 5457 | kfree(hsotg->frame_num_array); | 
|  | 5458 | #endif | 
|  | 5459 | } | 
|  | 5460 |  | 
|  | 5461 | /** | 
|  | 5462 | * dwc2_backup_host_registers() - Backup controller host registers. | 
|  | 5463 | * When suspending the USB bus, the registers need to be backed up | 
|  | 5464 | * if controller power is disabled once suspended. | 
|  | 5465 | * | 
|  | 5466 | * @hsotg: Programming view of the DWC_otg controller | 
|  | 5467 | */ | 
|  | 5468 | int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) | 
|  | 5469 | { | 
|  | 5470 | struct dwc2_hregs_backup *hr; | 
|  | 5471 | int i; | 
|  | 5472 |  | 
|  | 5473 | dev_dbg(hsotg->dev, "%s\n", __func__); | 
|  | 5474 |  | 
|  | 5475 | /* Backup Host regs */ | 
|  | 5476 | hr = &hsotg->hr_backup; | 
|  | 5477 | hr->hcfg = dwc2_readl(hsotg, HCFG); | 
|  | 5478 | hr->haintmsk = dwc2_readl(hsotg, HAINTMSK); | 
|  | 5479 | for (i = 0; i < hsotg->params.host_channels; ++i) | 
|  | 5480 | hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i)); | 
|  | 5481 |  | 
|  | 5482 | hr->hprt0 = dwc2_read_hprt0(hsotg); | 
|  | 5483 | hr->hfir = dwc2_readl(hsotg, HFIR); | 
|  | 5484 | hr->hptxfsiz = dwc2_readl(hsotg, HPTXFSIZ); | 
|  | 5485 | hr->valid = true; | 
|  | 5486 |  | 
|  | 5487 | return 0; | 
|  | 5488 | } | 
|  | 5489 |  | 
|  | 5490 | /** | 
|  | 5491 | * dwc2_restore_host_registers() - Restore controller host registers. | 
|  | 5492 | * When resuming the USB bus, the host registers need to be restored | 
|  | 5493 | * if controller power was disabled while suspended. | 
|  | 5494 | * | 
|  | 5495 | * @hsotg: Programming view of the DWC_otg controller | 
|  | 5496 | */ | 
|  | 5497 | int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) | 
|  | 5498 | { | 
|  | 5499 | struct dwc2_hregs_backup *hr; | 
|  | 5500 | int i; | 
|  | 5501 |  | 
|  | 5502 | dev_dbg(hsotg->dev, "%s\n", __func__); | 
|  | 5503 |  | 
|  | 5504 | /* Restore host regs */ | 
|  | 5505 | hr = &hsotg->hr_backup; | 
|  | 5506 | if (!hr->valid) { | 
|  | 5507 | dev_err(hsotg->dev, "%s: no host registers to restore\n", | 
|  | 5508 | __func__); | 
|  | 5509 | return -EINVAL; | 
|  | 5510 | } | 
|  | 5511 | hr->valid = false; | 
|  | 5512 |  | 
|  | 5513 | dwc2_writel(hsotg, hr->hcfg, HCFG); | 
|  | 5514 | dwc2_writel(hsotg, hr->haintmsk, HAINTMSK); | 
|  | 5515 |  | 
|  | 5516 | for (i = 0; i < hsotg->params.host_channels; ++i) | 
|  | 5517 | dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i)); | 
|  | 5518 |  | 
|  | 5519 | dwc2_writel(hsotg, hr->hprt0, HPRT0); | 
|  | 5520 | dwc2_writel(hsotg, hr->hfir, HFIR); | 
|  | 5521 | dwc2_writel(hsotg, hr->hptxfsiz, HPTXFSIZ); | 
|  | 5522 | hsotg->frame_number = 0; | 
|  | 5523 |  | 
|  | 5524 | return 0; | 
|  | 5525 | } | 
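|  |  |  | 
|  |  | /* | 
|  |  | * Editorial sketch (disabled, not part of the driver): how the | 
|  |  | * backup/restore pair above is typically used around a power-off | 
|  |  | * suspend. example_power_off()/example_power_on() are hypothetical | 
|  |  | * placeholders for whatever actually gates controller power; in the | 
|  |  | * hibernation paths below the global registers are saved and restored | 
|  |  | * as well. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static int example_host_power_cycle(struct dwc2_hsotg *hsotg) | 
|  |  | { | 
|  |  | 	int ret; | 
|  |  |  | 
|  |  | 	ret = dwc2_backup_host_registers(hsotg);	/* sets hr_backup.valid */ | 
|  |  | 	if (ret) | 
|  |  | 		return ret; | 
|  |  |  | 
|  |  | 	example_power_off(hsotg);	/* hypothetical */ | 
|  |  | 	example_power_on(hsotg);	/* hypothetical */ | 
|  |  |  | 
|  |  | 	/* Returns -EINVAL unless a backup was taken first */ | 
|  |  | 	return dwc2_restore_host_registers(hsotg); | 
|  |  | } | 
|  |  | #endif | 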
|  | 5526 |  | 
|  | 5527 | /** | 
|  | 5528 | * dwc2_host_enter_hibernation() - Put the controller into hibernation. | 
|  | 5529 | * | 
|  | 5530 | * @hsotg: Programming view of the DWC_otg controller | 
|  | 5531 | */ | 
|  | 5532 | int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg) | 
|  | 5533 | { | 
|  | 5534 | unsigned long flags; | 
|  | 5535 | int ret = 0; | 
|  | 5536 | u32 hprt0; | 
|  | 5537 | u32 pcgcctl; | 
|  | 5538 | u32 gusbcfg; | 
|  | 5539 | u32 gpwrdn; | 
|  | 5540 |  | 
|  | 5541 | dev_dbg(hsotg->dev, "Preparing host for hibernation\n"); | 
|  | 5542 | ret = dwc2_backup_global_registers(hsotg); | 
|  | 5543 | if (ret) { | 
|  | 5544 | dev_err(hsotg->dev, "%s: failed to backup global registers\n", | 
|  | 5545 | __func__); | 
|  | 5546 | return ret; | 
|  | 5547 | } | 
|  | 5548 | ret = dwc2_backup_host_registers(hsotg); | 
|  | 5549 | if (ret) { | 
|  | 5550 | dev_err(hsotg->dev, "%s: failed to backup host registers\n", | 
|  | 5551 | __func__); | 
|  | 5552 | return ret; | 
|  | 5553 | } | 
|  | 5554 |  | 
|  | 5555 | /* Enter USB Suspend Mode */ | 
|  | 5556 | hprt0 = dwc2_readl(hsotg, HPRT0); | 
|  | 5557 | hprt0 |= HPRT0_SUSP; | 
|  | 5558 | hprt0 &= ~HPRT0_ENA; | 
|  | 5559 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5560 |  | 
|  | 5561 | /* Wait for the HPRT0.PrtSusp register field to be set */ | 
|  | 5562 | if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) | 
|  | 5563 | dev_warn(hsotg->dev, "Suspend wasn't generated\n"); | 
|  | 5564 |  | 
|  | 5565 | /* | 
|  | 5566 | * We need to disable interrupts to prevent any IRQ from being serviced | 
|  | 5567 | * while entering hibernation. | 
|  | 5568 | */ | 
|  | 5569 | spin_lock_irqsave(&hsotg->lock, flags); | 
|  | 5570 | hsotg->lx_state = DWC2_L2; | 
|  | 5571 |  | 
|  | 5572 | gusbcfg = dwc2_readl(hsotg, GUSBCFG); | 
|  | 5573 | if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) { | 
|  | 5574 | /* ULPI interface */ | 
|  | 5575 | /* Suspend the Phy Clock */ | 
|  | 5576 | pcgcctl = dwc2_readl(hsotg, PCGCTL); | 
|  | 5577 | pcgcctl |= PCGCTL_STOPPCLK; | 
|  | 5578 | dwc2_writel(hsotg, pcgcctl, PCGCTL); | 
|  | 5579 | udelay(10); | 
|  | 5580 |  | 
|  | 5581 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5582 | gpwrdn |= GPWRDN_PMUACTV; | 
|  | 5583 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5584 | udelay(10); | 
|  | 5585 | } else { | 
|  | 5586 | /* UTMI+ Interface */ | 
|  | 5587 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5588 | gpwrdn |= GPWRDN_PMUACTV; | 
|  | 5589 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5590 | udelay(10); | 
|  | 5591 |  | 
|  | 5592 | pcgcctl = dwc2_readl(hsotg, PCGCTL); | 
|  | 5593 | pcgcctl |= PCGCTL_STOPPCLK; | 
|  | 5594 | dwc2_writel(hsotg, pcgcctl, PCGCTL); | 
|  | 5595 | udelay(10); | 
|  | 5596 | } | 
|  | 5597 |  | 
|  | 5598 | /* Enable interrupts from wake up logic */ | 
|  | 5599 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5600 | gpwrdn |= GPWRDN_PMUINTSEL; | 
|  | 5601 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5602 | udelay(10); | 
|  | 5603 |  | 
|  | 5604 | /* Unmask host mode interrupts in GPWRDN */ | 
|  | 5605 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5606 | gpwrdn |= GPWRDN_DISCONN_DET_MSK; | 
|  | 5607 | gpwrdn |= GPWRDN_LNSTSCHG_MSK; | 
|  | 5608 | gpwrdn |= GPWRDN_STS_CHGINT_MSK; | 
|  | 5609 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5610 | udelay(10); | 
|  | 5611 |  | 
|  | 5612 | /* Enable Power Down Clamp */ | 
|  | 5613 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5614 | gpwrdn |= GPWRDN_PWRDNCLMP; | 
|  | 5615 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5616 | udelay(10); | 
|  | 5617 |  | 
|  | 5618 | /* Switch off VDD */ | 
|  | 5619 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5620 | gpwrdn |= GPWRDN_PWRDNSWTCH; | 
|  | 5621 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5622 |  | 
|  | 5623 | hsotg->hibernated = 1; | 
|  | 5624 | hsotg->bus_suspended = 1; | 
|  | 5625 | dev_dbg(hsotg->dev, "Host hibernation completed\n"); | 
|  | 5626 | spin_unlock_irqrestore(&hsotg->lock, flags); | 
|  | 5627 | return ret; | 
|  | 5628 | } | 
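|  |  |  | 
|  |  | /* | 
|  |  | * Editorial sketch (disabled, not part of the driver): the sequence | 
|  |  | * above repeats a read-modify-write of GPWRDN followed by a short | 
|  |  | * settle delay. A hypothetical helper capturing that pattern could | 
|  |  | * look like this; dwc2_set_gpwrdn_bits() does not exist in the driver. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static void dwc2_set_gpwrdn_bits(struct dwc2_hsotg *hsotg, u32 bits) | 
|  |  | { | 
|  |  | 	u32 gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  |  |  | 
|  |  | 	gpwrdn |= bits; | 
|  |  | 	dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  |  | 	udelay(10);	/* let the power-down logic settle */ | 
|  |  | } | 
|  |  | #endif | 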
|  | 5629 |  | 
|  | 5630 | /** | 
|  | 5631 | * dwc2_host_exit_hibernation() - Exit the controller from host hibernation. | 
|  | 5632 | * | 
|  | 5633 | * @hsotg: Programming view of the DWC_otg controller | 
|  | 5634 | * @rem_wakeup: indicates whether resume is initiated by Device or Host. | 
|  | 5635 | * @reset: indicates whether resume is initiated by Reset. | 
|  | 5636 | * | 
|  | 5637 | * Return: non-zero if failed to exit hibernation. | 
|  | 5638 | * | 
|  | 5639 | * This function handles exiting from host mode hibernation on | 
|  | 5640 | * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup. | 
|  | 5641 | */ | 
|  | 5642 | int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup, | 
|  | 5643 | int reset) | 
|  | 5644 | { | 
|  | 5645 | u32 gpwrdn; | 
|  | 5646 | u32 hprt0; | 
|  | 5647 | int ret = 0; | 
|  | 5648 | struct dwc2_gregs_backup *gr; | 
|  | 5649 | struct dwc2_hregs_backup *hr; | 
|  | 5650 |  | 
|  | 5651 | gr = &hsotg->gr_backup; | 
|  | 5652 | hr = &hsotg->hr_backup; | 
|  | 5653 |  | 
|  | 5654 | dev_dbg(hsotg->dev, | 
|  | 5655 | "%s: called with rem_wakeup = %d reset = %d\n", | 
|  | 5656 | __func__, rem_wakeup, reset); | 
|  | 5657 |  | 
|  | 5658 | dwc2_hib_restore_common(hsotg, rem_wakeup, 1); | 
|  | 5659 | hsotg->hibernated = 0; | 
|  | 5660 |  | 
|  | 5661 | /* | 
|  | 5662 | * This step is not described in the functional spec, but without this | 
|  | 5663 | * delay mode-mismatch interrupts occur because, just after restore, the | 
|  | 5664 | * core is still in device mode (gintsts.curmode == 0). | 
|  | 5665 | */ | 
|  | 5666 | mdelay(100); | 
|  | 5667 |  | 
|  | 5668 | /* Clear all pending interrupts */ | 
|  | 5669 | dwc2_writel(hsotg, 0xffffffff, GINTSTS); | 
|  | 5670 |  | 
|  | 5671 | /* De-assert Restore */ | 
|  | 5672 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5673 | gpwrdn &= ~GPWRDN_RESTORE; | 
|  | 5674 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5675 | udelay(10); | 
|  | 5676 |  | 
|  | 5677 | /* Restore GUSBCFG, HCFG */ | 
|  | 5678 | dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG); | 
|  | 5679 | dwc2_writel(hsotg, hr->hcfg, HCFG); | 
|  | 5680 |  | 
|  | 5681 | /* De-assert Wakeup Logic */ | 
|  | 5682 | gpwrdn = dwc2_readl(hsotg, GPWRDN); | 
|  | 5683 | gpwrdn &= ~GPWRDN_PMUACTV; | 
|  | 5684 | dwc2_writel(hsotg, gpwrdn, GPWRDN); | 
|  | 5685 | udelay(10); | 
|  | 5686 |  | 
|  | 5687 | hprt0 = hr->hprt0; | 
|  | 5688 | hprt0 |= HPRT0_PWR; | 
|  | 5689 | hprt0 &= ~HPRT0_ENA; | 
|  | 5690 | hprt0 &= ~HPRT0_SUSP; | 
|  | 5691 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5692 |  | 
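|  |  | /* | 
|  |  | * Re-derive the same base value from the backup so the reset/resume | 
|  |  | * handling below starts from port powered, not enabled, not suspended. | 
|  |  | */ | 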
|  | 5693 | hprt0 = hr->hprt0; | 
|  | 5694 | hprt0 |= HPRT0_PWR; | 
|  | 5695 | hprt0 &= ~HPRT0_ENA; | 
|  | 5696 | hprt0 &= ~HPRT0_SUSP; | 
|  | 5697 |  | 
|  | 5698 | if (reset) { | 
|  | 5699 | hprt0 |= HPRT0_RST; | 
|  | 5700 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5701 |  | 
|  | 5702 | /* Wait for Resume time and then program HPRT again */ | 
|  | 5703 | mdelay(60); | 
|  | 5704 | hprt0 &= ~HPRT0_RST; | 
|  | 5705 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5706 | } else { | 
|  | 5707 | hprt0 |= HPRT0_RES; | 
|  | 5708 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5709 |  | 
|  | 5710 | /* Wait for Resume time and then program HPRT again */ | 
|  | 5711 | mdelay(100); | 
|  | 5712 | hprt0 &= ~HPRT0_RES; | 
|  | 5713 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5714 | } | 
|  | 5715 | /* Clear all interrupt status */ | 
|  | 5716 | hprt0 = dwc2_readl(hsotg, HPRT0); | 
|  | 5717 | hprt0 |= HPRT0_CONNDET; | 
|  | 5718 | hprt0 |= HPRT0_ENACHG; | 
|  | 5719 | hprt0 &= ~HPRT0_ENA; | 
|  | 5720 | dwc2_writel(hsotg, hprt0, HPRT0); | 
|  | 5721 |  | 
|  | 5722 | hprt0 = dwc2_readl(hsotg, HPRT0); | 
|  | 5723 |  | 
|  | 5724 | /* Clear all pending interrupts */ | 
|  | 5725 | dwc2_writel(hsotg, 0xffffffff, GINTSTS); | 
|  | 5726 |  | 
|  | 5727 | /* Restore global registers */ | 
|  | 5728 | ret = dwc2_restore_global_registers(hsotg); | 
|  | 5729 | if (ret) { | 
|  | 5730 | dev_err(hsotg->dev, "%s: failed to restore registers\n", | 
|  | 5731 | __func__); | 
|  | 5732 | return ret; | 
|  | 5733 | } | 
|  | 5734 |  | 
|  | 5735 | /* Restore host registers */ | 
|  | 5736 | ret = dwc2_restore_host_registers(hsotg); | 
|  | 5737 | if (ret) { | 
|  | 5738 | dev_err(hsotg->dev, "%s: failed to restore host registers\n", | 
|  | 5739 | __func__); | 
|  | 5740 | return ret; | 
|  | 5741 | } | 
|  | 5742 |  | 
|  | 5743 | dwc2_hcd_rem_wakeup(hsotg); | 
|  | 5744 |  | 
|  | 5745 | hsotg->hibernated = 0; | 
|  | 5746 | hsotg->bus_suspended = 0; | 
|  | 5747 | hsotg->lx_state = DWC2_L0; | 
|  | 5748 | dev_dbg(hsotg->dev, "Host hibernation restore complete\n"); | 
|  | 5749 | return ret; | 
|  | 5750 | } |
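|  |  |  | 
|  |  | /* | 
|  |  | * Editorial sketch (disabled, not part of the driver): how the two | 
|  |  | * hibernation entry points above might be paired by a suspend/resume | 
|  |  | * path. example_bus_suspend()/example_bus_resume() are hypothetical | 
|  |  | * wrappers; only the dwc2_host_*_hibernation() calls are defined in | 
|  |  | * this file. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static int example_bus_suspend(struct dwc2_hsotg *hsotg) | 
|  |  | { | 
|  |  | 	/* Saves global and host registers, then gates power via GPWRDN */ | 
|  |  | 	return dwc2_host_enter_hibernation(hsotg); | 
|  |  | } | 
|  |  |  | 
|  |  | static int example_bus_resume(struct dwc2_hsotg *hsotg, bool remote_wakeup) | 
|  |  | { | 
|  |  | 	/* rem_wakeup = 1 for device-initiated resume, reset = 0 */ | 
|  |  | 	return dwc2_host_exit_hibernation(hsotg, remote_wakeup ? 1 : 0, 0); | 
|  |  | } | 
|  |  | #endif | 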