// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	void		*vaddr;

	vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
				   size, dma_handle, flags);
	/* Don't dereference a failed allocation */
	if (vaddr)
		memset(vaddr, 0, size);
	return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
		      void *cpu_addr, dma_addr_t dma_handle)
{
	if (cpu_addr)
		dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
				  size, cpu_addr, dma_handle);
}

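/*
 * Build the four DbC string descriptors in the string table and return
 * their lengths packed into a single word: string0 length in byte 0,
 * then manufacturer, product and serial lengths in bytes 1-3, which is
 * the layout of the length field in the DbC Info Context.
 */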
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0 descriptor: US English (LANGID 0x0409) only */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

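/*
 * Fill in the DbC Info Context (string descriptor DMA addresses and
 * their packed lengths) and the two bulk endpoint contexts, then point
 * the controller at them through the dccp, devinfo1 and devinfo2
 * registers.
 */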
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
	struct xhci_dbc		*dbc;
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	dbc = xhci->dbc;
	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() byte-swaps as needed; don't pre-swap with cpu_to_le32() */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

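/*
 * Complete a request and hand it back to its submitter.  The completion
 * callback runs with dbc->lock dropped, so callers that give back
 * requests while walking the pending list use the _safe iterator.
 */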
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep		*dep = req->dep;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_hcd		*xhci = dbc->xhci;
	struct device		*dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(xhci, req);
	spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	/* Turn the pending TRB into a no-op, preserving the cycle bit */
	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request	*req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

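/*
 * Write one TRB at the ring's enqueue pointer and advance it.  When the
 * next TRB is the link TRB, toggle the link TRB's cycle bit, wrap the
 * enqueue pointer back to the start of the segment, and flip the ring's
 * cycle state.
 */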
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

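/*
 * Queue a single normal TRB for a bulk transfer.  The TRB is first
 * written with an inverted cycle bit, and only after a write barrier is
 * the correct cycle bit set, so the controller can never see a
 * half-written TRB as valid.
 */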
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/*
	 * "control" is host-endian here; xhci_dbc_queue_trb() does the
	 * cpu_to_le32() conversion, so don't pre-swap the cycle bit.
	 */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int			ret;
	struct device		*dev;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_hcd		*xhci = dbc->xhci;

	dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		xhci_err(xhci, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		xhci_err(xhci, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

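/*
 * Queue a request on a DbC bulk endpoint.  Requests are only accepted
 * while the DbC is in the DS_CONFIGURED state.
 *
 * A sketch of a caller (buffer, length and completion handler are
 * hypothetical; the real user is the DbC TTY glue):
 *
 *	req = dbc_alloc_request(dep, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	(called with dbc->lock dropped)
 *	ret = dbc_ep_queue(dep, req, GFP_KERNEL);
 */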
int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = dep->dbc;
	int			ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
	struct dbc_ep		*dep;
	struct xhci_dbc		*dbc = xhci->dbc;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
	xhci_dbc_do_eps_init(xhci, BULK_OUT);
	xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

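/*
 * Allocate everything the DbC needs before it can be enabled: the event
 * and transfer rings, the event ring segment table, the context data
 * structure and the string table; then program the ERST registers and
 * populate the contexts.  On failure, everything allocated so far is
 * unwound in reverse order.
 */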
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct xhci_dbc		*dbc = xhci->dbc;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dbc_dma_alloc_coherent(xhci,
					     dbc->string_size,
					     &dbc->string_dma,
					     flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	xhci_write_64(xhci, deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(xhci, string_length);

	mmiowb();

	xhci_dbc_eps_init(xhci);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	xhci_free_erst(xhci, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(xhci);

	if (dbc->string) {
		dbc_dma_free_coherent(xhci,
				      dbc->string_size,
				      dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	xhci_free_erst(xhci, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

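/*
 * Take the DbC from DS_DISABLED to DS_ENABLED: clear the control
 * register and wait for the enable bit to read back as zero, set up the
 * DbC data structures, then set the enable and port-enable bits and
 * wait for the enable bit to stick.  Called with dbc->lock held, hence
 * the GFP_ATOMIC allocation.
 */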
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	u32			ctrl;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(xhci);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(xhci);
		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
	}
}

static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
	u32			portsc;
	struct xhci_dbc		*dbc = xhci->dbc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		xhci_info(xhci, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		xhci_info(xhci, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		xhci_info(xhci, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		xhci_info(xhci, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

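/*
 * Handle a transfer event: decode the completion code into a status,
 * find the pending request whose TRB matches the event, and give it
 * back with the actual transfer length filled in.
 */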
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(xhci) : get_in_ep(xhci);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		xhci_warn(xhci, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		xhci_err(xhci, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request (the event buffer field is LE): */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
			req = r;
			break;
		}
	}

	if (!req) {
		xhci_warn(xhci, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

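/*
 * Run the DbC state machine and, once configured, drain the event ring.
 * Returns EVT_GSER when the device has just been configured (the caller
 * then registers the TTY), EVT_DISC on unplug or port reset, EVT_ERR
 * when event handling should stop, and EVT_DONE otherwise.
 */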
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	struct xhci_hcd		*xhci = dbc->xhci;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			xhci_info(xhci, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			xhci_info(xhci, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			xhci_info(xhci, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			xhci_info(xhci, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			xhci_info(xhci, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(xhci, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(xhci, evt);
			break;
		default:
			break;
		}

		inc_deq(xhci, dbc->ring_evt);
		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		xhci_write_64(xhci, deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

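/*
 * Delayed-work handler that polls the DbC.  It reschedules itself every
 * jiffy until the state machine reports an error, registering the TTY
 * device when the DbC becomes configured and unregistering it again on
 * disconnect.
 */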
static void xhci_dbc_handle_events(struct work_struct *work)
{
	int			ret;
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	struct xhci_hcd		*xhci;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	xhci = dbc->xhci;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(xhci);
		if (ret) {
			xhci_err(xhci, "failed to alloc tty device\n");
			break;
		}

		xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(xhci);
		break;
	case EVT_DONE:
		break;
	default:
		xhci_info(xhci, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

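/*
 * Find the xHCI debug capability in the extended capability list and
 * allocate the xhci_dbc structure.  Back off if the DbC is already
 * enabled (e.g. claimed by firmware or an early printk driver), or if
 * another caller has already installed xhci->dbc.
 */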
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* Leave the DbC alone if it is already in use by someone else. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

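/*
 * The "dbc" sysfs attribute reports the DbC state and lets user space
 * turn the DbC on and off.  For a PCI xHCI controller the usage looks
 * roughly like this (the device path is illustrative):
 *
 *	# cat /sys/bus/pci/devices/0000:00:14.0/dbc
 *	disabled
 *	# echo enable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */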
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char		*p;
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

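/*
 * Power management: a DbC that was up and configured at suspend time is
 * shut down and marked for restart, so resume can bring it back up.
 * The TTY device goes away and reappears across the suspend/resume
 * cycle.
 */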
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */