// SPDX-License-Identifier: GPL-2.0-or-later
/*
        Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
        <http://rt2x00.serialmonkey.com>

 */

/*
        Module: rt2x00usb
        Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

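/*
 * Check whether a USB transfer error means the device is gone.
 * -ENODEV and -ENOENT always indicate removal; once the device has
 * been started, more than three consecutive protocol errors
 * (-EPROTO / -ETIMEDOUT) are treated the same way.
 */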
static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
{
        if (status == -ENODEV || status == -ENOENT)
                return true;

        if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
                return false;

        if (status == -EPROTO || status == -ETIMEDOUT)
                rt2x00dev->num_proto_errs++;
        else
                rt2x00dev->num_proto_errs = 0;

        if (rt2x00dev->num_proto_errs > 3)
                return true;

        return false;
}

/*
 * Interfacing with the HW.
 */
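/*
 * Issue a vendor-specific control request. The request is retried,
 * with half the caller's timeout per attempt, until it succeeds or
 * the full timeout expires; fatal USB errors mark the device as no
 * longer present.
 */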
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
                             const u8 request, const u8 requesttype,
                             const u16 offset, const u16 value,
                             void *buffer, const u16 buffer_length,
                             const int timeout)
{
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        int status;
        unsigned int pipe =
            (requesttype == USB_VENDOR_REQUEST_IN) ?
            usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
        unsigned long expire = jiffies + msecs_to_jiffies(timeout);

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return -ENODEV;

        do {
                status = usb_control_msg(usb_dev, pipe, request, requesttype,
                                         value, offset, buffer, buffer_length,
                                         timeout / 2);
                if (status >= 0)
                        return 0;

                if (rt2x00usb_check_usb_error(rt2x00dev, status)) {
                        /* Device has disappeared. */
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                        break;
                }
        } while (time_before(jiffies, expire));

        rt2x00_err(rt2x00dev,
                   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
                   request, offset, status);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

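/*
 * Vendor request that transfers data through the pre-allocated CSR
 * cache buffer; the caller must hold csr_mutex.
 */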
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
                                   const u8 request, const u8 requesttype,
                                   const u16 offset, void *buffer,
                                   const u16 buffer_length, const int timeout)
{
        int status;

        BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

        /*
         * Check for Cache availability.
         */
        if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
                rt2x00_err(rt2x00dev, "CSR cache not available\n");
                return -ENOMEM;
        }

        if (requesttype == USB_VENDOR_REQUEST_OUT)
                memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

        status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
                                          offset, 0, rt2x00dev->csr.cache,
                                          buffer_length, timeout);

        if (!status && requesttype == USB_VENDOR_REQUEST_IN)
                memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

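/*
 * Vendor request for arbitrarily sized buffers: takes csr_mutex and
 * splits the transfer into chunks of at most CSR_CACHE_SIZE bytes.
 */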
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
                                  const u8 request, const u8 requesttype,
                                  const u16 offset, void *buffer,
                                  const u16 buffer_length)
{
        int status = 0;
        unsigned char *tb;
        u16 off, len, bsize;

        mutex_lock(&rt2x00dev->csr_mutex);

        tb = (char *)buffer;
        off = offset;
        len = buffer_length;
        while (len && !status) {
                bsize = min_t(u16, CSR_CACHE_SIZE, len);
                status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
                                                        requesttype, off, tb,
                                                        bsize, REGISTER_TIMEOUT);

                tb += bsize;
                len -= bsize;
                off += bsize;
        }

        mutex_unlock(&rt2x00dev->csr_mutex);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

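/*
 * Poll an indirect register until the given busy field clears.
 * Returns 1 on success, 0 when the register stays busy (in which
 * case *reg is set to ~0), and -ENODEV when the device is gone.
 */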
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
                           const unsigned int offset,
                           const struct rt2x00_field32 field,
                           u32 *reg)
{
        unsigned int i;

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return -ENODEV;

        for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
                *reg = rt2x00usb_register_read_lock(rt2x00dev, offset);
                if (!rt2x00_get_field32(*reg, field))
                        return 1;
                udelay(REGISTER_BUSY_DELAY);
        }

        rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
                   offset, *reg);
        *reg = ~0;

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);

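/*
 * Asynchronous register read: a control URB is submitted without
 * blocking, and the completion callback hands the value to the
 * caller-supplied callback. If that callback returns true, the same
 * URB is resubmitted to read the register again.
 */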
struct rt2x00_async_read_data {
        __le32 reg;
        struct usb_ctrlrequest cr;
        struct rt2x00_dev *rt2x00dev;
        bool (*callback)(struct rt2x00_dev *, int, u32);
};

static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
        struct rt2x00_async_read_data *rd = urb->context;
        if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
                usb_anchor_urb(urb, rd->rt2x00dev->anchor);
                if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
                        usb_unanchor_urb(urb);
                        kfree(rd);
                }
        } else
                kfree(rd);
}

void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
                                   const unsigned int offset,
                                   bool (*callback)(struct rt2x00_dev*, int, u32))
{
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        struct urb *urb;
        struct rt2x00_async_read_data *rd;

        rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
        if (!rd)
                return;

        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
                kfree(rd);
                return;
        }

        rd->rt2x00dev = rt2x00dev;
        rd->callback = callback;
        rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
        rd->cr.bRequest = USB_MULTI_READ;
        rd->cr.wValue = 0;
        rd->cr.wIndex = cpu_to_le16(offset);
        rd->cr.wLength = cpu_to_le16(sizeof(u32));

        usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
                             (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
                             rt2x00usb_register_read_async_cb, rd);
        usb_anchor_urb(urb, rt2x00dev->anchor);
        if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
                usb_unanchor_urb(urb);
                kfree(rd);
        }
        usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);

/*
 * TX data handlers.
 */
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
{
        /*
         * Even if the transfer to the hardware succeeded, it does not
         * mean the frame was sent out correctly. It only means the
         * frame was successfully pushed to the hardware; we have no
         * way to determine the transmission status right now.
         * (Only indirectly, by looking at the failed TX counters
         * in the registers.)
         */
        if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
                rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
        else
                rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
}

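/*
 * Deferred TX completion: walk every TX queue and complete all
 * entries that the device has finished with and that are still
 * waiting for their status to be reported.
 */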
static void rt2x00usb_work_txdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, txdone_work);
        struct data_queue *queue;
        struct queue_entry *entry;

        tx_queue_for_each(rt2x00dev, queue) {
                while (!rt2x00queue_empty(queue)) {
                        entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

                        if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
                            !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
                                break;

                        rt2x00usb_work_txdone_entry(entry);
                }
        }
}

static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;
        /*
         * Check if the frame was correctly uploaded
         */
        if (urb->status)
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
        /*
         * Report the frame as DMA done
         */
        rt2x00lib_dmadone(entry);

        if (rt2x00dev->ops->lib->tx_dma_done)
                rt2x00dev->ops->lib->tx_dma_done(entry);
        /*
         * Schedule the delayed work for reading the TX status
         * from the device.
         */
        if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) ||
            !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
                queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}

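/*
 * Hand a pending TX entry to the USB layer: pad the frame to the
 * length requested by the driver, fill the bulk-out URB and submit
 * it. On failure the entry is flagged as failed and completed as
 * DMA done.
 */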
static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        u32 length;
        int status;

        if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
            test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
                return false;

        /*
         * USB devices require certain padding at the end of each frame
         * and urb. Those paddings are not included in skbs. Pass entry
         * to the driver to determine what the overall length should be.
         */
        length = rt2x00dev->ops->lib->get_tx_data_len(entry);

        status = skb_padto(entry->skb, length);
        if (unlikely(status)) {
                /* TODO: report something more appropriate than IO_FAILED. */
                rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                rt2x00lib_dmadone(entry);

                return false;
        }

        usb_fill_bulk_urb(entry_priv->urb, usb_dev,
                          usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
                          entry->skb->data, length,
                          rt2x00usb_interrupt_txdone, entry);

        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
                if (rt2x00usb_check_usb_error(rt2x00dev, status))
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                rt2x00lib_dmadone(entry);
        }

        return false;
}

/*
 * RX data handlers.
 */
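/*
 * Deferred RX completion: hand every finished RX entry to rt2x00lib
 * for further processing, until an entry is reached that the device
 * still owns.
 */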
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
        struct rt2x00_dev *rt2x00dev =
            container_of(work, struct rt2x00_dev, rxdone_work);
        struct queue_entry *entry;
        struct skb_frame_desc *skbdesc;
        u8 rxd[32];

        while (!rt2x00queue_empty(rt2x00dev->rx)) {
                entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

                if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                        break;

                /*
                 * Fill in desc fields of the skb descriptor
                 */
                skbdesc = get_skb_frame_desc(entry->skb);
                skbdesc->desc = rxd;
                skbdesc->desc_len = entry->queue->desc_size;

                /*
                 * Send the frame to rt2x00lib for further processing.
                 */
                rt2x00lib_rxdone(entry, GFP_KERNEL);
        }
}

static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        /*
         * Check if the received data is simply too small
         * to be actually valid, or if the urb is signaling
         * a problem.
         */
        if (urb->actual_length < entry->queue->desc_size || urb->status)
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

        /*
         * Report the frame as DMA done
         */
        rt2x00lib_dmadone(entry);

        /*
         * Schedule the delayed work for processing RX data
         */
        queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}

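/*
 * Give an RX entry back to the device: mark it as device-owned,
 * fill the bulk-in URB with the entry's skb and submit it.
 */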
static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        int status;

        if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return false;

        rt2x00lib_dmastart(entry);

        usb_fill_bulk_urb(entry_priv->urb, usb_dev,
                          usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
                          entry->skb->data, entry->skb->len,
                          rt2x00usb_interrupt_rxdone, entry);

        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
                if (rt2x00usb_check_usb_error(rt2x00dev, status))
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
                rt2x00lib_dmadone(entry);
        }

        return false;
}

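/*
 * Kick a queue: submit URBs for all pending TX entries, or refill
 * the RX queue with empty entries for the device to use.
 */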
void rt2x00usb_kick_queue(struct data_queue *queue)
{
        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                if (!rt2x00queue_empty(queue))
                        rt2x00queue_for_each_entry(queue,
                                                   Q_INDEX_DONE,
                                                   Q_INDEX,
                                                   NULL,
                                                   rt2x00usb_kick_tx_entry);
                break;
        case QID_RX:
                if (!rt2x00queue_full(queue))
                        rt2x00queue_for_each_entry(queue,
                                                   Q_INDEX,
                                                   Q_INDEX_DONE,
                                                   NULL,
                                                   rt2x00usb_kick_rx_entry);
                break;
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);

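/*
 * Cancel the URB of an entry that is still owned by the device,
 * including the guardian URB for beacon entries when the driver
 * requires one.
 */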
static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return false;

        usb_kill_urb(entry_priv->urb);

        /*
         * Kill guardian urb (if required by driver).
         */
        if ((entry->queue->qid == QID_BEACON) &&
            (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)))
                usb_kill_urb(bcn_priv->guardian_urb);

        return false;
}

void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
{
        struct work_struct *completion;
        unsigned int i;

        if (drop)
                rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
                                           rt2x00usb_flush_entry);

        /*
         * Obtain the queue completion handler.
         */
        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                completion = &queue->rt2x00dev->txdone_work;
                break;
        case QID_RX:
                completion = &queue->rt2x00dev->rxdone_work;
                break;
        default:
                return;
        }

        for (i = 0; i < 10; i++) {
                /*
                 * Check if the driver is already done, otherwise we
                 * have to sleep a little while to give the driver/hw
                 * the opportunity to complete the interrupt processing
                 * itself.
                 */
                if (rt2x00queue_empty(queue))
                        break;

                /*
                 * Schedule the completion handler manually; when this
                 * worker function runs, it should clean up the queue.
                 */
                queue_work(queue->rt2x00dev->workqueue, completion);

                /*
                 * Wait for a little while to give the driver
                 * the opportunity to recover itself.
                 */
                msleep(50);
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);

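/*
 * Recover a TX queue whose DMA has stalled: stop the queue, flush it
 * with frame dropping enabled and start it again.
 */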
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
        rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
                    queue->qid);

        rt2x00queue_stop_queue(queue);
        rt2x00queue_flush_queue(queue, true);
        rt2x00queue_start_queue(queue);
}

static int rt2x00usb_dma_timeout(struct data_queue *queue)
{
        struct queue_entry *entry;

        entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
        return rt2x00queue_dma_timeout(entry);
}

void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        tx_queue_for_each(rt2x00dev, queue) {
                if (!rt2x00queue_empty(queue)) {
                        if (rt2x00usb_dma_timeout(queue))
                                rt2x00usb_watchdog_tx_dma(queue);
                }
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);

/*
 * Radio handlers
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
        rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
                                    REGISTER_TIMEOUT);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
        entry->flags = 0;

        if (entry->queue->qid == QID_RX)
                rt2x00usb_kick_rx_entry(entry, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

static void rt2x00usb_assign_endpoint(struct data_queue *queue,
                                      struct usb_endpoint_descriptor *ep_desc)
{
        struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
        int pipe;

        queue->usb_endpoint = usb_endpoint_num(ep_desc);

        if (queue->qid == QID_RX) {
                pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
                queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
        } else {
                pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
                queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
        }

        if (!queue->usb_maxpacket)
                queue->usb_maxpacket = 1;
}

static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
        struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        struct data_queue *queue = rt2x00dev->tx;
        struct usb_endpoint_descriptor *tx_ep_desc = NULL;
        unsigned int i;

        /*
         * Walk through all available endpoints to search for "bulk in"
         * and "bulk out" endpoints. When we find such endpoints collect
         * the information we need from the descriptor and assign it
         * to the queue.
         */
        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc)) {
                        rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           (queue != queue_end(rt2x00dev))) {
                        rt2x00usb_assign_endpoint(queue, ep_desc);
                        queue = queue_next(queue);

                        tx_ep_desc = ep_desc;
                }
        }

        /*
         * At least 1 endpoint for RX and 1 endpoint for TX must be available.
         */
        if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
                rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
                return -EPIPE;
        }

        /*
         * It is possible that not all queues have a dedicated endpoint.
         * Loop through all TX queues and copy the endpoint information
         * which we have gathered from already assigned endpoints.
         */
        txall_queue_for_each(rt2x00dev, queue) {
                if (!queue->usb_endpoint)
                        rt2x00usb_assign_endpoint(queue, tx_ep_desc);
        }

        return 0;
}

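/*
 * Allocate a URB for every entry in the queue; beacon queues also
 * get a guardian URB per entry when the driver requires one.
 */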
static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;

        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!entry_priv->urb)
                        return -ENOMEM;
        }

        /*
         * If this is not the beacon queue or
         * no guardian byte was required for the beacon,
         * then we are done.
         */
        if (queue->qid != QID_BEACON ||
            !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
                return 0;

        for (i = 0; i < queue->limit; i++) {
                bcn_priv = queue->entries[i].priv_data;
                bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!bcn_priv->guardian_urb)
                        return -ENOMEM;
        }

        return 0;
}

static void rt2x00usb_free_entries(struct data_queue *queue)
{
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                usb_kill_urb(entry_priv->urb);
                usb_free_urb(entry_priv->urb);
        }

        /*
         * If this is not the beacon queue or
         * no guardian byte was required for the beacon,
         * then we are done.
         */
        if (queue->qid != QID_BEACON ||
            !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
                return;

        for (i = 0; i < queue->limit; i++) {
                bcn_priv = queue->entries[i].priv_data;
                usb_kill_urb(bcn_priv->guardian_urb);
                usb_free_urb(bcn_priv->guardian_urb);
        }
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        /*
         * Find endpoints for each queue
         */
        status = rt2x00usb_find_endpoints(rt2x00dev);
        if (status)
                goto exit;

        /*
         * Allocate DMA
         */
        queue_for_each(rt2x00dev, queue) {
                status = rt2x00usb_alloc_entries(queue);
                if (status)
                        goto exit;
        }

        return 0;

exit:
        rt2x00usb_uninitialize(rt2x00dev);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        usb_kill_anchored_urbs(rt2x00dev->anchor);
        hrtimer_cancel(&rt2x00dev->txstatus_timer);
        cancel_work_sync(&rt2x00dev->rxdone_work);
        cancel_work_sync(&rt2x00dev->txdone_work);

        queue_for_each(rt2x00dev, queue)
                rt2x00usb_free_entries(queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rf);
        rt2x00dev->rf = NULL;

        kfree(rt2x00dev->eeprom);
        rt2x00dev->eeprom = NULL;

        kfree(rt2x00dev->csr.cache);
        rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
        rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
        if (!rt2x00dev->csr.cache)
                goto exit;

        rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
        if (!rt2x00dev->eeprom)
                goto exit;

        rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
        if (!rt2x00dev->rf)
                goto exit;

        return 0;

exit:
        rt2x00_probe_err("Failed to allocate registers\n");

        rt2x00usb_free_reg(rt2x00dev);

        return -ENOMEM;
}

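/*
 * Probe a newly connected USB interface: allocate the ieee80211_hw
 * and rt2x00 device structures, set up the work items, timers, CSR
 * cache and URB anchor, then hand the device over to rt2x00lib.
 */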
int rt2x00usb_probe(struct usb_interface *usb_intf,
                    const struct rt2x00_ops *ops)
{
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;

        usb_dev = usb_get_dev(usb_dev);
        usb_reset_device(usb_dev);

        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                rt2x00_probe_err("Failed to allocate hardware\n");
                retval = -ENOMEM;
                goto exit_put_device;
        }

        usb_set_intfdata(usb_intf, hw);

        rt2x00dev = hw->priv;
        rt2x00dev->dev = &usb_intf->dev;
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;

        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

        INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
        INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
        hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);

        retval = rt2x00usb_alloc_reg(rt2x00dev);
        if (retval)
                goto exit_free_device;

        rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
                                         sizeof(struct usb_anchor),
                                         GFP_KERNEL);
        if (!rt2x00dev->anchor) {
                retval = -ENOMEM;
                goto exit_free_reg;
        }
        init_usb_anchor(rt2x00dev->anchor);

        retval = rt2x00lib_probe_dev(rt2x00dev);
        if (retval)
                goto exit_free_anchor;

        return 0;

exit_free_anchor:
        usb_kill_anchored_urbs(rt2x00dev->anchor);

exit_free_reg:
        rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
        ieee80211_free_hw(hw);

exit_put_device:
        usb_put_dev(usb_dev);

        usb_set_intfdata(usb_intf, NULL);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        /*
         * Free all allocated data.
         */
        rt2x00lib_remove_dev(rt2x00dev);
        rt2x00usb_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);

        /*
         * Free the USB device data.
         */
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");