// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
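
/*
 * Illustrative sketch, not part of the driver: the typical lifecycle of a
 * request object using the refcounting helpers above. The function name
 * and the balance of get/put calls shown here are assumptions for the
 * example only.
 */
static void example_request_lifecycle(void)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();	/* refcount starts at 1 */
	if (!req)
		return;

	/* ... fill in req->request, req->response, req->match, req->copy ... */

	tb_cfg_request_get(req);	/* take a temporary second reference */
	/* ... hand the extra reference to another context ... */
	tb_cfg_request_put(req);	/* that context drops its reference */

	tb_cfg_request_put(req);	/* final put frees the object */
}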

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}
/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)\n",
		 route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_WARN(ctl,
			    "CFG_ERROR(%llx:%x): Invalid config space or offset\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}
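
/*
 * Illustrative sketch, not part of the driver: the last four bytes of
 * every control frame carry a big-endian CRC32c of the payload that
 * precedes them. tb_ctl_tx() below appends this value and
 * tb_ctl_rx_callback() recomputes it for comparison. The helper name is
 * an assumption for the example only.
 */
static bool example_frame_checksum_ok(const void *buffer, size_t payload_len)
{
	/* the checksum sits immediately after the payload */
	return *(const __be32 *)(buffer + payload_len) ==
	       tb_crc(buffer, payload_len);
}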

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
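
/*
 * Illustrative sketch, not part of the driver: a raw transmit must be a
 * multiple of four bytes and leave room for the four-byte checksum, as
 * enforced by tb_ctl_tx() above. The function name, packet type and
 * zeroed payload are assumptions for the example only.
 */
static int example_send_raw(struct tb_ctl *ctl)
{
	u32 payload[2] = { 0, 0 };	/* hypothetical two-dword body */

	/* 8 bytes: satisfies len % 4 == 0 and len <= TB_FRAME_SIZE - 4 */
	return tb_ctl_tx(ctl, payload, sizeof(payload), TB_CFG_PKG_ERROR);
}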

/**
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		/* Fall through */
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
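
/*
 * Illustrative sketch, not part of the driver: queuing a request
 * asynchronously and waiting on a completion from the callback. The
 * function names and the completion-based flow are assumptions for the
 * example only; tb_cfg_request_sync() below wraps this same pattern.
 */
static void example_request_done(void *data)
{
	complete(data);	/* runs from the request work item */
}

static int example_request_async(struct tb_ctl *ctl,
				 struct tb_cfg_request *req)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, example_request_done, &done);
	if (ret)
		return ret;

	/* ... do other work, then wait for the response ... */
	wait_for_completion(&done);

	return req->result.err;
}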

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It waits
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in milliseconds to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note
 * that the caller needs to make sure only one message for a given switch
 * is active at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_info(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
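
/*
 * Illustrative sketch, not part of the driver: the expected life cycle of
 * a control channel around the alloc/start/stop/free interface. The
 * function names are assumptions for the example only; the callback
 * signature follows the event_cb invocation in tb_ctl_handle_event()
 * above.
 */
static bool example_event_cb(void *data, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	return false;	/* event not handled in this sketch */
}

static void example_ctl_lifecycle(struct tb_nhi *nhi)
{
	struct tb_ctl *ctl = tb_ctl_alloc(nhi, example_event_cb, NULL);

	if (!ctl)
		return;

	tb_ctl_start(ctl);
	/* ... issue tb_cfg_read()/tb_cfg_write() requests here ... */
	tb_ctl_stop(ctl);	/* must precede tb_ctl_free() */
	tb_ctl_free(ctl);
}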

/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	if (ctl->frame_pool)
		dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_info(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_info(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
		 enum tb_cfg_error error)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = error,
	};

	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}
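
/*
 * Illustrative sketch, not part of the driver: reading two dwords from
 * the port config space of port 3 of the switch at @route. The function
 * name, port number and offset are assumptions for the example only.
 */
static int example_read_port_regs(struct tb_ctl *ctl, u64 route)
{
	u32 data[2];
	struct tb_cfg_result res;

	/* offset and length are in dwords, not bytes */
	res = tb_cfg_read_raw(ctl, data, route, 3, TB_CFG_PORT, 0, 2,
			      TB_CFG_DEFAULT_TIMEOUT);
	if (res.err)
		return res.err > 0 ? -EIO : res.err;

	/* data[0] and data[1] now hold dwords 0 and 1 of the config space */
	return 0;
}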

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		tb_cfg_print_error(ctl, &res);
		return -EIO;

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
			    space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		tb_cfg_print_error(ctl, &res);
		return -EIO;

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
			    space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}
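
/*
 * Illustrative sketch, not part of the driver: combining the helpers
 * above to identify the upstream port of a switch at @route. The
 * function name and the debug logging are assumptions for the example
 * only.
 */
static int example_identify_switch(struct tb_ctl *ctl, u64 route)
{
	int upstream_port = tb_cfg_get_upstream_port(ctl, route);

	if (upstream_port < 0)
		return upstream_port;	/* -EIO or a transport error */

	tb_ctl_dbg(ctl, "switch at %llx replied via port %d\n",
		   route, upstream_port);
	return upstream_port;
}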