// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 * Copyright (C) 2014 Linaro Limited
 * Copyright (C) 2011-2016 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char *msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg) + 1) < 0) {
 *        ... could not connect to <pipename> service
 *        close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
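
/*
 * For illustration only, a slightly fuller (hypothetical) guest userspace
 * sketch. The service name "pingpong" and the payload are made up for this
 * example; real service names and per-service protocols depend on what the
 * emulator exposes on the host side, and error handling is simplified.
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    if (fd < 0)
 *        return -1;
 *
 *    // Select the host service; the terminating NUL is part of the name.
 *    static const char kService[] = "pingpong";
 *    if (write(fd, kService, sizeof(kService)) != sizeof(kService)) {
 *        close(fd);
 *        return -1;
 *    }
 *
 *    // From here on, plain read()/write() calls talk to that service.
 *    char reply[64];
 *    if (write(fd, "ping", 4) == 4) {
 *        ssize_t n = read(fd, reply, sizeof(reply));
 *        // ... handle n bytes of reply
 *    }
 *    close(fd);
 */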

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
#include <linux/bug.h>
#include "goldfish_pipe_qemu.h"

/*
 * Update this when something changes in the driver's behavior so the host
 * can benefit from knowing it
 */
enum {
	PIPE_DRIVER_VERSION = 2,
	PIPE_CURRENT_DEVICE_VERSION = 2
};

enum {
	MAX_BUFFERS_PER_COMMAND = 336,
	MAX_SIGNALLED_PIPES = 64,
	INITIAL_PIPES_CAPACITY = 64
};

struct goldfish_pipe_dev;

/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
	s32 cmd;	/* PipeCmdCode, guest -> host */
	s32 id;		/* pipe id, guest -> host */
	s32 status;	/* command execution status, host -> guest */
	s32 reserved;	/* to pad to 64-bit boundary */
	union {
		/* Parameters for PIPE_CMD_{READ,WRITE} */
		struct {
			/* number of buffers, guest -> host */
			u32 buffers_count;
			/* number of consumed bytes, host -> guest */
			s32 consumed_size;
			/* buffer pointers, guest -> host */
			u64 ptrs[MAX_BUFFERS_PER_COMMAND];
			/* buffer sizes, guest -> host */
			u32 sizes[MAX_BUFFERS_PER_COMMAND];
		} rw_params;
	};
};
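
/*
 * Back-of-the-envelope check (assuming 4 KiB pages): the fixed header is
 * 4 * 4 = 16 bytes, and rw_params adds 4 + 4 bytes plus
 * 336 * (8 + 4) = 4032 bytes for the pointer and size arrays, i.e. about
 * 4056 bytes in total. That keeps struct goldfish_pipe_command within a
 * single page, which the BUILD_BUG_ON() in goldfish_pipe_open() relies on.
 */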

/* Information about a single signalled pipe */
struct signalled_pipe_buffer {
	u32 id;
	u32 flags;
};

/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
	u64 command_buffer_ptr;
	u32 rw_params_max_count;
};

/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
	struct open_command_param open_command_params;
	struct signalled_pipe_buffer
		signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};

/* This data type models a given pipe instance */
struct goldfish_pipe {
	/* pipe ID - index into goldfish_pipe_dev::pipes array */
	u32 id;

	/* The wake flags this pipe is waiting for
	 * Note: not protected with any lock, uses atomic operations
	 * and barriers to make it thread-safe.
	 */
	unsigned long flags;

	/* wake flags the host has signalled,
	 * - protected by goldfish_pipe_dev::lock
	 */
	unsigned long signalled_flags;

	/* A pointer to the command buffer */
	struct goldfish_pipe_command *command_buffer;

	/* doubly linked list of signalled pipes, protected by
	 * goldfish_pipe_dev::lock
	 */
	struct goldfish_pipe *prev_signalled;
	struct goldfish_pipe *next_signalled;

	/*
	 * A pipe's own lock. Protects the following:
	 *  - *command_buffer - makes sure a command can safely write its
	 *    parameters to the host and read the results back.
	 */
	struct mutex lock;

	/* A wake queue for sleeping until host signals an event */
	wait_queue_head_t wake_queue;

	/* Pointer to the parent goldfish_pipe_dev instance */
	struct goldfish_pipe_dev *dev;

	/* A buffer of pages, too large to fit into a stack frame */
	struct page *pages[MAX_BUFFERS_PER_COMMAND];
};
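
/*
 * Rough sizing note (assuming 64-bit pointers): the pages[] array above is
 * 336 * sizeof(struct page *) = 2688 bytes, which is why it lives in the
 * per-pipe structure instead of on the stack of the transfer functions.
 */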

/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	/* A magic number to check if this is an instance of this struct */
	void *magic;

	/*
	 * Global device spinlock. Protects the following members:
	 *  - pipes, pipes_capacity
	 *  - [*pipes, *pipes + pipes_capacity) - array data
	 *  - first_signalled_pipe,
	 *      goldfish_pipe::prev_signalled,
	 *      goldfish_pipe::next_signalled,
	 *      goldfish_pipe::signalled_flags - all signalled-related fields,
	 *      in all allocated pipes
	 *  - open_command_params - PIPE_CMD_OPEN-related buffers
	 *
	 * It looks like a lot of different fields, but the trick is that
	 * the only operation that happens often is the signalled pipes array
	 * manipulation. That's why it's OK for now to keep the rest of the
	 * fields under the same lock. If we notice too much contention because
	 * of PIPE_CMD_OPEN, then we should add a separate lock there.
	 */
	spinlock_t lock;

	/*
	 * Array of the pipes of |pipes_capacity| elements,
	 * indexed by goldfish_pipe::id
	 */
	struct goldfish_pipe **pipes;
	u32 pipes_capacity;

	/* Pointers to the buffers the host uses to interact with this driver */
	struct goldfish_pipe_dev_buffers *buffers;

	/* Head of a doubly linked list of signalled pipes */
	struct goldfish_pipe *first_signalled_pipe;

	/* ptr to platform device's device struct */
	struct device *pdev_dev;

	/* Some device-specific data */
	int irq;
	int version;
	unsigned char __iomem *base;

	/* an irq tasklet to run goldfish_interrupt_task */
	struct tasklet_struct irq_tasklet;

	struct miscdevice miscdev;
};

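/*
 * Note on the command protocol (as implied by the code below): writing the
 * pipe id to PIPE_REG_CMD is what asks the device to execute the command
 * currently stored in this pipe's shared command buffer. The device is
 * expected to process it synchronously, so command_buffer->status already
 * holds the result by the time writel() returns.
 */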
static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
				    enum PipeCmdCode cmd)
{
	pipe->command_buffer->cmd = cmd;
	/* failure by default */
	pipe->command_buffer->status = PIPE_ERROR_INVAL;
	writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
	return pipe->command_buffer->status;
}

static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
	int status;

	if (mutex_lock_interruptible(&pipe->lock))
		return PIPE_ERROR_IO;
	status = goldfish_pipe_cmd_locked(pipe, cmd);
	mutex_unlock(&pipe->lock);
	return status;
}

/*
 * This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static int pin_user_pages(unsigned long first_page,
			  unsigned long last_page,
			  unsigned int last_page_size,
			  int is_write,
			  struct page *pages[MAX_BUFFERS_PER_COMMAND],
			  unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = get_user_pages_fast(first_page, requested_pages,
				  !is_write ? FOLL_WRITE : 0,
				  pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;

	return ret;
}
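
/*
 * A note on the clamping above: if the user buffer spans more than
 * MAX_BUFFERS_PER_COMMAND pages, only the first 336 pages are pinned for
 * this iteration and the last of them is used in full (PAGE_SIZE), so the
 * outer loop in goldfish_pipe_read_write() simply comes back for the
 * remainder of the buffer on the next pass.
 */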

static void release_user_pages(struct page **pages, int pages_count,
			       int is_write, s32 consumed_size)
{
	int i;

	for (i = 0; i < pages_count; i++) {
		if (!is_write && consumed_size > 0)
			set_page_dirty(pages[i]);
		put_page(pages[i]);
	}
}

/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
			       int pages_count,
			       unsigned long address,
			       unsigned long address_end,
			       unsigned long first_page,
			       unsigned long last_page,
			       unsigned int iter_last_page_size,
			       int is_write,
			       struct goldfish_pipe_command *command)
{
	/*
	 * Process the first page separately - it's the only page that
	 * needs special handling for its start address.
	 */
	unsigned long xaddr = page_to_phys(pages[0]);
	unsigned long xaddr_prev = xaddr;
	int buffer_idx = 0;
	int i = 1;
	int size_on_page = first_page == last_page
			? (int)(address_end - address)
			: (PAGE_SIZE - (address & ~PAGE_MASK));
	command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
	command->rw_params.sizes[0] = size_on_page;
	for (; i < pages_count; ++i) {
		xaddr = page_to_phys(pages[i]);
		size_on_page = (i == pages_count - 1) ?
			iter_last_page_size : PAGE_SIZE;
		if (xaddr == xaddr_prev + PAGE_SIZE) {
			command->rw_params.sizes[buffer_idx] += size_on_page;
		} else {
			++buffer_idx;
			command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
			command->rw_params.sizes[buffer_idx] = size_on_page;
		}
		xaddr_prev = xaddr;
	}
	command->rw_params.buffers_count = buffer_idx + 1;
}
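
/*
 * Coalescing example (hypothetical physical addresses, 4 KiB pages): if the
 * pinned pages map to 0x10000, 0x11000 and 0x25000, the first two are
 * physically contiguous and get merged into a single 8 KiB buffer entry,
 * while the third starts a second entry, so buffers_count ends up as 2.
 */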

static int transfer_max_buffers(struct goldfish_pipe *pipe,
				unsigned long address,
				unsigned long address_end,
				int is_write,
				unsigned long last_page,
				unsigned int last_page_size,
				s32 *consumed_size,
				int *status)
{
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	pages_count = pin_user_pages(first_page, last_page,
				     last_page_size, is_write,
				     pipe->pages, &iter_last_page_size);
	if (pages_count < 0) {
		mutex_unlock(&pipe->lock);
		return pages_count;
	}

	populate_rw_params(pipe->pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size, is_write,
			   pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_pipe_cmd_locked(pipe,
				is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);

	mutex_unlock(&pipe->lock);
	return 0;
}

static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
	u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;

	set_bit(wake_bit, &pipe->flags);

	/* Tell the emulator we're going to wait for a wake event */
	goldfish_pipe_cmd(pipe,
		is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);

	while (test_bit(wake_bit, &pipe->flags)) {
		if (wait_event_interruptible(pipe->wake_queue,
					     !test_bit(wake_bit, &pipe->flags)))
			return -ERESTARTSYS;

		if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
			return -EIO;
	}

	return 0;
}

static ssize_t goldfish_pipe_read_write(struct file *filp,
					char __user *buffer,
					size_t bufflen,
					int is_write)
{
	struct goldfish_pipe *pipe = filp->private_data;
	int count = 0, ret = -EINVAL;
	unsigned long address, address_end, last_page;
	unsigned int last_page_size;

	/* If the emulator already closed the pipe, no need to go further */
	if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
		return -EIO;
	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;
	/* Check the buffer range for access */
	if (unlikely(!access_ok(buffer, bufflen)))
		return -EFAULT;

	address = (unsigned long)buffer;
	address_end = address + bufflen;
	last_page = (address_end - 1) & PAGE_MASK;
	last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
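	/*
	 * Worked example (hypothetical numbers, 4 KiB pages): for a transfer
	 * of bufflen = 10000 bytes starting at user address 0x20000ff0,
	 * address_end = 0x20003700, last_page = 0x20003000 and
	 * last_page_size = 0x700 (1792 bytes). The buffer touches 4 pages:
	 * 16 bytes on the first, two full pages, and 1792 bytes on the last,
	 * which matches what pin_user_pages() derives from first_page and
	 * last_page.
	 */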

	while (address < address_end) {
		s32 consumed_size;
		int status;

		ret = transfer_max_buffers(pipe, address, address_end, is_write,
					   last_page, last_page_size,
					   &consumed_size, &status);
		if (ret < 0)
			break;

		if (consumed_size > 0) {
			/* No matter what the status is, we've transferred
			 * something.
			 */
			count += consumed_size;
			address += consumed_size;
		}
		if (status > 0)
			continue;
		if (status == 0) {
			/* EOF */
			ret = 0;
			break;
		}
		if (count > 0) {
			/*
			 * An error occurred, but we already transferred
			 * something on one of the previous iterations.
			 * Just return what we already copied and log this
			 * error.
			 */
			if (status != PIPE_ERROR_AGAIN)
				dev_err_ratelimited(pipe->dev->pdev_dev,
					"backend error %d on %s\n",
					status, is_write ? "write" : "read");
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
			(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		status = wait_for_host_signal(pipe, is_write);
		if (status < 0)
			return status;
	}

	if (count > 0)
		return count;
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen,
					/* is_write */ 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	/* cast away the const */
	char __user *no_const_buffer = (char __user *)buffer;

	return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
					/* is_write */ 1);
}

static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	__poll_t mask = 0;
	int status;

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
	if (status < 0)
		return -ERESTARTSYS;

	if (status & PIPE_POLL_IN)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (status & PIPE_POLL_OUT)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (status & PIPE_POLL_HUP)
		mask |= EPOLLHUP;
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= EPOLLERR;

	return mask;
}

static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
				       u32 id, u32 flags)
{
	struct goldfish_pipe *pipe;

	if (WARN_ON(id >= dev->pipes_capacity))
		return;

	pipe = dev->pipes[id];
	if (!pipe)
		return;
	pipe->signalled_flags |= flags;

	if (pipe->prev_signalled || pipe->next_signalled ||
		dev->first_signalled_pipe == pipe)
		return;	/* already in the list */
	pipe->next_signalled = dev->first_signalled_pipe;
	if (dev->first_signalled_pipe)
		dev->first_signalled_pipe->prev_signalled = pipe;
	dev->first_signalled_pipe = pipe;
}

static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
					  struct goldfish_pipe *pipe)
{
	if (pipe->prev_signalled)
		pipe->prev_signalled->next_signalled = pipe->next_signalled;
	if (pipe->next_signalled)
		pipe->next_signalled->prev_signalled = pipe->prev_signalled;
	if (pipe == dev->first_signalled_pipe)
		dev->first_signalled_pipe = pipe->next_signalled;
	pipe->prev_signalled = NULL;
	pipe->next_signalled = NULL;
}

static struct goldfish_pipe *signalled_pipes_pop_front(
		struct goldfish_pipe_dev *dev, int *wakes)
{
	struct goldfish_pipe *pipe;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	pipe = dev->first_signalled_pipe;
	if (pipe) {
		*wakes = pipe->signalled_flags;
		pipe->signalled_flags = 0;
		/*
		 * This is an optimized version of
		 * signalled_pipes_remove_locked() - we want to wake up the
		 * sleeping pipe operations as fast as possible.
		 */
		dev->first_signalled_pipe = pipe->next_signalled;
		if (dev->first_signalled_pipe)
			dev->first_signalled_pipe->prev_signalled = NULL;
		pipe->next_signalled = NULL;
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	return pipe;
}

static void goldfish_interrupt_task(unsigned long dev_addr)
{
	/* Iterate over the signalled pipes and wake them one by one */
	struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
	struct goldfish_pipe *pipe;
	int wakes;

	while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
		if (wakes & PIPE_WAKE_CLOSED) {
			pipe->flags = 1 << BIT_CLOSED_ON_HOST;
		} else {
			if (wakes & PIPE_WAKE_READ)
				clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
			if (wakes & PIPE_WAKE_WRITE)
				clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
		}
		/*
		 * wake_up_interruptible() implies a write barrier, so don't
		 * explicitly add another one here.
		 */
		wake_up_interruptible(&pipe->wake_queue);
	}
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev);

/*
 * The general idea of the interrupt handling:
 *
 *  1. device raises an interrupt if there's at least one signalled pipe
 *  2. IRQ handler reads the signalled pipes and their count from the device
 *  3. device writes them into a shared buffer and returns the count
 *     it only resets the IRQ if it has returned all signalled pipes,
 *     otherwise it leaves it raised, so IRQ handler will be called
 *     again for the next chunk
 *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
 *  5. IRQ handler launches a tasklet to process the signalled pipes from the
 *     list in a separate context
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	u32 count;
	u32 i;
	unsigned long flags;
	struct goldfish_pipe_dev *dev = dev_id;

	if (dev->magic != &goldfish_pipe_device_deinit)
		return IRQ_NONE;

	/* Request the signalled pipes from the device */
	spin_lock_irqsave(&dev->lock, flags);

	count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
	if (count == 0) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_NONE;
	}
	if (count > MAX_SIGNALLED_PIPES)
		count = MAX_SIGNALLED_PIPES;

	for (i = 0; i < count; ++i)
		signalled_pipes_add_locked(dev,
			dev->buffers->signalled_pipe_buffers[i].id,
			dev->buffers->signalled_pipe_buffers[i].flags);

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->irq_tasklet);
	return IRQ_HANDLED;
}

static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
	int id;

	for (id = 0; id < dev->pipes_capacity; ++id)
		if (!dev->pipes[id])
			return id;

	{
		/* Reallocate the array.
		 * Since get_free_pipe_id_locked runs with interrupts disabled,
		 * we don't want to make calls that could lead to sleep.
		 */
		u32 new_capacity = 2 * dev->pipes_capacity;
		struct goldfish_pipe **pipes =
			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
		if (!pipes)
			return -ENOMEM;
		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
		kfree(dev->pipes);
		dev->pipes = pipes;
		id = dev->pipes_capacity;
		dev->pipes_capacity = new_capacity;
	}
	return id;
}

/* A helper function to get the instance of goldfish_pipe_dev from file */
static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
{
	struct miscdevice *miscdev = file->private_data;

	return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
}

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need this addressing
 * scheme revisited.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
	unsigned long flags;
	int id;
	int status;

	/* Allocate new pipe kernel object */
	struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);

	if (!pipe)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * The command buffer needs to be allocated on its own page to make
	 * sure it is physically contiguous in the host's address space.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
	pipe->command_buffer =
		(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
	if (!pipe->command_buffer) {
		status = -ENOMEM;
		goto err_pipe;
	}

	spin_lock_irqsave(&dev->lock, flags);

	id = get_free_pipe_id_locked(dev);
	if (id < 0) {
		status = id;
		goto err_id_locked;
	}

	dev->pipes[id] = pipe;
	pipe->id = id;
	pipe->command_buffer->id = id;

	/* Now tell the emulator we're opening a new pipe. */
	dev->buffers->open_command_params.rw_params_max_count =
			MAX_BUFFERS_PER_COMMAND;
	dev->buffers->open_command_params.command_buffer_ptr =
			(u64)(unsigned long)__pa(pipe->command_buffer);
	status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (status < 0)
		goto err_cmd;
	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;

err_cmd:
	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[id] = NULL;
err_id_locked:
	spin_unlock_irqrestore(&dev->lock, flags);
	free_page((unsigned long)pipe->command_buffer);
err_pipe:
	kfree(pipe);
	return status;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);

	spin_lock_irqsave(&dev->lock, flags);
	dev->pipes[pipe->id] = NULL;
	signalled_pipes_remove_locked(dev, pipe);
	spin_unlock_irqrestore(&dev->lock, flags);

	filp->private_data = NULL;
	free_page((unsigned long)pipe->command_buffer);
	kfree(pipe);
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static void init_miscdevice(struct miscdevice *miscdev)
{
	memset(miscdev, 0, sizeof(*miscdev));

	miscdev->minor = MISC_DYNAMIC_MINOR;
	miscdev->name = "goldfish_pipe";
	miscdev->fops = &goldfish_pipe_fops;
}

static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
	const unsigned long paddr = __pa(addr);

	writel(upper_32_bits(paddr), porth);
	writel(lower_32_bits(paddr), portl);
}

static int goldfish_pipe_device_init(struct platform_device *pdev,
				     struct goldfish_pipe_dev *dev)
{
	int err;

	tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
		     (unsigned long)dev);

	err = devm_request_irq(&pdev->dev, dev->irq,
			       goldfish_pipe_interrupt,
			       IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
		return err;
	}

	init_miscdevice(&dev->miscdev);
	err = misc_register(&dev->miscdev);
	if (err) {
		dev_err(&pdev->dev, "unable to register v2 device\n");
		return err;
	}

	dev->pdev_dev = &pdev->dev;
	dev->first_signalled_pipe = NULL;
	dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
	dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
			     GFP_KERNEL);
	if (!dev->pipes) {
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/*
	 * We're going to pass two buffers, open_command_params and
	 * signalled_pipe_buffers, to the host. This means each of those buffers
	 * needs to be contained in a single physical page. The easiest choice
	 * is to just allocate a page and place the buffers in it.
	 */
	BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
	dev->buffers = (struct goldfish_pipe_dev_buffers *)
		__get_free_page(GFP_KERNEL);
	if (!dev->buffers) {
		kfree(dev->pipes);
		misc_deregister(&dev->miscdev);
		return -ENOMEM;
	}

	/* Send the buffer addresses to the host */
	write_pa_addr(&dev->buffers->signalled_pipe_buffers,
		      dev->base + PIPE_REG_SIGNAL_BUFFER,
		      dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);

	writel(MAX_SIGNALLED_PIPES,
	       dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);

	write_pa_addr(&dev->buffers->open_command_params,
		      dev->base + PIPE_REG_OPEN_BUFFER,
		      dev->base + PIPE_REG_OPEN_BUFFER_HIGH);

	platform_set_drvdata(pdev, dev);
	return 0;
}

static void goldfish_pipe_device_deinit(struct platform_device *pdev,
					struct goldfish_pipe_dev *dev)
{
	misc_deregister(&dev->miscdev);
	tasklet_kill(&dev->irq_tasklet);
	kfree(dev->pipes);
	free_page((unsigned long)dev->buffers);
}

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct goldfish_pipe_dev *dev;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->magic = &goldfish_pipe_device_deinit;
	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!dev->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r)
		return -EINVAL;

	dev->irq = r->start;

	/*
	 * Exchange the versions with the host device
	 *
	 * Note: the v1 driver did not report its version, so we write ours
	 * before reading the device version back: this allows the host
	 * implementation to detect the old driver (if there was no version
	 * write before read).
	 */
	writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
	dev->version = readl(dev->base + PIPE_REG_VERSION);
	if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
		return -EINVAL;

	return goldfish_pipe_device_init(pdev, dev);
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);

	goldfish_pipe_device_deinit(pdev, dev);
	return 0;
}

static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
	{ "GFSH0003", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);

static const struct of_device_id goldfish_pipe_of_match[] = {
	{ .compatible = "google,android-pipe", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);

static struct platform_driver goldfish_pipe_driver = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe",
		.of_match_table = goldfish_pipe_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
	}
};

module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL v2");