/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE \
			       + MAX_INLINE_RESP_SIZE)

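/*
 * Interrupt callbacks for the control and cursor virtqueues.  They only
 * schedule the dequeue workers; the actual buffer reclaim happens in
 * virtio_gpu_dequeue_ctrl_func()/virtio_gpu_dequeue_cursor_func() below.
 */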
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

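/*
 * Each vbuffer is allocated from the "virtio-gpu-vbufs" slab as one
 * VBUFFER_SIZE chunk: the command (up to MAX_INLINE_CMD_SIZE bytes) sits
 * right behind the struct, and small responses (up to MAX_INLINE_RESP_SIZE
 * bytes) follow the command.  Larger responses must be passed in via
 * resp_buf.
 */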
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

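/*
 * Work handler for the control queue: collect completed buffers under the
 * queue lock, then (outside the lock) log error responses, run the
 * per-buffer response callbacks, free the buffers and finally signal the
 * highest completed fence id.
 */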
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

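/*
 * Add a command buffer to the control virtqueue.  Up to three scatterlist
 * entries are used: the command itself, an optional outgoing data buffer
 * (vbuf->data_buf) and an optional response buffer.  On -ENOSPC the queue
 * lock is dropped and we wait on ack_queue until the host has made room.
 */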
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

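/*
 * Fenced variant: reserve worst-case space (3 descriptors: command, data,
 * response) before emitting the fence, so that fence ids are written into
 * the ring in the order they were allocated.
 */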
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

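/*
 * Response callbacks below run from virtio_gpu_dequeue_ctrl_func() work
 * context; they parse the response buffer, update device state under
 * display_info_lock and wake up anyone waiting on resp_wq.
 */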
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

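/*
 * Fetch a capability set from the host.  A cache entry is pre-allocated
 * and inserted into vgdev->cap_cache before the command is queued; if a
 * matching entry already exists (added by a concurrent caller), the new
 * allocation is dropped and the existing entry is returned instead.
 */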
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

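/*
 * Attach backing pages to a host resource.  When the DMA API is in use
 * (no virtio IOMMU quirk) the object's sg table is mapped first and the
 * device addresses are used; otherwise physical addresses are passed.
 * The mem_entry array is handed to the vbuffer as data_buf and freed in
 * free_vbuf() once the host has consumed the command.
 */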
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kvmalloc_array(nents,
			      sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}