/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

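/*
 * Translate a userspace drm_virtgpu_3d_box into the little-endian
 * virtio_gpu_box layout the host expects.
 */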
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

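/*
 * VIRTGPU_MAP: return the fake mmap offset of a buffer object so that
 * userspace can mmap() it through the DRM file descriptor.
 */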
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

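/*
 * Reserve every buffer object on @head under the ww_acquire ticket and
 * validate it against its current placement.  On failure all reservations
 * taken so far are backed off before returning.
 */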
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

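/*
 * Drop the GEM reference that was taken on each buffer object when it was
 * looked up and added to the validation list.
 */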
void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
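/*
 * Rough userspace sketch (not part of this driver; libdrm and placeholder
 * names cmd_buf/cmd_size/handles/num_handles/fd are assumed) of how this
 * ioctl is typically driven.  The field names match the
 * struct drm_virtgpu_execbuffer layout consumed below; the contents of the
 * command stream are up to the renderer:
 *
 *	struct drm_virtgpu_execbuffer exbuf = {
 *		.flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size           = cmd_size,
 *		.command        = (uintptr_t)cmd_buf,
 *		.bo_handles     = (uintptr_t)handles,
 *		.num_bo_handles = num_handles,
 *		.fence_fd       = -1,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exbuf) == 0)
 *		out_fence_fd = exbuf.fence_fd;  // sync_file fd installed by the kernel
 */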
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {

		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				ret = -ENOENT;
				goto out_unused_fd;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, out_fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return 0;

out_memdup:
	kfree(buf);
out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
out_unused_fd:
	kvfree(bo_handles);
	kvfree(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

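/*
 * VIRTGPU_GETPARAM: report a single device capability (currently 3D support
 * and the capset query fix) back to userspace.
 */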
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

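/*
 * VIRTGPU_RESOURCE_CREATE: allocate a host resource plus its backing guest
 * object and return both a GEM handle and the host resource id.  Without
 * virgl 3D support only plain 2D resources are accepted.
 */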
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	}
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	qobj = virtio_gpu_alloc_object(dev, &params, fence);
	dma_fence_put(&fence->f);
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;

	/*
	 * The handle owns the reference now. But we must drop our
	 * remaining reference *after* we no longer need to dereference
	 * the obj. Otherwise userspace could guess the handle and
	 * race closing it from another thread.
	 */
	drm_gem_object_put_unlocked(obj);

	return 0;
}

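/*
 * VIRTGPU_RESOURCE_INFO: look up a GEM handle and report the object size
 * and its host resource id.
 */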
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

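/*
 * VIRTGPU_TRANSFER_FROM_HOST: queue a fenced transfer of a region of a host
 * resource back into the guest buffer object.  Only available with virgl 3D.
 */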
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unres;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, fence);
	dma_resv_add_excl_fence(qobj->tbo.base.resv,
				&fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

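/*
 * VIRTGPU_TRANSFER_TO_HOST: push a region of a guest buffer object to the
 * corresponding host resource.  The 2D path is unfenced; with virgl 3D the
 * transfer is fenced and tagged with the per-file context.
 */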
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto out_unres;
		}
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, fence);
		dma_resv_add_excl_fence(qobj->tbo.base.resv,
					&fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

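/*
 * VIRTGPU_WAIT: block until a buffer object is idle, or with
 * VIRTGPU_WAIT_NOWAIT just report whether it is still busy.
 */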
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_put_unlocked(gobj);
	return ret;
}

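/*
 * VIRTGPU_GET_CAPS: return the requested capability set, fetching it from
 * the host if it is not already cached, and copying at most
 * min(guest size, host size) bytes to userspace.
 */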
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};