/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/moduleparam.h>

#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

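/*
 * Hand out a resource ID for a new object.  IDs returned here start
 * at 1, so 0 stays free to act as a "no resource" marker.  With the
 * virglrenderer workaround enabled, IDs come from an ever-increasing
 * counter and are deliberately never recycled; otherwise they are
 * managed by the per-device IDA and given back in
 * virtio_gpu_resource_id_put().
 */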
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

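/*
 * Release a resource ID.  This is a no-op while the virglrenderer
 * workaround is active, since IDs are intentionally never reused in
 * that mode.
 */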
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev,
				       uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}

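/*
 * TTM buffer-object destructor, invoked once the last reference to the
 * object is dropped.  Tells the host to drop its reference to the
 * resource, tears down the sg table and kernel mapping if present,
 * then releases the GEM object, the resource ID, and the memory.
 */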
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->created)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
	if (bo->vmap)
		virtio_gpu_object_kunmap(bo);
	drm_gem_object_release(&bo->gem_base);
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	kfree(bo);
}

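/*
 * virtio-gpu buffers always live in guest system memory, so a single
 * non-evictable TT placement serves as both the normal and the busy
 * placement.
 */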
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
	u32 c = 1;

	vgbo->placement.placement = &vgbo->placement_code;
	vgbo->placement.busy_placement = &vgbo->placement_code;
	vgbo->placement_code.fpfn = 0;
	vgbo->placement_code.lpfn = 0;
	vgbo->placement_code.flags =
		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
		TTM_PL_FLAG_NO_EVICT;
	vgbo->placement.num_placement = c;
	vgbo->placement.num_busy_placement = c;
}

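/*
 * Create a virtio-gpu object: allocate a resource ID, initialize the
 * GEM and TTM sides of the object, and queue the matching resource
 * creation command to the host.  If @fence is non-NULL, the object is
 * additionally reserved and fenced so nothing touches it before the
 * host has processed the creation command.  On failure everything is
 * unwound and *bo_ptr is left NULL.
 */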
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo;
	size_t acc_size;
	int ret;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0) {
		kfree(bo);
		return ret;
	}
	params->size = roundup(params->size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
	if (ret != 0) {
		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
		kfree(bo);
		return ret;
	}
	bo->dumb = params->dumb;

	if (params->virgl)
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
	else
		virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);

	virtio_gpu_init_ttm_placement(bo);
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
			  ttm_bo_type_device, &bo->placement, 0,
			  true, acc_size, NULL, NULL,
			  &virtio_gpu_ttm_bo_destroy);
	/* on failure, ttm_bo_init() calls the destroy callback for us */
	if (ret != 0)
		return ret;

	if (fence) {
		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
		struct list_head validate_list;
		struct ttm_validate_buffer mainbuf;
		struct ww_acquire_ctx ticket;
		unsigned long irq_flags;
		bool signaled;

		INIT_LIST_HEAD(&validate_list);
		memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&bo->gem_base);
		mainbuf.bo = &bo->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret == 0) {
			spin_lock_irqsave(&drv->lock, irq_flags);
			signaled = virtio_fence_signaled(&fence->f);
			if (!signaled)
				/* virtio create command still in flight */
				ttm_eu_fence_buffer_objects(&ticket,
							    &validate_list,
							    &fence->f);
			spin_unlock_irqrestore(&drv->lock, irq_flags);
			if (signaled)
				/* virtio create command finished */
				ttm_eu_backoff_reservation(&ticket,
							   &validate_list);
		}
		virtio_gpu_unref_list(&validate_list);
	}

	*bo_ptr = bo;
	return 0;
}

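/*
 * Drop the kernel virtual mapping set up by virtio_gpu_object_kmap().
 */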
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
{
	bo->vmap = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

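/*
 * Map the whole object into the kernel address space and cache the
 * resulting pointer in bo->vmap.  An object is expected to be mapped
 * at most once at a time; a second call without an intervening kunmap
 * trips the WARN_ON below.
 */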
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
{
	bool is_iomem;
	int r;

	WARN_ON(bo->vmap);

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	return 0;
}

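/*
 * Build a scatter/gather table for the object's backing pages so they
 * can be attached to the host-side resource.  The TTM pages are
 * populated first if needed, and segment sizes are capped both at the
 * maximum DMA size the virtio device supports and at what a single
 * scatterlist segment can hold.
 */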
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo)
{
	int ret;
	struct page **pages = bo->tbo.ttm->pages;
	int nr_pages = bo->tbo.num_pages;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	size_t max_segment;

	/* nothing to do if the sg table was already built */
	if (bo->pages)
		return 0;

	if (bo->tbo.ttm->state == tt_unpopulated)
		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!bo->pages)
		goto out;

	max_segment = virtio_max_dma_size(qdev->vdev);
	max_segment &= PAGE_MASK;
	if (max_segment > SCATTERLIST_MAX_SEGMENT)
		max_segment = SCATTERLIST_MAX_SEGMENT;
	ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);
	if (ret)
		goto out;
	return 0;
out:
	kfree(bo->pages);
	bo->pages = NULL;
	return -ENOMEM;
}

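/* Tear down the table built by virtio_gpu_object_get_sg_table(). */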
void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
{
	sg_free_table(bo->pages);
	kfree(bo->pages);
	bo->pages = NULL;
}

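/*
 * Wait for pending activity on the object to finish.  With @no_wait
 * set this only polls, returning -EBUSY instead of sleeping when the
 * object cannot be reserved or is still busy.
 */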
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}