/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service. It is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

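/*
 * A rough usage sketch, not part of the driver: userspace typically opens
 * the vgem card node, creates a dumb buffer, maps it for CPU access, and
 * optionally exports it as a PRIME fd for another device to import. The
 * device path and dimensions below are illustrative only.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);  // whichever node is vgem
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *
 *	int prime_fd;
 *	drmPrimeHandleToFD(fd, create.handle, DRM_CLOEXEC, &prime_fd);
 */
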
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;
	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
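	/*
	 * The pages array is not currently populated (no PRIME pin or vmap
	 * holds it), so fall back to faulting the single page in directly
	 * from the object's shmem mapping.
	 */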
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}

	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	if (ret) {
		drm_gem_object_put_unlocked(&obj->base);
		return ERR_PTR(ret);
	}

	return &obj->base;
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

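	/*
	 * bpp is bits per pixel; round up to whole bytes per pixel before
	 * multiplying by the width to get the row pitch in bytes.
	 */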
	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	drm_gem_object_put_unlocked(gem_object);

	DRM_DEBUG("Created object of size %llu\n", args->size);

	return 0;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
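
/*
 * A rough usage sketch for the two vgem ioctls above, not part of the
 * driver; struct layouts are assumed to follow include/uapi/drm/vgem_drm.h.
 * Userspace attaches a fence to a buffer before "rendering" into it on the
 * CPU, and signals the fence when done so importers waiting on the buffer
 * can proceed.
 *
 *	struct drm_vgem_fence_attach attach = {
 *		.handle = handle,		// GEM handle to fence
 *		.flags = VGEM_FENCE_WRITE,	// exclusive (write) access
 *	};
 *	ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach);
 *
 *	// ... write into the buffer through the CPU mapping ...
 *
 *	struct drm_vgem_fence_signal signal = { .fence = attach.out_fence };
 *	ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
 */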

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mapping set up by drm_gem_mmap(), but our pages are
	 * ordinary shmem pages rather than special PFN mappings, so drop
	 * the VM_IO/VM_PFNMAP flags it added.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.release = drm_release,
};

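/*
 * Pages are grabbed from shmem on the first pin (PRIME export or vmap) and
 * released again when the last pin goes away; pages_lock protects both the
 * pin count and the pages array, which the fault handler also consults.
 */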
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

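/*
 * vgem has no real hardware behind it, so dma-buf imports are attached to
 * the backing platform device (which gets a full 64-bit DMA mask in
 * vgem_init()) rather than to the drm_device itself.
 */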
static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

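/*
 * Imported buffers keep the sg_table handed to us by the exporter; the
 * corresponding page array is treated as permanently pinned because the
 * pages belong to the exporter, not to our shmem file.
 */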
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					 npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}

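/*
 * Map an exported buffer into the importer's address space by reusing the
 * object's shmem file for the VMA, so faults are served by the same pages
 * that back the GEM object; the mapping is write-combined like vgem_mmap().
 */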
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.release = vgem_release,
	.open = vgem_open,
	.postclose = vgem_postclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,

	.dumb_create = vgem_gem_dumb_create,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_unpin = vgem_prime_unpin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_free;
	}

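	/*
	 * The platform device stands in for real hardware: give it a full
	 * 64-bit DMA mask so buffers going through vgem_prime_import() can
	 * be mapped for it without addressing restrictions.
	 */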
	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));
	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
			   &vgem_device->platform->dev);
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_fini;

	return 0;

out_fini:
	drm_dev_fini(&vgem_device->drm);
out_unregister:
	platform_device_unregister(vgem_device->platform);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");