1/******************************************************************************
2 * gntdev.c
3 *
4 * Device for accessing (in user-space) pages that have been granted by other
5 * domains.
6 *
7 * Copyright (c) 2006-2007, D G Murray.
8 * (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
9 * (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#undef DEBUG
22
23#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
24
25#include <linux/dma-mapping.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/miscdevice.h>
30#include <linux/fs.h>
31#include <linux/uaccess.h>
32#include <linux/sched.h>
33#include <linux/sched/mm.h>
34#include <linux/spinlock.h>
35#include <linux/slab.h>
36#include <linux/highmem.h>
37#include <linux/refcount.h>
38#include <linux/workqueue.h>
39
40#include <xen/xen.h>
41#include <xen/grant_table.h>
42#include <xen/balloon.h>
43#include <xen/gntdev.h>
44#include <xen/events.h>
45#include <xen/page.h>
46#include <asm/xen/hypervisor.h>
47#include <asm/xen/hypercall.h>
48
49#include "gntdev-common.h"
50#ifdef CONFIG_XEN_GNTDEV_DMABUF
51#include "gntdev-dmabuf.h"
52#endif
53
54MODULE_LICENSE("GPL");
55MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
56 "Gerd Hoffmann <kraxel@redhat.com>");
57MODULE_DESCRIPTION("User-space granted page access driver");
58
59static int limit = 1024*1024;
60module_param(limit, int, 0644);
61MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
62 "the gntdev device");
63
64static atomic_t pages_mapped = ATOMIC_INIT(0);
65
66/* True in PV mode, false otherwise */
67static int use_ptemod;
68#define populate_freeable_maps use_ptemod
69
70static void unmap_grant_pages(struct gntdev_grant_map *map,
71 int offset, int pages);
72
73static struct miscdevice gntdev_miscdev;
74
75/* ------------------------------------------------------------------ */
76
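/*
 * Charge @count pages against the module-wide "limit" parameter. Returns
 * true if the new total exceeds the limit; the caller is then expected to
 * drop the map again, which subtracts the count in gntdev_put_map().
 */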
77bool gntdev_account_mapped_pages(int count)
78{
79 return atomic_add_return(count, &pages_mapped) > limit;
80}
81
82static void gntdev_print_maps(struct gntdev_priv *priv,
83 char *text, int text_index)
84{
85#ifdef DEBUG
86 struct gntdev_grant_map *map;
87
88 pr_debug("%s: maps list (priv %p)\n", __func__, priv);
89 list_for_each_entry(map, &priv->maps, next)
90 pr_debug(" index %2d, count %2d %s\n",
91 map->index, map->count,
92 map->index == text_index && text ? text : "");
93#endif
94}
95
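/*
 * Release everything allocated by gntdev_alloc_map(): give the backing
 * pages back (to the DMA allocator or the balloon) and free the per-grant
 * bookkeeping arrays. A NULL map is ignored.
 */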
96static void gntdev_free_map(struct gntdev_grant_map *map)
97{
98 if (map == NULL)
99 return;
100
101#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
102 if (map->dma_vaddr) {
103 struct gnttab_dma_alloc_args args;
104
105 args.dev = map->dma_dev;
106 args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
107 args.nr_pages = map->count;
108 args.pages = map->pages;
109 args.frames = map->frames;
110 args.vaddr = map->dma_vaddr;
111 args.dev_bus_addr = map->dma_bus_addr;
112
113 gnttab_dma_free_pages(&args);
114 } else
115#endif
116 if (map->pages)
117 gnttab_free_pages(map->count, map->pages);
118
119#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
120 kfree(map->frames);
121#endif
122 kfree(map->pages);
123 kfree(map->grants);
124 kfree(map->map_ops);
125 kfree(map->unmap_ops);
126 kfree(map->kmap_ops);
127 kfree(map->kunmap_ops);
128 kfree(map->being_removed);
129 kfree(map);
130}
131
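/*
 * Allocate a grant map for @count grants plus the map/unmap operation
 * arrays that go with it. If dma_flags request a DMA-backed buffer
 * (write-combine or coherent), the pages come from gnttab_dma_alloc_pages();
 * otherwise plain ballooned pages are used. All grant handles start out
 * as -1 (invalid).
 */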
132struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
133 int dma_flags)
134{
135 struct gntdev_grant_map *add;
136 int i;
137
138 add = kzalloc(sizeof(*add), GFP_KERNEL);
139 if (NULL == add)
140 return NULL;
141
142 add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
143 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
144 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
145 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
146 add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
147 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
148 add->being_removed =
149 kcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
150 if (NULL == add->grants ||
151 NULL == add->map_ops ||
152 NULL == add->unmap_ops ||
153 NULL == add->kmap_ops ||
154 NULL == add->kunmap_ops ||
155 NULL == add->pages ||
156 NULL == add->being_removed)
157 goto err;
158
159#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
160 add->dma_flags = dma_flags;
161
162 /*
163 * Check if this mapping is requested to be backed
164 * by a DMA buffer.
165 */
166 if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
167 struct gnttab_dma_alloc_args args;
168
169 add->frames = kcalloc(count, sizeof(add->frames[0]),
170 GFP_KERNEL);
171 if (!add->frames)
172 goto err;
173
174 /* Remember the device, so we can free DMA memory. */
175 add->dma_dev = priv->dma_dev;
176
177 args.dev = priv->dma_dev;
178 args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
179 args.nr_pages = count;
180 args.pages = add->pages;
181 args.frames = add->frames;
182
183 if (gnttab_dma_alloc_pages(&args))
184 goto err;
185
186 add->dma_vaddr = args.vaddr;
187 add->dma_bus_addr = args.dev_bus_addr;
188 } else
189#endif
190 if (gnttab_alloc_pages(count, add->pages))
191 goto err;
192
193 for (i = 0; i < count; i++) {
194 add->map_ops[i].handle = -1;
195 add->unmap_ops[i].handle = -1;
196 add->kmap_ops[i].handle = -1;
197 add->kunmap_ops[i].handle = -1;
198 }
199
200 add->index = 0;
201 add->count = count;
202 refcount_set(&add->users, 1);
203
204 return add;
205
206err:
207 gntdev_free_map(add);
208 return NULL;
209}
210
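/*
 * Insert @add into priv->maps, which is kept sorted by index, and assign
 * it the first index range that does not collide with an existing map.
 * Callers hold priv->lock.
 */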
211void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
212{
213 struct gntdev_grant_map *map;
214
215 list_for_each_entry(map, &priv->maps, next) {
216 if (add->index + add->count < map->index) {
217 list_add_tail(&add->next, &map->next);
218 goto done;
219 }
220 add->index = map->index + map->count;
221 }
222 list_add_tail(&add->next, &priv->maps);
223
224done:
225 gntdev_print_maps(priv, "[new]", add->index);
226}
227
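/*
 * Look up a map by its starting index (and, if @count is non-zero, by its
 * exact size). Called with priv->lock held.
 */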
228static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
229 int index, int count)
230{
231 struct gntdev_grant_map *map;
232
233 list_for_each_entry(map, &priv->maps, next) {
234 if (map->index != index)
235 continue;
236 if (count && map->count != count)
237 continue;
238 return map;
239 }
240 return NULL;
241}
242
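/*
 * Drop one reference to @map. When the last reference goes away, any
 * remaining grants are unmapped (for the !use_ptemod case), a pending
 * unmap notification is sent, the map is unlinked from priv->freeable_maps
 * if needed, and the map is freed. @priv may be NULL when the map has
 * already been removed from its list.
 */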
243void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
244{
245 if (!map)
246 return;
247
248 if (!refcount_dec_and_test(&map->users))
249 return;
250
251 atomic_sub(map->count, &pages_mapped);
252 if (map->pages && !use_ptemod) {
253 /*
254 * Increment the reference count. This ensures that the
255 * subsequent call to unmap_grant_pages() will not wind up
256 * re-entering itself. It *can* wind up calling
257 * gntdev_put_map() recursively, but such calls will be with a
258 * reference count greater than 1, so they will return before
259 * this code is reached. The recursion depth is thus limited to
260 * 1. Do NOT use refcount_inc() here, as it will detect that
261 * the reference count is zero and WARN().
262 */
263 refcount_set(&map->users, 1);
264
265 /*
266 * Unmap the grants. This may or may not be asynchronous, so it
267 * is possible that the reference count is 1 on return, but it
268 * could also be greater than 1.
269 */
270 unmap_grant_pages(map, 0, map->count);
271
272 /* Check if the memory now needs to be freed */
273 if (!refcount_dec_and_test(&map->users))
274 return;
275
276 /*
277 * All pages have been returned to the hypervisor, so free the
278 * map.
279 */
280 }
281
282 if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
283 notify_remote_via_evtchn(map->notify.event);
284 evtchn_put(map->notify.event);
285 }
286
287 if (populate_freeable_maps && priv) {
288 mutex_lock(&priv->lock);
289 list_del(&map->next);
290 mutex_unlock(&priv->lock);
291 }
292
295 gntdev_free_map(map);
296}
297
298/* ------------------------------------------------------------------ */
299
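/*
 * apply_to_page_range() callback, run once per user PTE of the VMA in the
 * use_ptemod (PV) case. It records the machine address of each PTE in the
 * corresponding map_op/unmap_op so that the hypervisor writes the grant
 * mapping straight into the page table (GNTMAP_contains_pte).
 */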
300static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
301{
302 struct gntdev_grant_map *map = data;
303 unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
304 int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
305 u64 pte_maddr;
306
307 BUG_ON(pgnr >= map->count);
308 pte_maddr = arbitrary_virt_to_machine(pte).maddr;
309
310 /*
311 * Set the PTE as special to force get_user_pages_fast() to fall
312 * back to the slow path. If this is not supported as part of
313 * the grant map, it will be done afterwards.
314 */
315 if (xen_feature(XENFEAT_gnttab_map_avail_bits))
316 flags |= (1 << _GNTMAP_guest_avail0);
317
318 gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
319 map->grants[pgnr].ref,
320 map->grants[pgnr].domid);
321 gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
322 -1 /* handle */);
323 return 0;
324}
325
326#ifdef CONFIG_X86
327static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
328{
329 set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
330 return 0;
331}
332#endif
333
334int gntdev_map_grant_pages(struct gntdev_grant_map *map)
335{
336 size_t alloced = 0;
337 int i, err = 0;
338
339 if (!use_ptemod) {
340 /* Note: it could already be mapped */
341 if (map->map_ops[0].handle != -1)
342 return 0;
343 for (i = 0; i < map->count; i++) {
344 unsigned long addr = (unsigned long)
345 pfn_to_kaddr(page_to_pfn(map->pages[i]));
346 gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
347 map->grants[i].ref,
348 map->grants[i].domid);
349 gnttab_set_unmap_op(&map->unmap_ops[i], addr,
350 map->flags, -1 /* handle */);
351 }
352 } else {
353 /*
354 * Set up the map_ops corresponding to the pte entries pointing
355 * to the kernel linear addresses of the struct pages.
356 * These ptes are completely different from the user ptes dealt
357 * with by find_grant_ptes().
358 * Note that GNTMAP_device_map isn't needed here: The
359 * dev_bus_addr output field gets consumed only from ->map_ops,
360 * and by not requesting it when mapping we also avoid needing
361 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
362 * reference to the page in the hypervisor).
363 */
364 unsigned int flags = (map->flags & ~GNTMAP_device_map) |
365 GNTMAP_host_map;
366
367 for (i = 0; i < map->count; i++) {
368 unsigned long address = (unsigned long)
369 pfn_to_kaddr(page_to_pfn(map->pages[i]));
370 BUG_ON(PageHighMem(map->pages[i]));
371
372 gnttab_set_map_op(&map->kmap_ops[i], address, flags,
373 map->grants[i].ref,
374 map->grants[i].domid);
375 gnttab_set_unmap_op(&map->kunmap_ops[i], address,
376 flags, -1);
377 }
378 }
379
380 pr_debug("map %d+%d\n", map->index, map->count);
381 err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
382 map->pages, map->count);
383
384 for (i = 0; i < map->count; i++) {
385 if (map->map_ops[i].status == GNTST_okay) {
386 map->unmap_ops[i].handle = map->map_ops[i].handle;
387 alloced++;
388 } else if (!err)
389 err = -EINVAL;
390
391 if (map->flags & GNTMAP_device_map)
392 map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
393
394 if (use_ptemod) {
395 if (map->kmap_ops[i].status == GNTST_okay) {
396 alloced++;
397 map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
398 } else if (!err)
399 err = -EINVAL;
400 }
401 }
402 atomic_add(alloced, &map->live_grants);
403 return err;
404}
405
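/*
 * Completion callback for gnttab_unmap_refs_async(). Invalidate the handles
 * of the ops that were processed, drop live_grants by the number of
 * successful unmaps, and release the map reference taken by
 * __unmap_grant_pages().
 */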
406static void __unmap_grant_pages_done(int result,
407 struct gntab_unmap_queue_data *data)
408{
409 unsigned int i;
410 struct gntdev_grant_map *map = data->data;
411 unsigned int offset = data->unmap_ops - map->unmap_ops;
412 int successful_unmaps = 0;
413 int live_grants;
414
415 for (i = 0; i < data->count; i++) {
416 if (map->unmap_ops[offset + i].status == GNTST_okay &&
417 map->unmap_ops[offset + i].handle != -1)
418 successful_unmaps++;
419
420 WARN_ON(map->unmap_ops[offset+i].status &&
421 map->unmap_ops[offset+i].handle != -1);
422 pr_debug("unmap handle=%d st=%d\n",
423 map->unmap_ops[offset+i].handle,
424 map->unmap_ops[offset+i].status);
425 map->unmap_ops[offset+i].handle = -1;
426 if (use_ptemod) {
427 if (map->kunmap_ops[offset + i].status == GNTST_okay &&
428 map->kunmap_ops[offset + i].handle != -1)
429 successful_unmaps++;
430
431 WARN_ON(map->kunmap_ops[offset+i].status &&
432 map->kunmap_ops[offset+i].handle != -1);
433 pr_debug("kunmap handle=%u st=%d\n",
434 map->kunmap_ops[offset+i].handle,
435 map->kunmap_ops[offset+i].status);
436 map->kunmap_ops[offset+i].handle = -1;
437 }
438 }
439
440 /*
441 * Decrease the live-grant counter. This must happen after the loop to
442 * prevent premature reuse of the grants by gntdev_mmap().
443 */
444 live_grants = atomic_sub_return(successful_unmaps, &map->live_grants);
445 if (WARN_ON(live_grants < 0))
446 pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n",
447 __func__, live_grants, successful_unmaps);
448
449 /* Release reference taken by __unmap_grant_pages */
450 gntdev_put_map(NULL, map);
451}
452
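/*
 * Kick off an asynchronous unmap of @pages grants starting at @offset.
 * Clears the unmap-notify byte if it falls inside the range, and takes an
 * extra map reference that __unmap_grant_pages_done() drops once the
 * hypercall completes.
 */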
453static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
454 int pages)
455{
456 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
457 int pgno = (map->notify.addr >> PAGE_SHIFT);
458
459 if (pgno >= offset && pgno < offset + pages) {
460 /* No need for kmap, pages are in lowmem */
461 uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
462
463 tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
464 map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
465 }
466 }
467
468 map->unmap_data.unmap_ops = map->unmap_ops + offset;
469 map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
470 map->unmap_data.pages = map->pages + offset;
471 map->unmap_data.count = pages;
472 map->unmap_data.done = __unmap_grant_pages_done;
473 map->unmap_data.data = map;
474 refcount_inc(&map->users); /* to keep map alive during async call below */
475
476 gnttab_unmap_refs_async(&map->unmap_data);
477}
478
479static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
480 int pages)
481{
482 int range;
483
484 if (atomic_read(&map->live_grants) == 0)
485 return; /* Nothing to do */
486
487 pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
488
489 /* It is possible the requested range will have a "hole" where we
490 * already unmapped some of the grants. Only unmap valid ranges.
491 */
492 while (pages) {
493 while (pages && map->being_removed[offset]) {
494 offset++;
495 pages--;
496 }
497 range = 0;
498 while (range < pages) {
499 if (map->being_removed[offset + range])
500 break;
501 map->being_removed[offset + range] = true;
502 range++;
503 }
504 if (range)
505 __unmap_grant_pages(map, offset, range);
506 offset += range;
507 pages -= range;
508 }
509}
510
511/* ------------------------------------------------------------------ */
512
513static void gntdev_vma_open(struct vm_area_struct *vma)
514{
515 struct gntdev_grant_map *map = vma->vm_private_data;
516
517 pr_debug("gntdev_vma_open %p\n", vma);
518 refcount_inc(&map->users);
519}
520
521static void gntdev_vma_close(struct vm_area_struct *vma)
522{
523 struct gntdev_grant_map *map = vma->vm_private_data;
524 struct file *file = vma->vm_file;
525 struct gntdev_priv *priv = file->private_data;
526
527 pr_debug("gntdev_vma_close %p\n", vma);
528 if (use_ptemod) {
529 /* It is possible that an mmu notifier could be running
530 * concurrently, so take priv->lock to ensure that the vma won't
531 * vanish during the unmap_grant_pages call, since we will
532 * spin here until that completes. Such a concurrent call will
533 * not do any unmapping, since that has been done prior to
534 * closing the vma, but it may still iterate the unmap_ops list.
535 */
536 mutex_lock(&priv->lock);
537 map->vma = NULL;
538 mutex_unlock(&priv->lock);
539 }
540 vma->vm_private_data = NULL;
541 gntdev_put_map(priv, map);
542}
543
544static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
545 unsigned long addr)
546{
547 struct gntdev_grant_map *map = vma->vm_private_data;
548
549 return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
550}
551
552static const struct vm_operations_struct gntdev_vmops = {
553 .open = gntdev_vma_open,
554 .close = gntdev_vma_close,
555 .find_special_page = gntdev_vma_find_special_page,
556};
557
558/* ------------------------------------------------------------------ */
559
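/*
 * MMU notifier plumbing for the use_ptemod case: when part of a mapped VMA
 * is invalidated or the address space goes away, the grants covering the
 * affected range must be unmapped so Xen drops its references before the
 * pages are reused.
 */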
560static bool in_range(struct gntdev_grant_map *map,
561 unsigned long start, unsigned long end)
562{
563 if (!map->vma)
564 return false;
565 if (map->vma->vm_start >= end)
566 return false;
567 if (map->vma->vm_end <= start)
568 return false;
569
570 return true;
571}
572
573static int unmap_if_in_range(struct gntdev_grant_map *map,
574 unsigned long start, unsigned long end,
575 bool blockable)
576{
577 unsigned long mstart, mend;
578
579 if (!in_range(map, start, end))
580 return 0;
581
582 if (!blockable)
583 return -EAGAIN;
584
585 mstart = max(start, map->vma->vm_start);
586 mend = min(end, map->vma->vm_end);
587 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
588 map->index, map->count,
589 map->vma->vm_start, map->vma->vm_end,
590 start, end, mstart, mend);
591 unmap_grant_pages(map,
592 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
593 (mend - mstart) >> PAGE_SHIFT);
594
595 return 0;
596}
597
598static int mn_invl_range_start(struct mmu_notifier *mn,
599 const struct mmu_notifier_range *range)
600{
601 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
602 struct gntdev_grant_map *map;
603 int ret = 0;
604
605 if (mmu_notifier_range_blockable(range))
606 mutex_lock(&priv->lock);
607 else if (!mutex_trylock(&priv->lock))
608 return -EAGAIN;
609
610 list_for_each_entry(map, &priv->maps, next) {
611 ret = unmap_if_in_range(map, range->start, range->end,
612 mmu_notifier_range_blockable(range));
613 if (ret)
614 goto out_unlock;
615 }
616 list_for_each_entry(map, &priv->freeable_maps, next) {
617 ret = unmap_if_in_range(map, range->start, range->end,
618 mmu_notifier_range_blockable(range));
619 if (ret)
620 goto out_unlock;
621 }
622
623out_unlock:
624 mutex_unlock(&priv->lock);
625
626 return ret;
627}
628
629static void mn_release(struct mmu_notifier *mn,
630 struct mm_struct *mm)
631{
632 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
633 struct gntdev_grant_map *map;
634
635 mutex_lock(&priv->lock);
636 list_for_each_entry(map, &priv->maps, next) {
637 if (!map->vma)
638 continue;
639 pr_debug("map %d+%d (%lx %lx)\n",
640 map->index, map->count,
641 map->vma->vm_start, map->vma->vm_end);
642 unmap_grant_pages(map, /* offset */ 0, map->count);
643 }
644 list_for_each_entry(map, &priv->freeable_maps, next) {
645 if (!map->vma)
646 continue;
647 pr_debug("map %d+%d (%lx %lx)\n",
648 map->index, map->count,
649 map->vma->vm_start, map->vma->vm_end);
650 unmap_grant_pages(map, /* offset */ 0, map->count);
651 }
652 mutex_unlock(&priv->lock);
653}
654
655static const struct mmu_notifier_ops gntdev_mmu_ops = {
656 .release = mn_release,
657 .invalidate_range_start = mn_invl_range_start,
658};
659
660/* ------------------------------------------------------------------ */
661
662static int gntdev_open(struct inode *inode, struct file *flip)
663{
664 struct gntdev_priv *priv;
665 int ret = 0;
666
667 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
668 if (!priv)
669 return -ENOMEM;
670
671 INIT_LIST_HEAD(&priv->maps);
672 INIT_LIST_HEAD(&priv->freeable_maps);
673 mutex_init(&priv->lock);
674
675#ifdef CONFIG_XEN_GNTDEV_DMABUF
676 priv->dmabuf_priv = gntdev_dmabuf_init(flip);
677 if (IS_ERR(priv->dmabuf_priv)) {
678 ret = PTR_ERR(priv->dmabuf_priv);
679 kfree(priv);
680 return ret;
681 }
682#endif
683
684 if (use_ptemod) {
685 priv->mm = get_task_mm(current);
686 if (!priv->mm) {
687 kfree(priv);
688 return -ENOMEM;
689 }
690 priv->mn.ops = &gntdev_mmu_ops;
691 ret = mmu_notifier_register(&priv->mn, priv->mm);
692 mmput(priv->mm);
693 }
694
695 if (ret) {
696 kfree(priv);
697 return ret;
698 }
699
700 flip->private_data = priv;
701#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
702 priv->dma_dev = gntdev_miscdev.this_device;
703 dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
704#endif
705 pr_debug("priv %p\n", priv);
706
707 return 0;
708}
709
710static int gntdev_release(struct inode *inode, struct file *flip)
711{
712 struct gntdev_priv *priv = flip->private_data;
713 struct gntdev_grant_map *map;
714
715 pr_debug("priv %p\n", priv);
716
717 mutex_lock(&priv->lock);
718 while (!list_empty(&priv->maps)) {
719 map = list_entry(priv->maps.next,
720 struct gntdev_grant_map, next);
721 list_del(&map->next);
722 gntdev_put_map(NULL /* already removed */, map);
723 }
724 WARN_ON(!list_empty(&priv->freeable_maps));
725 mutex_unlock(&priv->lock);
726
727#ifdef CONFIG_XEN_GNTDEV_DMABUF
728 gntdev_dmabuf_fini(priv->dmabuf_priv);
729#endif
730
731 if (use_ptemod)
732 mmu_notifier_unregister(&priv->mn, priv->mm);
733
734 kfree(priv);
735 return 0;
736}
737
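/*
 * Minimal user-space sketch of the map path (not part of this driver;
 * placeholder values, error handling omitted): map one foreign grant and
 * mmap() it at the byte offset returned by the ioctl.
 *
 *	struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
 *	op.refs[0].domid = remote_domid;	// assumed values
 *	op.refs[0].ref = gref;
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, op.index);
 */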
738static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
739 struct ioctl_gntdev_map_grant_ref __user *u)
740{
741 struct ioctl_gntdev_map_grant_ref op;
742 struct gntdev_grant_map *map;
743 int err;
744
745 if (copy_from_user(&op, u, sizeof(op)) != 0)
746 return -EFAULT;
747 pr_debug("priv %p, add %d\n", priv, op.count);
748 if (unlikely(op.count <= 0))
749 return -EINVAL;
750
751 err = -ENOMEM;
752 map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
753 if (!map)
754 return err;
755
756 if (unlikely(gntdev_account_mapped_pages(op.count))) {
757 pr_debug("can't map: over limit\n");
758 gntdev_put_map(NULL, map);
759 return err;
760 }
761
762 if (copy_from_user(map->grants, &u->refs,
763 sizeof(map->grants[0]) * op.count) != 0) {
764 gntdev_put_map(NULL, map);
765 return -EFAULT;
766 }
767
768 mutex_lock(&priv->lock);
769 gntdev_add_map(priv, map);
770 op.index = map->index << PAGE_SHIFT;
771 mutex_unlock(&priv->lock);
772
773 if (copy_to_user(u, &op, sizeof(op)) != 0)
774 return -EFAULT;
775
776 return 0;
777}
778
779static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
780 struct ioctl_gntdev_unmap_grant_ref __user *u)
781{
782 struct ioctl_gntdev_unmap_grant_ref op;
783 struct gntdev_grant_map *map;
784 int err = -ENOENT;
785
786 if (copy_from_user(&op, u, sizeof(op)) != 0)
787 return -EFAULT;
788 pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
789
790 mutex_lock(&priv->lock);
791 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
792 if (map) {
793 list_del(&map->next);
794 if (populate_freeable_maps)
795 list_add_tail(&map->next, &priv->freeable_maps);
796 err = 0;
797 }
798 mutex_unlock(&priv->lock);
799 if (map)
800 gntdev_put_map(priv, map);
801 return err;
802}
803
804static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
805 struct ioctl_gntdev_get_offset_for_vaddr __user *u)
806{
807 struct ioctl_gntdev_get_offset_for_vaddr op;
808 struct vm_area_struct *vma;
809 struct gntdev_grant_map *map;
810 int rv = -EINVAL;
811
812 if (copy_from_user(&op, u, sizeof(op)) != 0)
813 return -EFAULT;
814 pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
815
816 down_read(&current->mm->mmap_sem);
817 vma = find_vma(current->mm, op.vaddr);
818 if (!vma || vma->vm_ops != &gntdev_vmops)
819 goto out_unlock;
820
821 map = vma->vm_private_data;
822 if (!map)
823 goto out_unlock;
824
825 op.offset = map->index << PAGE_SHIFT;
826 op.count = map->count;
827 rv = 0;
828
829 out_unlock:
830 up_read(&current->mm->mmap_sem);
831
832 if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
833 return -EFAULT;
834 return rv;
835}
836
837static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
838{
839 struct ioctl_gntdev_unmap_notify op;
840 struct gntdev_grant_map *map;
841 int rc;
842 int out_flags;
843 unsigned int out_event;
844
845 if (copy_from_user(&op, u, sizeof(op)))
846 return -EFAULT;
847
848 if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
849 return -EINVAL;
850
851 /* We need to grab a reference to the event channel we are going to use
852 * to send the notify before releasing the reference we may already have
853 * (if someone has called this ioctl twice). This is required so that
854 * it is possible to change the clear_byte part of the notification
855 * without disturbing the event channel part, which may now be the last
856 * reference to that event channel.
857 */
858 if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
859 if (evtchn_get(op.event_channel_port))
860 return -EINVAL;
861 }
862
863 out_flags = op.action;
864 out_event = op.event_channel_port;
865
866 mutex_lock(&priv->lock);
867
868 list_for_each_entry(map, &priv->maps, next) {
869 uint64_t begin = map->index << PAGE_SHIFT;
870 uint64_t end = (map->index + map->count) << PAGE_SHIFT;
871 if (op.index >= begin && op.index < end)
872 goto found;
873 }
874 rc = -ENOENT;
875 goto unlock_out;
876
877 found:
878 if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
879 (map->flags & GNTMAP_readonly)) {
880 rc = -EINVAL;
881 goto unlock_out;
882 }
883
884 out_flags = map->notify.flags;
885 out_event = map->notify.event;
886
887 map->notify.flags = op.action;
888 map->notify.addr = op.index - (map->index << PAGE_SHIFT);
889 map->notify.event = op.event_channel_port;
890
891 rc = 0;
892
893 unlock_out:
894 mutex_unlock(&priv->lock);
895
896 /* Drop the reference to the event channel we did not save in the map */
897 if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
898 evtchn_put(out_event);
899
900 return rc;
901}
902
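/*
 * The grant-copy ioctl batches up to GNTDEV_COPY_BATCH gnttab_copy
 * operations. Local buffers are pinned one page at a time with
 * get_user_pages_fast() and released once the batch has been submitted.
 *
 * Minimal user-space sketch (assumed values, error handling omitted):
 * copy 4 KiB from a foreign grant into a local buffer.
 *
 *	struct gntdev_grant_copy_segment seg = {
 *		.flags = GNTCOPY_source_gref,
 *		.len = 4096,
 *		.source.foreign = { .ref = gref, .domid = remote_domid },
 *		.dest.virt = buf,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = { .count = 1, .segments = &seg };
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 */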
903#define GNTDEV_COPY_BATCH 16
904
905struct gntdev_copy_batch {
906 struct gnttab_copy ops[GNTDEV_COPY_BATCH];
907 struct page *pages[GNTDEV_COPY_BATCH];
908 s16 __user *status[GNTDEV_COPY_BATCH];
909 unsigned int nr_ops;
910 unsigned int nr_pages;
911 bool writeable;
912};
913
914static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
915 unsigned long *gfn)
916{
917 unsigned long addr = (unsigned long)virt;
918 struct page *page;
919 unsigned long xen_pfn;
920 int ret;
921
922 ret = get_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
923 if (ret < 0)
924 return ret;
925
926 batch->pages[batch->nr_pages++] = page;
927
928 xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
929 *gfn = pfn_to_gfn(xen_pfn);
930
931 return 0;
932}
933
934static void gntdev_put_pages(struct gntdev_copy_batch *batch)
935{
936 unsigned int i;
937
938 for (i = 0; i < batch->nr_pages; i++) {
939 if (batch->writeable && !PageDirty(batch->pages[i]))
940 set_page_dirty_lock(batch->pages[i]);
941 put_page(batch->pages[i]);
942 }
943 batch->nr_pages = 0;
944 batch->writeable = false;
945}
946
947static int gntdev_copy(struct gntdev_copy_batch *batch)
948{
949 unsigned int i;
950
951 gnttab_batch_copy(batch->ops, batch->nr_ops);
952 gntdev_put_pages(batch);
953
954 /*
955 * For each completed op, update the status if the op failed
956 * and all previous ops for the segment were successful.
957 */
958 for (i = 0; i < batch->nr_ops; i++) {
959 s16 status = batch->ops[i].status;
960 s16 old_status;
961
962 if (status == GNTST_okay)
963 continue;
964
965 if (__get_user(old_status, batch->status[i]))
966 return -EFAULT;
967
968 if (old_status != GNTST_okay)
969 continue;
970
971 if (__put_user(status, batch->status[i]))
972 return -EFAULT;
973 }
974
975 batch->nr_ops = 0;
976 return 0;
977}
978
979static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
980 struct gntdev_grant_copy_segment *seg,
981 s16 __user *status)
982{
983 uint16_t copied = 0;
984
985 /*
986 * Disallow local -> local copies since there is only space in
987 * batch->pages for one page per-op and this would be a very
988 * expensive memcpy().
989 */
990 if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
991 return -EINVAL;
992
993 /* Can't cross page if source/dest is a grant ref. */
994 if (seg->flags & GNTCOPY_source_gref) {
995 if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
996 return -EINVAL;
997 }
998 if (seg->flags & GNTCOPY_dest_gref) {
999 if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
1000 return -EINVAL;
1001 }
1002
1003 if (put_user(GNTST_okay, status))
1004 return -EFAULT;
1005
1006 while (copied < seg->len) {
1007 struct gnttab_copy *op;
1008 void __user *virt;
1009 size_t len, off;
1010 unsigned long gfn;
1011 int ret;
1012
1013 if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
1014 ret = gntdev_copy(batch);
1015 if (ret < 0)
1016 return ret;
1017 }
1018
1019 len = seg->len - copied;
1020
1021 op = &batch->ops[batch->nr_ops];
1022 op->flags = 0;
1023
1024 if (seg->flags & GNTCOPY_source_gref) {
1025 op->source.u.ref = seg->source.foreign.ref;
1026 op->source.domid = seg->source.foreign.domid;
1027 op->source.offset = seg->source.foreign.offset + copied;
1028 op->flags |= GNTCOPY_source_gref;
1029 } else {
1030 virt = seg->source.virt + copied;
1031 off = (unsigned long)virt & ~XEN_PAGE_MASK;
1032 len = min(len, (size_t)XEN_PAGE_SIZE - off);
1033 batch->writeable = false;
1034
1035 ret = gntdev_get_page(batch, virt, &gfn);
1036 if (ret < 0)
1037 return ret;
1038
1039 op->source.u.gmfn = gfn;
1040 op->source.domid = DOMID_SELF;
1041 op->source.offset = off;
1042 }
1043
1044 if (seg->flags & GNTCOPY_dest_gref) {
1045 op->dest.u.ref = seg->dest.foreign.ref;
1046 op->dest.domid = seg->dest.foreign.domid;
1047 op->dest.offset = seg->dest.foreign.offset + copied;
1048 op->flags |= GNTCOPY_dest_gref;
1049 } else {
1050 virt = seg->dest.virt + copied;
1051 off = (unsigned long)virt & ~XEN_PAGE_MASK;
1052 len = min(len, (size_t)XEN_PAGE_SIZE - off);
1053 batch->writeable = true;
1054
1055 ret = gntdev_get_page(batch, virt, &gfn);
1056 if (ret < 0)
1057 return ret;
1058
1059 op->dest.u.gmfn = gfn;
1060 op->dest.domid = DOMID_SELF;
1061 op->dest.offset = off;
1062 }
1063
1064 op->len = len;
1065 copied += len;
1066
1067 batch->status[batch->nr_ops] = status;
1068 batch->nr_ops++;
1069 }
1070
1071 return 0;
1072}
1073
1074static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
1075{
1076 struct ioctl_gntdev_grant_copy copy;
1077 struct gntdev_copy_batch batch;
1078 unsigned int i;
1079 int ret = 0;
1080
1081 if (copy_from_user(&copy, u, sizeof(copy)))
1082 return -EFAULT;
1083
1084 batch.nr_ops = 0;
1085 batch.nr_pages = 0;
1086
1087 for (i = 0; i < copy.count; i++) {
1088 struct gntdev_grant_copy_segment seg;
1089
1090 if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
1091 ret = -EFAULT;
1092 goto out;
1093 }
1094
1095 ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
1096 if (ret < 0)
1097 goto out;
1098
1099 cond_resched();
1100 }
1101 if (batch.nr_ops)
1102 ret = gntdev_copy(&batch);
1103 return ret;
1104
1105 out:
1106 gntdev_put_pages(&batch);
1107 return ret;
1108}
1109
1110static long gntdev_ioctl(struct file *flip,
1111 unsigned int cmd, unsigned long arg)
1112{
1113 struct gntdev_priv *priv = flip->private_data;
1114 void __user *ptr = (void __user *)arg;
1115
1116 switch (cmd) {
1117 case IOCTL_GNTDEV_MAP_GRANT_REF:
1118 return gntdev_ioctl_map_grant_ref(priv, ptr);
1119
1120 case IOCTL_GNTDEV_UNMAP_GRANT_REF:
1121 return gntdev_ioctl_unmap_grant_ref(priv, ptr);
1122
1123 case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
1124 return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
1125
1126 case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
1127 return gntdev_ioctl_notify(priv, ptr);
1128
1129 case IOCTL_GNTDEV_GRANT_COPY:
1130 return gntdev_ioctl_grant_copy(priv, ptr);
1131
1132#ifdef CONFIG_XEN_GNTDEV_DMABUF
1133 case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
1134 return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
1135
1136 case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
1137 return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
1138
1139 case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
1140 return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);
1141
1142 case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
1143 return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
1144#endif
1145
1146 default:
1147 pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
1148 return -ENOIOCTLCMD;
1149 }
1150
1151 return 0;
1152}
1153
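/*
 * mmap handler: vm_pgoff selects the grant map created earlier by
 * IOCTL_GNTDEV_MAP_GRANT_REF. In the use_ptemod (PV) case the grant
 * mappings are written directly into the user page tables via
 * find_grant_ptes(); otherwise the granted pages are inserted with
 * vm_map_pages_zero().
 */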
1154static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1155{
1156 struct gntdev_priv *priv = flip->private_data;
1157 int index = vma->vm_pgoff;
1158 int count = vma_pages(vma);
1159 struct gntdev_grant_map *map;
1160 int err = -EINVAL;
1161
1162 if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
1163 return -EINVAL;
1164
1165 pr_debug("map %d+%d at %lx (pgoff %lx)\n",
1166 index, count, vma->vm_start, vma->vm_pgoff);
1167
1168 mutex_lock(&priv->lock);
1169 map = gntdev_find_map_index(priv, index, count);
1170 if (!map)
1171 goto unlock_out;
1172 if (use_ptemod && map->vma)
1173 goto unlock_out;
1174 if (use_ptemod && priv->mm != vma->vm_mm) {
1175 pr_warn("Huh? Other mm?\n");
1176 goto unlock_out;
1177 }
1178
1179 if (atomic_read(&map->live_grants)) {
1180 err = -EAGAIN;
1181 goto unlock_out;
1182 }
1183 refcount_inc(&map->users);
1184
1185 vma->vm_ops = &gntdev_vmops;
1186
1187 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
1188
1189 if (use_ptemod)
1190 vma->vm_flags |= VM_DONTCOPY;
1191
1192 vma->vm_private_data = map;
1193
1194 if (use_ptemod)
1195 map->vma = vma;
1196
1197 if (map->flags) {
1198 if ((vma->vm_flags & VM_WRITE) &&
1199 (map->flags & GNTMAP_readonly))
1200 goto out_unlock_put;
1201 } else {
1202 map->flags = GNTMAP_host_map;
1203 if (!(vma->vm_flags & VM_WRITE))
1204 map->flags |= GNTMAP_readonly;
1205 }
1206
1207 mutex_unlock(&priv->lock);
1208
1209 if (use_ptemod) {
1210 map->pages_vm_start = vma->vm_start;
1211 err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1212 vma->vm_end - vma->vm_start,
1213 find_grant_ptes, map);
1214 if (err) {
1215 pr_warn("find_grant_ptes() failure.\n");
1216 goto out_put_map;
1217 }
1218 }
1219
1220 err = gntdev_map_grant_pages(map);
1221 if (err)
1222 goto out_put_map;
1223
1224 if (!use_ptemod) {
1225 err = vm_map_pages_zero(vma, map->pages, map->count);
1226 if (err)
1227 goto out_put_map;
1228 } else {
1229#ifdef CONFIG_X86
1230 /*
1231 * If the PTEs were not made special by the grant map
1232 * hypercall, do so here.
1233 *
1234 * This is racy since the mapping is already visible
1235 * to userspace but userspace should be well-behaved
1236 * enough to not touch it until the mmap() call
1237 * returns.
1238 */
1239 if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
1240 apply_to_page_range(vma->vm_mm, vma->vm_start,
1241 vma->vm_end - vma->vm_start,
1242 set_grant_ptes_as_special, NULL);
1243 }
1244#endif
1245 }
1246
1247 return 0;
1248
1249unlock_out:
1250 mutex_unlock(&priv->lock);
1251 return err;
1252
1253out_unlock_put:
1254 mutex_unlock(&priv->lock);
1255out_put_map:
1256 if (use_ptemod) {
1257 map->vma = NULL;
1258 unmap_grant_pages(map, 0, map->count);
1259 }
1260 gntdev_put_map(priv, map);
1261 return err;
1262}
1263
1264static const struct file_operations gntdev_fops = {
1265 .owner = THIS_MODULE,
1266 .open = gntdev_open,
1267 .release = gntdev_release,
1268 .mmap = gntdev_mmap,
1269 .unlocked_ioctl = gntdev_ioctl
1270};
1271
1272static struct miscdevice gntdev_miscdev = {
1273 .minor = MISC_DYNAMIC_MINOR,
1274 .name = "xen/gntdev",
1275 .fops = &gntdev_fops,
1276};
1277
1278/* ------------------------------------------------------------------ */
1279
1280static int __init gntdev_init(void)
1281{
1282 int err;
1283
1284 if (!xen_domain())
1285 return -ENODEV;
1286
1287 use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
1288
1289 err = misc_register(&gntdev_miscdev);
1290 if (err != 0) {
1291 pr_err("Could not register gntdev device\n");
1292 return err;
1293 }
1294 return 0;
1295}
1296
1297static void __exit gntdev_exit(void)
1298{
1299 misc_deregister(&gntdev_miscdev);
1300}
1301
1302module_init(gntdev_init);
1303module_exit(gntdev_exit);
1304
1305/* ------------------------------------------------------------------ */