1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 * Author: Sumit Semwal <sumit.semwal@ti.com>
7 *
8 * Many thanks to linaro-mm-sig list, and especially
9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 * refining of this idea.
12 */
13
14#include <linux/fs.h>
15#include <linux/slab.h>
16#include <linux/dma-buf.h>
17#include <linux/dma-fence.h>
18#include <linux/anon_inodes.h>
19#include <linux/export.h>
20#include <linux/debugfs.h>
21#include <linux/module.h>
22#include <linux/seq_file.h>
23#include <linux/poll.h>
24#include <linux/dma-resv.h>
25#include <linux/mm.h>
26#include <linux/mount.h>
27#include <linux/pseudo_fs.h>
28
29#include <uapi/linux/dma-buf.h>
30#include <uapi/linux/magic.h>
31
32#include "dma-buf-sysfs-stats.h"
33
34static inline int is_dma_buf_file(struct file *);
35
36struct dma_buf_list {
37 struct list_head head;
38 struct mutex lock;
39};
40
41static struct dma_buf_list db_list;
42
43static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
44{
45 struct dma_buf *dmabuf;
46 char name[DMA_BUF_NAME_LEN];
47 size_t ret = 0;
48
49 dmabuf = dentry->d_fsdata;
50 spin_lock(&dmabuf->name_lock);
51 if (dmabuf->name)
52 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
53 spin_unlock(&dmabuf->name_lock);
54
55 return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
56 dentry->d_name.name, ret > 0 ? name : "");
57}
58
59static void dma_buf_release(struct dentry *dentry)
60{
61 struct dma_buf *dmabuf;
62
63 dmabuf = dentry->d_fsdata;
64 if (unlikely(!dmabuf))
65 return;
66
67 BUG_ON(dmabuf->vmapping_counter);
68
69 /*
70 * Any fences that a dma-buf poll can wait on should be signaled
71 * before releasing dma-buf. This is the responsibility of each
72 * driver that uses the reservation objects.
73 *
74 * If you hit this BUG() it means someone dropped their ref to the
75 * dma-buf while still having pending operations on the buffer.
76 */
77 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
78
79 dmabuf->ops->release(dmabuf);
80
81 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
82 dma_resv_fini(dmabuf->resv);
83
84 dma_buf_stats_teardown(dmabuf);
85 WARN_ON(!list_empty(&dmabuf->attachments));
86 module_put(dmabuf->owner);
87 kfree(dmabuf->name);
88 kfree(dmabuf);
89}
90
91static int dma_buf_file_release(struct inode *inode, struct file *file)
92{
93 struct dma_buf *dmabuf;
94
95 if (!is_dma_buf_file(file))
96 return -EINVAL;
97
98 dmabuf = file->private_data;
99
100 mutex_lock(&db_list.lock);
101 list_del(&dmabuf->list_node);
102 mutex_unlock(&db_list.lock);
103
104 return 0;
105}
106
107static const struct dentry_operations dma_buf_dentry_ops = {
108 .d_dname = dmabuffs_dname,
109 .d_release = dma_buf_release,
110};
111
112static struct vfsmount *dma_buf_mnt;
113
114static int dma_buf_fs_init_context(struct fs_context *fc)
115{
116 struct pseudo_fs_context *ctx;
117
118 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
119 if (!ctx)
120 return -ENOMEM;
121 ctx->dops = &dma_buf_dentry_ops;
122 return 0;
123}
124
125static struct file_system_type dma_buf_fs_type = {
126 .name = "dmabuf",
127 .init_fs_context = dma_buf_fs_init_context,
128 .kill_sb = kill_anon_super,
129};
130
131static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
132{
133 struct dma_buf *dmabuf;
134
135 if (!is_dma_buf_file(file))
136 return -EINVAL;
137
138 dmabuf = file->private_data;
139
140 /* check if buffer supports mmap */
141 if (!dmabuf->ops->mmap)
142 return -EINVAL;
143
144 /* check for overflowing the buffer's size */
145 if (vma->vm_pgoff + vma_pages(vma) >
146 dmabuf->size >> PAGE_SHIFT)
147 return -EINVAL;
148
149 return dmabuf->ops->mmap(dmabuf, vma);
150}
151
152static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
153{
154 struct dma_buf *dmabuf;
155 loff_t base;
156
157 if (!is_dma_buf_file(file))
158 return -EBADF;
159
160 dmabuf = file->private_data;
161
162 /* only support discovering the end of the buffer,
163 but also allow SEEK_SET to maintain the idiomatic
164 SEEK_END(0), SEEK_CUR(0) pattern */
165 if (whence == SEEK_END)
166 base = dmabuf->size;
167 else if (whence == SEEK_SET)
168 base = 0;
169 else
170 return -EINVAL;
171
172 if (offset != 0)
173 return -EINVAL;
174
175 return base + offset;
176}
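
/*
 * Example: the llseek support above exists only so userspace can discover the
 * size of a dma-buf. A minimal userspace sketch of that idiom, assuming
 * dmabuf_fd is a dma-buf file descriptor obtained from some exporting driver
 * (the helper name is made up for illustration)::
 *
 *	#include <unistd.h>
 *
 *	static off_t dma_buf_query_size(int dmabuf_fd)
 *	{
 *		off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *
 *		if (size < 0)
 *			return -1;		// errno set by lseek()
 *		// rewind so the fd is back at the expected offset
 *		if (lseek(dmabuf_fd, 0, SEEK_SET) < 0)
 *			return -1;
 *		return size;
 *	}
 */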
177
178/**
179 * DOC: fence polling
180 *
181 * To support cross-device and cross-driver synchronization of buffer access
182 * implicit fences (represented internally in the kernel with &struct dma_fence) can
183 * be attached to a &dma_buf. The glue for that and a few related things are
184 * provided in the &dma_resv structure.
185 *
186 * Userspace can query the state of these implicitly tracked fences using poll()
187 * and related system calls:
188 *
189 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
190 * most recent write or exclusive fence.
191 *
192 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
193 * all attached fences, shared and exclusive ones.
194 *
195 * Note that this only signals the completion of the respective fences, i.e. the
196 * DMA transfers are complete. Cache flushing and any other necessary
197 * preparations before CPU access can begin still need to happen.
198 */
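
/*
 * A minimal userspace sketch of the poll() semantics described above, assuming
 * dmabuf_fd is a dma-buf file descriptor (the timeout value is arbitrary)::
 *
 *	#include <poll.h>
 *
 *	// Returns 1 when the fences relevant for 'events' have signalled,
 *	// 0 on timeout, -1 on error. Remember that cache maintenance may
 *	// still be required before the CPU actually touches the buffer.
 *	static int wait_for_dma_buf(int dmabuf_fd, short events, int timeout_ms)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = events,	// POLLIN and/or POLLOUT
 *		};
 *		int ret = poll(&pfd, 1, timeout_ms);
 *
 *		if (ret < 0)
 *			return -1;
 *		return ret ? 1 : 0;
 *	}
 */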
199
200static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
201{
202 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
203 unsigned long flags;
204
205 spin_lock_irqsave(&dcb->poll->lock, flags);
206 wake_up_locked_poll(dcb->poll, dcb->active);
207 dcb->active = 0;
208 spin_unlock_irqrestore(&dcb->poll->lock, flags);
209}
210
211static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
212{
213 struct dma_buf *dmabuf;
214 struct dma_resv *resv;
215 struct dma_resv_list *fobj;
216 struct dma_fence *fence_excl;
217 __poll_t events;
218 unsigned shared_count, seq;
219
220 dmabuf = file->private_data;
221 if (!dmabuf || !dmabuf->resv)
222 return EPOLLERR;
223
224 resv = dmabuf->resv;
225
226 poll_wait(file, &dmabuf->poll, poll);
227
228 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
229 if (!events)
230 return 0;
231
232retry:
233 seq = read_seqcount_begin(&resv->seq);
234 rcu_read_lock();
235
236 fobj = rcu_dereference(resv->fence);
237 if (fobj)
238 shared_count = fobj->shared_count;
239 else
240 shared_count = 0;
241 fence_excl = rcu_dereference(resv->fence_excl);
242 if (read_seqcount_retry(&resv->seq, seq)) {
243 rcu_read_unlock();
244 goto retry;
245 }
246
247 if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
248 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
249 __poll_t pevents = EPOLLIN;
250
251 if (shared_count == 0)
252 pevents |= EPOLLOUT;
253
254 spin_lock_irq(&dmabuf->poll.lock);
255 if (dcb->active) {
256 dcb->active |= pevents;
257 events &= ~pevents;
258 } else
259 dcb->active = pevents;
260 spin_unlock_irq(&dmabuf->poll.lock);
261
262 if (events & pevents) {
263 if (!dma_fence_get_rcu(fence_excl)) {
264 /* force a recheck */
265 events &= ~pevents;
266 dma_buf_poll_cb(NULL, &dcb->cb);
267 } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
268 dma_buf_poll_cb)) {
269 events &= ~pevents;
270 dma_fence_put(fence_excl);
271 } else {
272 /*
273 * No callback queued, wake up any additional
274 * waiters.
275 */
276 dma_fence_put(fence_excl);
277 dma_buf_poll_cb(NULL, &dcb->cb);
278 }
279 }
280 }
281
282 if ((events & EPOLLOUT) && shared_count > 0) {
283 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
284 int i;
285
286 /* Only queue a new callback if no event has fired yet */
287 spin_lock_irq(&dmabuf->poll.lock);
288 if (dcb->active)
289 events &= ~EPOLLOUT;
290 else
291 dcb->active = EPOLLOUT;
292 spin_unlock_irq(&dmabuf->poll.lock);
293
294 if (!(events & EPOLLOUT))
295 goto out;
296
297 for (i = 0; i < shared_count; ++i) {
298 struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
299
300 if (!dma_fence_get_rcu(fence)) {
301 /*
302 * fence refcount dropped to zero, this means
303 * that fobj has been freed
304 *
305 * call dma_buf_poll_cb and force a recheck!
306 */
307 events &= ~EPOLLOUT;
308 dma_buf_poll_cb(NULL, &dcb->cb);
309 break;
310 }
311 if (!dma_fence_add_callback(fence, &dcb->cb,
312 dma_buf_poll_cb)) {
313 dma_fence_put(fence);
314 events &= ~EPOLLOUT;
315 break;
316 }
317 dma_fence_put(fence);
318 }
319
320 /* No callback queued, wake up any additional waiters. */
321 if (i == shared_count)
322 dma_buf_poll_cb(NULL, &dcb->cb);
323 }
324
325out:
326 rcu_read_unlock();
327 return events;
328}
329
330/**
331 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
332 * The name of the dma-buf buffer can only be set when the dma-buf is not
333 * attached to any devices. It could theoretically support changing the
334 * name of the dma-buf if the same piece of memory is used for multiple
335 * purposes between different devices.
336 *
337 * @dmabuf: [in] dmabuf buffer that will be renamed.
338 * @buf: [in] A piece of userspace memory that contains the name of
339 * the dma-buf.
340 *
341 * Returns 0 on success. If the dma-buf buffer is already attached to
342 * devices, return -EBUSY.
343 *
344 */
345static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
346{
347 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
348 long ret = 0;
349
350 if (IS_ERR(name))
351 return PTR_ERR(name);
352
353 mutex_lock(&dmabuf->lock);
354 if (!list_empty(&dmabuf->attachments)) {
355 ret = -EBUSY;
356 kfree(name);
357 goto out_unlock;
358 }
359 spin_lock(&dmabuf->name_lock);
360 kfree(dmabuf->name);
361 dmabuf->name = name;
362 spin_unlock(&dmabuf->name_lock);
363
364out_unlock:
365 mutex_unlock(&dmabuf->lock);
366 return ret;
367}
368
369static long dma_buf_ioctl(struct file *file,
370 unsigned int cmd, unsigned long arg)
371{
372 struct dma_buf *dmabuf;
373 struct dma_buf_sync sync;
374 enum dma_data_direction direction;
375 int ret;
376
377 dmabuf = file->private_data;
378
379 switch (cmd) {
380 case DMA_BUF_IOCTL_SYNC:
381 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
382 return -EFAULT;
383
384 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
385 return -EINVAL;
386
387 switch (sync.flags & DMA_BUF_SYNC_RW) {
388 case DMA_BUF_SYNC_READ:
389 direction = DMA_FROM_DEVICE;
390 break;
391 case DMA_BUF_SYNC_WRITE:
392 direction = DMA_TO_DEVICE;
393 break;
394 case DMA_BUF_SYNC_RW:
395 direction = DMA_BIDIRECTIONAL;
396 break;
397 default:
398 return -EINVAL;
399 }
400
401 if (sync.flags & DMA_BUF_SYNC_END)
402 ret = dma_buf_end_cpu_access(dmabuf, direction);
403 else
404 ret = dma_buf_begin_cpu_access(dmabuf, direction);
405
406 return ret;
407
408 case DMA_BUF_SET_NAME_A:
409 case DMA_BUF_SET_NAME_B:
410 return dma_buf_set_name(dmabuf, (const char __user *)arg);
411
412 default:
413 return -ENOTTY;
414 }
415}
416
417static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
418{
419 struct dma_buf *dmabuf = file->private_data;
420
421 seq_printf(m, "size:\t%zu\n", dmabuf->size);
422 /* Don't count the temporary reference taken inside procfs seq_show */
423 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
424 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
425 spin_lock(&dmabuf->name_lock);
426 if (dmabuf->name)
427 seq_printf(m, "name:\t%s\n", dmabuf->name);
428 spin_unlock(&dmabuf->name_lock);
429}
430
431static const struct file_operations dma_buf_fops = {
432 .release = dma_buf_file_release,
433 .mmap = dma_buf_mmap_internal,
434 .llseek = dma_buf_llseek,
435 .poll = dma_buf_poll,
436 .unlocked_ioctl = dma_buf_ioctl,
437#ifdef CONFIG_COMPAT
438 .compat_ioctl = dma_buf_ioctl,
439#endif
440 .show_fdinfo = dma_buf_show_fdinfo,
441};
442
443/*
444 * is_dma_buf_file - Check if struct file* is associated with dma_buf
445 */
446static inline int is_dma_buf_file(struct file *file)
447{
448 return file->f_op == &dma_buf_fops;
449}
450
451static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
452{
453 struct file *file;
454 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
455
456 if (IS_ERR(inode))
457 return ERR_CAST(inode);
458
459 inode->i_size = dmabuf->size;
460 inode_set_bytes(inode, dmabuf->size);
461
462 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
463 flags, &dma_buf_fops);
464 if (IS_ERR(file))
465 goto err_alloc_file;
466 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
467 file->private_data = dmabuf;
468 file->f_path.dentry->d_fsdata = dmabuf;
469
470 return file;
471
472err_alloc_file:
473 iput(inode);
474 return file;
475}
476
477/**
478 * DOC: dma buf device access
479 *
480 * For device DMA access to a shared DMA buffer the usual sequence of operations
481 * is fairly simple:
482 *
483 * 1. The exporter defines its exporter instance using
484 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
485 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
486 * as a file descriptor by calling dma_buf_fd().
487 *
488 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
489 * to share with: first the file descriptor is converted to a &dma_buf using
490 * dma_buf_get(). Then the buffer is attached to the device using
491 * dma_buf_attach().
492 *
493 * Up to this stage the exporter is still free to migrate or reallocate the
494 * backing storage.
495 *
496 * 3. Once the buffer is attached to all devices userspace can initiate DMA
497 * access to the shared buffer. In the kernel this is done by calling
498 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
499 *
500 * 4. Once a driver is done with a shared buffer it needs to call
501 * dma_buf_detach() (after cleaning up any mappings) and then release the
502 * reference acquired with dma_buf_get by calling dma_buf_put().
503 *
504 * For the detailed semantics exporters are expected to implement see
505 * &dma_buf_ops.
506 */
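
/*
 * A condensed sketch of the importer-side sequence (steps 2-4 above) as it
 * might look from a driver; my_import() and the my_dev pointer are
 * hypothetical names and error unwinding is abbreviated::
 *
 *	static int my_import(struct device *my_dev, int fd)
 *	{
 *		struct dma_buf *dmabuf;
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *		int ret = 0;
 *
 *		dmabuf = dma_buf_get(fd);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, my_dev);
 *		if (IS_ERR(attach)) {
 *			ret = PTR_ERR(attach);
 *			goto out_put;
 *		}
 *
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *		if (IS_ERR(sgt)) {
 *			ret = PTR_ERR(sgt);
 *			goto out_detach;
 *		}
 *
 *		// ... program the device with the addresses in sgt ...
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	out_detach:
 *		dma_buf_detach(dmabuf, attach);
 *	out_put:
 *		dma_buf_put(dmabuf);
 *		return ret;
 *	}
 */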
507
508/**
509 * dma_buf_export - Creates a new dma_buf, and associates an anon file
510 * with this buffer, so it can be exported.
511 * Also connects the allocator-specific data and ops to the buffer.
512 * Additionally, provides a name string for the exporter; useful in debugging.
513 *
514 * @exp_info: [in] holds all the export related information provided
515 * by the exporter. see &struct dma_buf_export_info
516 * for further details.
517 *
518 * Returns, on success, a newly created dma_buf object, which wraps the
519 * supplied private data and operations for dma_buf_ops. On missing ops or an
520 * allocation error, an ERR_PTR() encoding a negative error is returned instead.
521 *
522 * For most cases the easiest way to create @exp_info is through the
523 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
524 */
525struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
526{
527 struct dma_buf *dmabuf;
528 struct dma_resv *resv = exp_info->resv;
529 struct file *file;
530 size_t alloc_size = sizeof(struct dma_buf);
531 int ret;
532
533 if (!exp_info->resv)
534 alloc_size += sizeof(struct dma_resv);
535 else
536 /* prevent &dma_buf[1] == dma_buf->resv */
537 alloc_size += 1;
538
539 if (WARN_ON(!exp_info->priv
540 || !exp_info->ops
541 || !exp_info->ops->map_dma_buf
542 || !exp_info->ops->unmap_dma_buf
543 || !exp_info->ops->release)) {
544 return ERR_PTR(-EINVAL);
545 }
546
547 if (!try_module_get(exp_info->owner))
548 return ERR_PTR(-ENOENT);
549
550 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
551 if (!dmabuf) {
552 ret = -ENOMEM;
553 goto err_module;
554 }
555
556 dmabuf->priv = exp_info->priv;
557 dmabuf->ops = exp_info->ops;
558 dmabuf->size = exp_info->size;
559 dmabuf->exp_name = exp_info->exp_name;
560 dmabuf->owner = exp_info->owner;
561 spin_lock_init(&dmabuf->name_lock);
562 init_waitqueue_head(&dmabuf->poll);
563 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
564 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
565
566 if (!resv) {
567 resv = (struct dma_resv *)&dmabuf[1];
568 dma_resv_init(resv);
569 }
570 dmabuf->resv = resv;
571
572 file = dma_buf_getfile(dmabuf, exp_info->flags);
573 if (IS_ERR(file)) {
574 ret = PTR_ERR(file);
575 goto err_dmabuf;
576 }
577
578 file->f_mode |= FMODE_LSEEK;
579 dmabuf->file = file;
580
581 ret = dma_buf_stats_setup(dmabuf);
582 if (ret)
583 goto err_sysfs;
584
585 mutex_init(&dmabuf->lock);
586 INIT_LIST_HEAD(&dmabuf->attachments);
587
588 mutex_lock(&db_list.lock);
589 list_add(&dmabuf->list_node, &db_list.head);
590 mutex_unlock(&db_list.lock);
591
592 return dmabuf;
593
594err_sysfs:
595 /*
596 * Set file->f_path.dentry->d_fsdata to NULL so that when
597 * dma_buf_release() gets invoked by dentry_ops, it exits
598 * early before calling the release() dma_buf op.
599 */
600 file->f_path.dentry->d_fsdata = NULL;
601 fput(file);
602err_dmabuf:
603 kfree(dmabuf);
604err_module:
605 module_put(exp_info->owner);
606 return ERR_PTR(ret);
607}
608EXPORT_SYMBOL_GPL(dma_buf_export);
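
/*
 * A minimal exporter-side sketch of the dma_buf_export() + dma_buf_fd() pair
 * documented above; my_buffer, my_dma_buf_ops and my_export_fd() are
 * hypothetical names used only for illustration::
 *
 *	static int my_export_fd(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *		int fd;
 *
 *		exp_info.ops = &my_dma_buf_ops;	// must provide map/unmap/release
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_RDWR;
 *		exp_info.priv = buf;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *		if (fd < 0)
 *			dma_buf_put(dmabuf);	// drops the only reference
 *		return fd;
 *	}
 */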
609
610/**
611 * dma_buf_fd - returns a file descriptor for the given dma_buf
612 * @dmabuf: [in] pointer to dma_buf for which fd is required.
613 * @flags: [in] flags to give to fd
614 *
615 * On success, returns an associated 'fd'. Else, returns error.
616 */
617int dma_buf_fd(struct dma_buf *dmabuf, int flags)
618{
619 int fd;
620
621 if (!dmabuf || !dmabuf->file)
622 return -EINVAL;
623
624 fd = get_unused_fd_flags(flags);
625 if (fd < 0)
626 return fd;
627
628 fd_install(fd, dmabuf->file);
629
630 return fd;
631}
632EXPORT_SYMBOL_GPL(dma_buf_fd);
633
634/**
635 * dma_buf_get - returns the dma_buf structure related to an fd
636 * @fd: [in] fd associated with the dma_buf to be returned
637 *
638 * On success, returns the dma_buf structure associated with an fd; uses
639 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
640 * otherwise.
641 */
642struct dma_buf *dma_buf_get(int fd)
643{
644 struct file *file;
645
646 file = fget(fd);
647
648 if (!file)
649 return ERR_PTR(-EBADF);
650
651 if (!is_dma_buf_file(file)) {
652 fput(file);
653 return ERR_PTR(-EINVAL);
654 }
655
656 return file->private_data;
657}
658EXPORT_SYMBOL_GPL(dma_buf_get);
659
660/**
661 * dma_buf_put - decreases refcount of the buffer
662 * @dmabuf: [in] buffer to reduce refcount of
663 *
664 * Uses file's refcounting done implicitly by fput().
665 *
666 * If, as a result of this call, the refcount becomes 0, the 'release' file
667 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
668 * in turn, and frees the memory allocated for dmabuf when exported.
669 */
670void dma_buf_put(struct dma_buf *dmabuf)
671{
672 if (WARN_ON(!dmabuf || !dmabuf->file))
673 return;
674
675 fput(dmabuf->file);
676}
677EXPORT_SYMBOL_GPL(dma_buf_put);
678
679/**
680 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
681 * calls attach() of dma_buf_ops to allow device-specific attach functionality
682 * @dmabuf: [in] buffer to attach device to.
683 * @dev: [in] device to be attached.
684 *
685 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
686 * must be cleaned up by calling dma_buf_detach().
687 *
688 * Returns:
689 *
690 * A pointer to newly created &dma_buf_attachment on success, or a negative
691 * error code wrapped into a pointer on failure.
692 *
693 * Note that this can fail if the backing storage of @dmabuf is in a place not
694 * accessible to @dev, and cannot be moved to a more suitable place. This is
695 * indicated with the error code -EBUSY.
696 */
697struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
698 struct device *dev)
699{
700 struct dma_buf_attachment *attach;
701 int ret;
702
703 if (WARN_ON(!dmabuf || !dev))
704 return ERR_PTR(-EINVAL);
705
706 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
707 if (!attach)
708 return ERR_PTR(-ENOMEM);
709
710 attach->dev = dev;
711 attach->dmabuf = dmabuf;
712
713 mutex_lock(&dmabuf->lock);
714
715 if (dmabuf->ops->attach) {
716 ret = dmabuf->ops->attach(dmabuf, attach);
717 if (ret)
718 goto err_attach;
719 }
720 list_add(&attach->node, &dmabuf->attachments);
721
722 mutex_unlock(&dmabuf->lock);
723
724 return attach;
725
726err_attach:
727 kfree(attach);
728 mutex_unlock(&dmabuf->lock);
729 return ERR_PTR(ret);
733}
734EXPORT_SYMBOL_GPL(dma_buf_attach);
735
736/**
737 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
738 * optionally calls detach() of dma_buf_ops for device-specific detach
739 * @dmabuf: [in] buffer to detach from.
740 * @attach: [in] attachment to be detached; is free'd after this call.
741 *
742 * Clean up a device attachment obtained by calling dma_buf_attach().
743 */
744void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
745{
746 if (WARN_ON(!dmabuf || !attach))
747 return;
748
749 if (attach->sgt) {
750 dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
751 }
752
753 mutex_lock(&dmabuf->lock);
754 list_del(&attach->node);
755 if (dmabuf->ops->detach)
756 dmabuf->ops->detach(dmabuf, attach);
757
758 mutex_unlock(&dmabuf->lock);
759 kfree(attach);
760}
761EXPORT_SYMBOL_GPL(dma_buf_detach);
762
763/**
764 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
765 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
766 * dma_buf_ops.
767 * @attach: [in] attachment whose scatterlist is to be returned
768 * @direction: [in] direction of DMA transfer
769 *
770 * Returns the sg_table containing the scatterlist of the buffer; returns ERR_PTR
771 * on error. May return -EINTR if it is interrupted by a signal.
772 *
773 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
774 * the underlying backing storage is pinned for as long as a mapping exists,
775 * therefore users/importers should not hold onto a mapping for undue amounts of
776 * time.
777 */
778struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
779 enum dma_data_direction direction)
780{
781 struct sg_table *sg_table;
782
783 might_sleep();
784
785 if (WARN_ON(!attach || !attach->dmabuf))
786 return ERR_PTR(-EINVAL);
787
788 if (attach->sgt) {
789 /*
790 * Two mappings with different directions for the same
791 * attachment are not allowed.
792 */
793 if (attach->dir != direction &&
794 attach->dir != DMA_BIDIRECTIONAL)
795 return ERR_PTR(-EBUSY);
796
797 return attach->sgt;
798 }
799
800 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
801 if (!sg_table)
802 sg_table = ERR_PTR(-ENOMEM);
803
804 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
805 attach->sgt = sg_table;
806 attach->dir = direction;
807 }
808
809 return sg_table;
810}
811EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
812
813/**
814 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
815 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
816 * dma_buf_ops.
817 * @attach: [in] attachment to unmap buffer from
818 * @sg_table: [in] scatterlist info of the buffer to unmap
819 * @direction: [in] direction of DMA transfer
820 *
821 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
822 */
823void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
824 struct sg_table *sg_table,
825 enum dma_data_direction direction)
826{
827 might_sleep();
828
829 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
830 return;
831
832 if (attach->sgt == sg_table)
833 return;
834
835 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
836}
837EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
838
839/**
840 * DOC: cpu access
841 *
842 * There are multiple reasons for supporting CPU access to a dma buffer object:
843 *
844 * - Fallback operations in the kernel, for example when a device is connected
845 * over USB and the kernel needs to shuffle the data around first before
846 * sending it away. Cache coherency is handled by bracketing any transactions
847 * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
849 *
850 * To support dma_buf objects residing in highmem cpu access is page-based
851 * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
852 * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
853 * returns a pointer in kernel virtual address space. Afterwards the chunk
854 * needs to be unmapped again. There is no limit on how often a given chunk
855 * can be mapped and unmapped, i.e. the importer does not need to call
856 * begin_cpu_access again before mapping the same chunk again.
857 *
858 * Interfaces::
859 * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
860 * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
861 *
862 * Implementing these functions is optional for exporters, and for importers all
863 * the restrictions of using kmap apply.
864 *
865 * dma_buf kmap calls outside of the range specified in begin_cpu_access are
866 * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
867 * the partial chunks at the beginning and end but may return stale or bogus
868 * data outside of the range (in these partial chunks).
869 *
870 * For cases where the overhead of kmap is too high, a vmap interface
871 * is provided. This interface should be used very carefully, as vmalloc
872 * space is a limited resource on many architectures.
873 *
874 * Interfaces::
875 * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
876 * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
877 *
878 * The vmap call can fail if there is no vmap support in the exporter, or if
879 * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
880 * that the dma-buf layer keeps a reference count for all vmap access and
881 * calls down into the exporter's vmap function only when no vmapping exists,
882 * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
883 * provided by taking the dma_buf->lock mutex.
884 *
885 * - For full compatibility on the importer side with existing userspace
886 * interfaces, which might already support mmap'ing buffers. This is needed in
887 * many processing pipelines (e.g. feeding a software rendered image into a
888 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
889 * framework already supported this, and mmap support was needed for DMA buffer
890 * file descriptors to replace ION buffers.
891 *
892 * There are no special interfaces, userspace simply calls mmap on the dma-buf
893 * fd. But like for CPU access there's a need to bracket the actual access,
894 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
895 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
896 * be restarted.
897 *
898 * Some systems might need some sort of cache coherency management e.g. when
899 * CPU and GPU domains are being accessed through dma-buf at the same time.
900 * To address this problem there are begin/end coherency markers that
901 * forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
902 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
903 * sequence would be used as follows:
904 *
905 * - mmap dma-buf fd
906 * - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
907 * to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
908 * want (with the new data being consumed by, say, the GPU or the scanout
909 * device)
910 * - munmap once you don't need the buffer any more
911 *
912 * For correctness and optimal performance, it is always required to use
913 * SYNC_START and SYNC_END before and after, respectively, when accessing the
914 * mapped address. Userspace cannot rely on coherent access, even when there
915 * are systems where it just works without calling these ioctls.
916 *
917 * - And as a CPU fallback in userspace processing pipelines.
918 *
919 * Similar to the motivation for kernel cpu access it is again important that
920 * the userspace code of a given importing subsystem can use the same
921 * interfaces with an imported dma-buf buffer object as with a native buffer
922 * object. This is especially important for drm where the userspace part of
923 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
924 * use a different way to mmap a buffer would be rather invasive.
925 *
926 * The assumption in the current dma-buf interfaces is that redirecting the
927 * initial mmap is all that's needed. A survey of some of the existing
928 * subsystems shows that no driver seems to do any nefarious thing like
929 * syncing up with outstanding asynchronous processing on the device or
930 * allocating special resources at fault time. So hopefully this is good
931 * enough, since adding interfaces to intercept pagefaults and allow pte
932 * shootdowns would increase the complexity quite a bit.
933 *
934 * Interface::
935 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
936 * unsigned long);
937 *
938 * If the importing subsystem simply provides a special-purpose mmap call to
939 * set up a mapping in userspace, calling do_mmap with dma_buf->file will
940 * equally achieve that for a dma-buf object.
941 */
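
/*
 * A userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC bracketing described
 * above, assuming dmabuf_fd is a dma-buf file descriptor and 'size' its size
 * (error handling trimmed; a real caller must also restart the ioctl on
 * -EAGAIN/-EINTR)::
 *
 *	#include <sys/mman.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int fill_buffer(int dmabuf_fd, size_t size)
 *	{
 *		struct dma_buf_sync sync = { 0 };
 *		void *map;
 *
 *		map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   dmabuf_fd, 0);
 *		if (map == MAP_FAILED)
 *			return -1;
 *
 *		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *		memset(map, 0, size);		// the actual CPU access
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *		munmap(map, size);
 *		return 0;
 *	}
 */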
942
943static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
944 enum dma_data_direction direction)
945{
946 bool write = (direction == DMA_BIDIRECTIONAL ||
947 direction == DMA_TO_DEVICE);
948 struct dma_resv *resv = dmabuf->resv;
949 long ret;
950
951 /* Wait on any implicit rendering fences */
952 ret = dma_resv_wait_timeout_rcu(resv, write, true,
953 MAX_SCHEDULE_TIMEOUT);
954 if (ret < 0)
955 return ret;
956
957 return 0;
958}
959
960/**
961 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
962 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
963 * preparations. Coherency is only guaranteed in the specified range for the
964 * specified access direction.
965 * @dmabuf: [in] buffer to prepare cpu access for.
966 * @direction: [in] direction of the CPU access.
967 *
968 * After the cpu access is complete the caller should call
969 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
970 * it guaranteed to be coherent with other DMA access.
971 *
972 * Can return negative error values, returns 0 on success.
973 */
974int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
975 enum dma_data_direction direction)
976{
977 int ret = 0;
978
979 if (WARN_ON(!dmabuf))
980 return -EINVAL;
981
982 if (dmabuf->ops->begin_cpu_access)
983 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
984
985 /* Ensure that all fences are waited upon - but we first allow
986 * the native handler the chance to do so more efficiently if it
987 * chooses. A double invocation here will be a reasonably cheap no-op.
988 */
989 if (ret == 0)
990 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
991
992 return ret;
993}
994EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
995
996int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
997 enum dma_data_direction direction,
998 unsigned int offset, unsigned int len)
999{
1000 int ret = 0;
1001
1002 if (WARN_ON(!dmabuf))
1003 return -EINVAL;
1004
1005 if (dmabuf->ops->begin_cpu_access_partial)
1006 ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
1007 offset, len);
1008
1009 /* Ensure that all fences are waited upon - but we first allow
1010 * the native handler the chance to do so more efficiently if it
1011 * chooses. A double invocation here will be a reasonably cheap no-op.
1012 */
1013 if (ret == 0)
1014 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1015
1016 return ret;
1017}
1018EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
1019
1020/**
1021 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1022 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1023 * actions. Coherency is only guaranteed in the specified range for the
1024 * specified access direction.
1025 * @dmabuf: [in] buffer to complete cpu access for.
1026 * @direction: [in] direction of the CPU access.
1027 *
1028 * This terminates CPU access started with dma_buf_begin_cpu_access().
1029 *
1030 * Can return negative error values, returns 0 on success.
1031 */
1032int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1033 enum dma_data_direction direction)
1034{
1035 int ret = 0;
1036
1037 WARN_ON(!dmabuf);
1038
1039 if (dmabuf->ops->end_cpu_access)
1040 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1041
1042 return ret;
1043}
1044EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1045
1046int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
1047 enum dma_data_direction direction,
1048 unsigned int offset, unsigned int len)
1049{
1050 int ret = 0;
1051
1052 WARN_ON(!dmabuf);
1053
1054 if (dmabuf->ops->end_cpu_access_partial)
1055 ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
1056 offset, len);
1057
1058 return ret;
1059}
1060EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
1061
1062/**
1063 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
1064 * same restrictions as for kmap and friends apply.
1065 * @dmabuf: [in] buffer to map page from.
1066 * @page_num: [in] page in PAGE_SIZE units to map.
1067 *
1068 * This call must always succeed; any necessary preparations that might fail
1069 * need to be done in begin_cpu_access.
1070 */
1071void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
1072{
1073 WARN_ON(!dmabuf);
1074
1075 if (!dmabuf->ops->map)
1076 return NULL;
1077 return dmabuf->ops->map(dmabuf, page_num);
1078}
1079EXPORT_SYMBOL_GPL(dma_buf_kmap);
1080
1081/**
1082 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
1083 * @dmabuf: [in] buffer to unmap page from.
1084 * @page_num: [in] page in PAGE_SIZE units to unmap.
1085 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
1086 *
1087 * This call must always succeed.
1088 */
1089void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
1090 void *vaddr)
1091{
1092 WARN_ON(!dmabuf);
1093
1094 if (dmabuf->ops->unmap)
1095 dmabuf->ops->unmap(dmabuf, page_num, vaddr);
1096}
1097EXPORT_SYMBOL_GPL(dma_buf_kunmap);
1098
1099
1100/**
1101 * dma_buf_mmap - Set up a userspace mmap with the given vma
1102 * @dmabuf: [in] buffer that should back the vma
1103 * @vma: [in] vma for the mmap
1104 * @pgoff: [in] offset in pages where this mmap should start within the
1105 * dma-buf buffer.
1106 *
1107 * This function adjusts the passed in vma so that it points at the file of the
1108 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1109 * checking on the size of the vma. Then it calls the exporter's mmap function to
1110 * set up the mapping.
1111 *
1112 * Can return negative error values, returns 0 on success.
1113 */
1114int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1115 unsigned long pgoff)
1116{
1117 struct file *oldfile;
1118 int ret;
1119
1120 if (WARN_ON(!dmabuf || !vma))
1121 return -EINVAL;
1122
1123 /* check if buffer supports mmap */
1124 if (!dmabuf->ops->mmap)
1125 return -EINVAL;
1126
1127 /* check for offset overflow */
1128 if (pgoff + vma_pages(vma) < pgoff)
1129 return -EOVERFLOW;
1130
1131 /* check for overflowing the buffer's size */
1132 if (pgoff + vma_pages(vma) >
1133 dmabuf->size >> PAGE_SHIFT)
1134 return -EINVAL;
1135
1136 /* readjust the vma */
1137 get_file(dmabuf->file);
1138 oldfile = vma->vm_file;
1139 vma->vm_file = dmabuf->file;
1140 vma->vm_pgoff = pgoff;
1141
1142 ret = dmabuf->ops->mmap(dmabuf, vma);
1143 if (ret) {
1144 /* restore old parameters on failure */
1145 vma->vm_file = oldfile;
1146 fput(dmabuf->file);
1147 } else {
1148 if (oldfile)
1149 fput(oldfile);
1150 }
1151 return ret;
1152
1153}
1154EXPORT_SYMBOL_GPL(dma_buf_mmap);
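
/*
 * A sketch of how an importing subsystem might forward its own mmap fop to
 * dma_buf_mmap() as described in the kerneldoc above; my_obj_from_vma() is a
 * hypothetical helper that resolves the driver object backing this mapping::
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = my_obj_from_vma(file, vma);
 *
 *		if (!obj || !obj->dmabuf)
 *			return -EINVAL;
 *
 *		// map from the start of the dma-buf backing this object
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */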
1155
1156/**
1157 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1158 * address space. Same restrictions as for vmap and friends apply.
1159 * @dmabuf: [in] buffer to vmap
1160 *
1161 * This call may fail due to lack of virtual mapping address space.
1162 * These calls are optional in drivers. The intended use for them
1163 * is for mapping objects linearly into kernel space for frequently used objects.
1164 * Please attempt to use kmap/kunmap before thinking about these interfaces.
1165 *
1166 * Returns NULL on error.
1167 */
1168void *dma_buf_vmap(struct dma_buf *dmabuf)
1169{
1170 void *ptr;
1171
1172 if (WARN_ON(!dmabuf))
1173 return NULL;
1174
1175 if (!dmabuf->ops->vmap)
1176 return NULL;
1177
1178 mutex_lock(&dmabuf->lock);
1179 if (dmabuf->vmapping_counter) {
1180 dmabuf->vmapping_counter++;
1181 BUG_ON(!dmabuf->vmap_ptr);
1182 ptr = dmabuf->vmap_ptr;
1183 goto out_unlock;
1184 }
1185
1186 BUG_ON(dmabuf->vmap_ptr);
1187
1188 ptr = dmabuf->ops->vmap(dmabuf);
1189 if (WARN_ON_ONCE(IS_ERR(ptr)))
1190 ptr = NULL;
1191 if (!ptr)
1192 goto out_unlock;
1193
1194 dmabuf->vmap_ptr = ptr;
1195 dmabuf->vmapping_counter = 1;
1196
1197out_unlock:
1198 mutex_unlock(&dmabuf->lock);
1199 return ptr;
1200}
1201EXPORT_SYMBOL_GPL(dma_buf_vmap);
1202
1203/**
1204 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1205 * @dmabuf: [in] buffer to vunmap
1206 * @vaddr: [in] vmap to vunmap
1207 */
1208void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1209{
1210 if (WARN_ON(!dmabuf))
1211 return;
1212
1213 BUG_ON(!dmabuf->vmap_ptr);
1214 BUG_ON(dmabuf->vmapping_counter == 0);
1215 BUG_ON(dmabuf->vmap_ptr != vaddr);
1216
1217 mutex_lock(&dmabuf->lock);
1218 if (--dmabuf->vmapping_counter == 0) {
1219 if (dmabuf->ops->vunmap)
1220 dmabuf->ops->vunmap(dmabuf, vaddr);
1221 dmabuf->vmap_ptr = NULL;
1222 }
1223 mutex_unlock(&dmabuf->lock);
1224}
1225EXPORT_SYMBOL_GPL(dma_buf_vunmap);
1226
1227int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
1228{
1229 int ret = 0;
1230
1231 if (WARN_ON(!dmabuf) || !flags)
1232 return -EINVAL;
1233
1234 if (dmabuf->ops->get_flags)
1235 ret = dmabuf->ops->get_flags(dmabuf, flags);
1236
1237 return ret;
1238}
1239EXPORT_SYMBOL_GPL(dma_buf_get_flags);
1240
1241int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
1242{
1243 if (WARN_ON(!dmabuf) || !uuid)
1244 return -EINVAL;
1245
1246 if (!dmabuf->ops->get_uuid)
1247 return -ENODEV;
1248
1249 return dmabuf->ops->get_uuid(dmabuf, uuid);
1250}
1251EXPORT_SYMBOL_GPL(dma_buf_get_uuid);
1252
1253#ifdef CONFIG_DEBUG_FS
1254static int dma_buf_debug_show(struct seq_file *s, void *unused)
1255{
1256 int ret;
1257 struct dma_buf *buf_obj;
1258 struct dma_buf_attachment *attach_obj;
1259 struct dma_resv *robj;
1260 struct dma_resv_list *fobj;
1261 struct dma_fence *fence;
1262 unsigned seq;
1263 int count = 0, attach_count, shared_count, i;
1264 size_t size = 0;
1265
1266 ret = mutex_lock_interruptible(&db_list.lock);
1267
1268 if (ret)
1269 return ret;
1270
1271 seq_puts(s, "\nDma-buf Objects:\n");
1272 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1273 "size", "flags", "mode", "count", "ino");
1274
1275 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1276 ret = mutex_lock_interruptible(&buf_obj->lock);
1277
1278 if (ret) {
1279 seq_puts(s,
1280 "\tERROR locking buffer object: skipping\n");
1281 continue;
1282 }
1283
1284 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1285 buf_obj->size,
1286 buf_obj->file->f_flags, buf_obj->file->f_mode,
1287 file_count(buf_obj->file),
1288 buf_obj->exp_name,
1289 file_inode(buf_obj->file)->i_ino,
1290 buf_obj->name ?: "");
1291
1292 robj = buf_obj->resv;
1293 while (true) {
1294 seq = read_seqcount_begin(&robj->seq);
1295 rcu_read_lock();
1296 fobj = rcu_dereference(robj->fence);
1297 shared_count = fobj ? fobj->shared_count : 0;
1298 fence = rcu_dereference(robj->fence_excl);
1299 if (!read_seqcount_retry(&robj->seq, seq))
1300 break;
1301 rcu_read_unlock();
1302 }
1303
1304 if (fence)
1305 seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1306 fence->ops->get_driver_name(fence),
1307 fence->ops->get_timeline_name(fence),
1308 dma_fence_is_signaled(fence) ? "" : "un");
1309 for (i = 0; i < shared_count; i++) {
1310 fence = rcu_dereference(fobj->shared[i]);
1311 if (!dma_fence_get_rcu(fence))
1312 continue;
1313 seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1314 fence->ops->get_driver_name(fence),
1315 fence->ops->get_timeline_name(fence),
1316 dma_fence_is_signaled(fence) ? "" : "un");
1317 dma_fence_put(fence);
1318 }
1319 rcu_read_unlock();
1320
1321 seq_puts(s, "\tAttached Devices:\n");
1322 attach_count = 0;
1323
1324 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1325 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1326 attach_count++;
1327 }
1328
1329 seq_printf(s, "Total %d devices attached\n\n",
1330 attach_count);
1331
1332 count++;
1333 size += buf_obj->size;
1334 mutex_unlock(&buf_obj->lock);
1335 }
1336
1337 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1338
1339 mutex_unlock(&db_list.lock);
1340 return 0;
1341}
1342
1343DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1344
1345static struct dentry *dma_buf_debugfs_dir;
1346
1347static int dma_buf_init_debugfs(void)
1348{
1349 struct dentry *d;
1350 int err = 0;
1351
1352 d = debugfs_create_dir("dma_buf", NULL);
1353 if (IS_ERR(d))
1354 return PTR_ERR(d);
1355
1356 dma_buf_debugfs_dir = d;
1357
1358 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1359 NULL, &dma_buf_debug_fops);
1360 if (IS_ERR(d)) {
1361 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1362 debugfs_remove_recursive(dma_buf_debugfs_dir);
1363 dma_buf_debugfs_dir = NULL;
1364 err = PTR_ERR(d);
1365 }
1366
1367 return err;
1368}
1369
1370static void dma_buf_uninit_debugfs(void)
1371{
1372 debugfs_remove_recursive(dma_buf_debugfs_dir);
1373}
1374#else
1375static inline int dma_buf_init_debugfs(void)
1376{
1377 return 0;
1378}
1379static inline void dma_buf_uninit_debugfs(void)
1380{
1381}
1382#endif
1383
1384static int __init dma_buf_init(void)
1385{
1386 int ret;
1387
1388 ret = dma_buf_init_sysfs_statistics();
1389 if (ret)
1390 return ret;
1391
1392 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1393 if (IS_ERR(dma_buf_mnt))
1394 return PTR_ERR(dma_buf_mnt);
1395
1396 mutex_init(&db_list.lock);
1397 INIT_LIST_HEAD(&db_list.head);
1398 dma_buf_init_debugfs();
1399 return 0;
1400}
1401subsys_initcall(dma_buf_init);
1402
1403static void __exit dma_buf_deinit(void)
1404{
1405 dma_buf_uninit_debugfs();
1406 kern_unmount(dma_buf_mnt);
1407 dma_buf_uninit_sysfs_statistics();
1408}
1409module_exit(dma_buf_deinit);
1410MODULE_LICENSE("GPL");