// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ioctl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <linux/sched/signal.h>

#include "internal.h"

#include <asm/ioctls.h>

/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS	(UINT_MAX / sizeof(struct fiemap_extent))

/**
 * vfs_ioctl - call filesystem specific ioctl methods
 * @filp: open file to invoke ioctl method on
 * @cmd: ioctl command to execute
 * @arg: command-specific argument for ioctl
 *
 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
 * returns -ENOTTY.
 *
 * Returns 0 on success, -errno on error.
 */
long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = -ENOTTY;

	if (!filp->f_op->unlocked_ioctl)
		goto out;

	error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
	if (error == -ENOIOCTLCMD)
		error = -ENOTTY;
 out:
	return error;
}
EXPORT_SYMBOL(vfs_ioctl);
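
/*
 * Illustrative sketch only, not used by the VFS: a minimal driver-side
 * ->unlocked_ioctl handler of the kind vfs_ioctl() dispatches to.  The
 * command EXAMPLE_IOC_RESET and the function example_ioctl() are
 * hypothetical names; returning -ENOTTY for unrecognized commands is what
 * lets callers report "inappropriate ioctl for device".
 */
#define EXAMPLE_IOC_RESET	_IO('E', 0)

static long __maybe_unused example_ioctl(struct file *filp, unsigned int cmd,
					 unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_IOC_RESET:
		/* device-specific work would go here */
		return 0;
	default:
		return -ENOTTY;
	}
}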

static int ioctl_fibmap(struct file *filp, int __user *p)
{
	struct address_space *mapping = filp->f_mapping;
	int res, block;

	/* do we support this mess? */
	if (!mapping->a_ops->bmap)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	res = get_user(block, p);
	if (res)
		return res;
	res = mapping->a_ops->bmap(mapping, block);
	return put_user(res, p);
}

/**
 * fiemap_fill_next_extent - Fiemap helper function
 * @fieinfo: Fiemap context passed into ->fiemap
 * @logical: Extent logical start offset, in bytes
 * @phys: Extent physical start offset, in bytes
 * @len: Extent length, in bytes
 * @flags: FIEMAP_EXTENT flags that describe this extent
 *
 * Called from file system ->fiemap callback. Will populate extent
 * info as passed in via arguments and copy to user memory. On
 * success, extent count on fieinfo is incremented.
 *
 * Returns 0 on success, -errno on error, 1 if this was the last
 * extent that will fit in user array.
 */
#define SET_UNKNOWN_FLAGS	(FIEMAP_EXTENT_DELALLOC)
#define SET_NO_UNMOUNTED_IO_FLAGS	(FIEMAP_EXTENT_DATA_ENCRYPTED)
#define SET_NOT_ALIGNED_FLAGS	(FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
			    u64 phys, u64 len, u32 flags)
{
	struct fiemap_extent extent;
	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;

	/* only count the extents */
	if (fieinfo->fi_extents_max == 0) {
		fieinfo->fi_extents_mapped++;
		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
	}

	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
		return 1;

	if (flags & SET_UNKNOWN_FLAGS)
		flags |= FIEMAP_EXTENT_UNKNOWN;
	if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
		flags |= FIEMAP_EXTENT_ENCODED;
	if (flags & SET_NOT_ALIGNED_FLAGS)
		flags |= FIEMAP_EXTENT_NOT_ALIGNED;

	memset(&extent, 0, sizeof(extent));
	extent.fe_logical = logical;
	extent.fe_physical = phys;
	extent.fe_length = len;
	extent.fe_flags = flags;

	dest += fieinfo->fi_extents_mapped;
	if (copy_to_user(dest, &extent, sizeof(extent)))
		return -EFAULT;

	fieinfo->fi_extents_mapped++;
	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
		return 1;
	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
EXPORT_SYMBOL(fiemap_fill_next_extent);
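
/*
 * Illustrative sketch only, not wired into any filesystem: the shape of a
 * minimal ->fiemap method built on the helpers above.  The name
 * examplefs_fiemap() and the single-extent layout it reports are
 * assumptions made purely for illustration; a real implementation walks
 * its own on-disk mapping (see __generic_block_fiemap() below for a
 * get_block-based loop).
 */
static int __maybe_unused examplefs_fiemap(struct inode *inode,
					   struct fiemap_extent_info *fieinfo,
					   u64 start, u64 len)
{
	int ret;

	/* reject fiemap flags this hypothetical filesystem cannot honour */
	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/* pretend the whole file is one contiguous extent starting at 0 */
	ret = fiemap_fill_next_extent(fieinfo, 0, 0, i_size_read(inode),
				      FIEMAP_EXTENT_LAST);
	return ret < 0 ? ret : 0;
}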

/**
 * fiemap_check_flags - check validity of requested flags for fiemap
 * @fieinfo: Fiemap context passed into ->fiemap
 * @fs_flags: Set of fiemap flags that the file system understands
 *
 * Called from file system ->fiemap callback. This will compute the
 * intersection of valid fiemap flags and those that the fs supports. That
 * value is then compared against the user supplied flags. In case of bad user
 * flags, the invalid values will be written into the fieinfo structure, and
 * -EBADR is returned, which tells ioctl_fiemap() to return those values to
 * userspace. For this reason, a return code of -EBADR should be preserved.
 *
 * Returns 0 on success, -EBADR on bad flags.
 */
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
{
	u32 incompat_flags;

	incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);
	if (incompat_flags) {
		fieinfo->fi_flags = incompat_flags;
		return -EBADR;
	}
	return 0;
}
EXPORT_SYMBOL(fiemap_check_flags);

static int fiemap_check_ranges(struct super_block *sb,
			       u64 start, u64 len, u64 *new_len)
{
	u64 maxbytes = (u64) sb->s_maxbytes;

	*new_len = len;

	if (len == 0)
		return -EINVAL;

	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (len > maxbytes || (maxbytes - len) < start)
		*new_len = maxbytes - start;

	return 0;
}

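/*
 * fiemap_prep() folds the range checking of fiemap_check_ranges() and the
 * flag validation of fiemap_check_flags() into one helper: it rejects
 * zero-length or out-of-range requests, clamps *len to what the filesystem
 * can address, and returns -EBADR (leaving the offending bits in
 * fieinfo->fi_flags) when flags outside @supported_flags were requested.
 */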
int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 *len, u32 supported_flags)
{
	u64 maxbytes = inode->i_sb->s_maxbytes;
	u32 incompat_flags;

	if (*len == 0)
		return -EINVAL;

	if (start > maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (*len > maxbytes || (maxbytes - *len) < start)
		*len = maxbytes - start;

	supported_flags &= FIEMAP_FLAGS_COMPAT;
	incompat_flags = fieinfo->fi_flags & ~supported_flags;
	if (incompat_flags) {
		fieinfo->fi_flags = incompat_flags;
		return -EBADR;
	}
	return 0;
}
EXPORT_SYMBOL(fiemap_prep);

static int ioctl_fiemap(struct file *filp, unsigned long arg)
{
	struct fiemap fiemap;
	struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
	struct fiemap_extent_info fieinfo = { 0, };
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	u64 len;
	int error;

	if (!inode->i_op->fiemap)
		return -EOPNOTSUPP;

	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
		return -EFAULT;

	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
				    &len);
	if (error)
		return error;

	fieinfo.fi_flags = fiemap.fm_flags;
	fieinfo.fi_extents_max = fiemap.fm_extent_count;
	fieinfo.fi_extents_start = ufiemap->fm_extents;

	if (fiemap.fm_extent_count != 0 &&
	    !access_ok(fieinfo.fi_extents_start,
		       fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
		return -EFAULT;

	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
		filemap_write_and_wait(inode->i_mapping);

	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
	fiemap.fm_flags = fieinfo.fi_flags;
	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
		error = -EFAULT;

	return error;
}

static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
			     u64 off, u64 olen, u64 destoff)
{
	struct fd src_file = fdget(srcfd);
	loff_t cloned;
	int ret;

	if (!src_file.file)
		return -EBADF;
	ret = -EXDEV;
	if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
		goto fdput;
	cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
				      olen, 0);
	if (cloned < 0)
		ret = cloned;
	else if (olen && cloned != olen)
		ret = -EINVAL;
	else
		ret = 0;
fdput:
	fdput(src_file);
	return ret;
}

static long ioctl_file_clone_range(struct file *file, void __user *argp)
{
	struct file_clone_range args;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
	return ioctl_file_clone(file, args.src_fd, args.src_offset,
				args.src_length, args.dest_offset);
}

#ifdef CONFIG_BLOCK

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

/**
 * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
 * @inode: the inode to map
 * @fieinfo: the fiemap info struct that will be passed back to userspace
 * @start: where to start mapping in the inode
 * @len: how much space to map
 * @get_block: the fs's get_block function
 *
 * This does FIEMAP for block based inodes. Basically it will just loop
 * through get_block until we hit the number of extents we want to map, or we
 * go past the end of the file and hit a hole.
 *
 * If it is possible to have data blocks beyond a hole past @inode->i_size, then
 * please do not use this function, it will stop at the first unmapped block
 * beyond i_size.
 *
 * If you use this function directly, you need to do your own locking. Use
 * generic_block_fiemap if you want the locking done for you.
 */

int __generic_block_fiemap(struct inode *inode,
			   struct fiemap_extent_info *fieinfo, loff_t start,
			   loff_t len, get_block_t *get_block)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/*
	 * Either the i_mutex or other appropriate locking needs to be held
	 * since we expect isize to not change at all through the duration of
	 * this call.
	 */
	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	/*
	 * Some filesystems can't deal with being asked to map less than
	 * blocksize, so make sure our len is at least block length.
	 */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&map_bh, 0, sizeof(struct buffer_head));
		map_bh.b_size = len;

		ret = get_block(inode, start_blk, &map_bh, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&map_bh)) {
			start_blk++;

			/*
			 * We want to handle the case where there is an
			 * allocated block at the front of the file, and then
			 * nothing but holes up to the end of the file properly,
			 * to make sure that extent at the front gets properly
			 * marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof &&
			    blk_to_logical(inode, start_blk) >= isize)
				past_eof = 1;

			/*
			 * First hole after going past the EOF, this is our
			 * last extent
			 */
			if (past_eof && size) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
			} else if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size, flags);
				size = 0;
			}

			/* if we have holes up to/past EOF then we're done */
			if (start_blk > last_blk || past_eof || ret)
				break;
		} else {
			/*
			 * We have gone over the length of what we wanted to
			 * map, and it wasn't the entire file, so add the extent
			 * we got last time and exit.
			 *
			 * This is for the case where say we want to map all the
			 * way up to the second to the last block in a file, but
			 * the last block is a hole, making the second to last
			 * block FIEMAP_EXTENT_LAST. In this case we want to
			 * see if there is a hole after the second to last block
			 * so we can mark it properly. If we found data after
			 * we exceeded the length we were requesting, then we
			 * are good to go, just add the extent to the fieinfo
			 * and break
			 */
			if (start_blk > last_blk && !whole_file) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			/*
			 * if size != 0 then we know we already have an extent
			 * to add, so add it.
			 */
			if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, map_bh.b_blocknr);
			size = map_bh.b_size;
			flags = FIEMAP_EXTENT_MERGED;

			start_blk += logical_to_blk(inode, size);

			/*
			 * If we are past the EOF, then we need to make sure as
			 * soon as we find a hole that the last extent we found
			 * is marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof && logical + size >= isize)
				past_eof = true;
		}
		cond_resched();
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

	} while (1);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(__generic_block_fiemap);

/**
 * generic_block_fiemap - FIEMAP for block based inodes
 * @inode: The inode to map
 * @fieinfo: The mapping information
 * @start: The initial block to map
 * @len: The length of the extent to attempt to map
 * @get_block: The block mapping function for the fs
 *
 * Calls __generic_block_fiemap to map the inode, after taking
 * the inode's mutex lock.
 */

int generic_block_fiemap(struct inode *inode,
			 struct fiemap_extent_info *fieinfo, u64 start,
			 u64 len, get_block_t *get_block)
{
	int ret;

	inode_lock(inode);
	ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
	inode_unlock(inode);
	return ret;
}
EXPORT_SYMBOL(generic_block_fiemap);
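
/*
 * Illustrative sketch only, for a hypothetical block-based filesystem: a
 * ->fiemap method can simply forward to generic_block_fiemap() together
 * with the filesystem's get_block routine.  Both exampleblk_get_block()
 * (shown here as a trivial 1:1 mapping) and exampleblk_fiemap() are made-up
 * names, not part of any in-tree filesystem.
 */
static int __maybe_unused exampleblk_get_block(struct inode *inode,
					       sector_t iblock,
					       struct buffer_head *bh,
					       int create)
{
	/* pretend logical block == physical block on the backing device */
	map_bh(bh, inode->i_sb, iblock);
	return 0;
}

static int __maybe_unused exampleblk_fiemap(struct inode *inode,
					    struct fiemap_extent_info *fieinfo,
					    u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    exampleblk_get_block);
}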

#endif /* CONFIG_BLOCK */

/*
 * This provides compatibility with legacy XFS pre-allocation ioctls
 * which predate the fallocate syscall.
 *
 * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
 * are used here, rest are ignored.
 */
int ioctl_preallocate(struct file *filp, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct space_resv sr;

	if (copy_from_user(&sr, argp, sizeof(sr)))
		return -EFAULT;

	switch (sr.l_whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		sr.l_start += filp->f_pos;
		break;
	case SEEK_END:
		sr.l_start += i_size_read(inode);
		break;
	default:
		return -EINVAL;
	}

	return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
}
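
/*
 * Illustrative userspace sketch (hypothetical program, not kernel code),
 * assuming the FS_IOC_RESVSP64 command and a structure with the layout of
 * struct space_resv are visible to it: the legacy XFS-style reservation
 * that the helper above turns into vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE,
 * ...), i.e. space is reserved but i_size is left unchanged.
 *
 *	struct space_resv sr = {
 *		.l_whence = SEEK_SET,		// l_start is an absolute offset
 *		.l_start  = 0,
 *		.l_len    = 16 * 1024 * 1024,	// reserve 16 MiB
 *	};
 *	if (ioctl(fd, FS_IOC_RESVSP64, &sr) != 0)
 *		perror("FS_IOC_RESVSP64");
 */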

static int file_ioctl(struct file *filp, unsigned int cmd,
		      unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case FIBMAP:
		return ioctl_fibmap(filp, p);
	case FIONREAD:
		return put_user(i_size_read(inode) - filp->f_pos, p);
	case FS_IOC_RESVSP:
	case FS_IOC_RESVSP64:
		return ioctl_preallocate(filp, p);
	}

	return vfs_ioctl(filp, cmd, arg);
}

static int ioctl_fionbio(struct file *filp, int __user *argp)
{
	unsigned int flag;
	int on, error;

	error = get_user(on, argp);
	if (error)
		return error;
	flag = O_NONBLOCK;
#ifdef __sparc__
	/* SunOS compatibility item. */
	if (O_NONBLOCK != O_NDELAY)
		flag |= O_NDELAY;
#endif
	spin_lock(&filp->f_lock);
	if (on)
		filp->f_flags |= flag;
	else
		filp->f_flags &= ~flag;
	spin_unlock(&filp->f_lock);
	return error;
}

static int ioctl_fioasync(unsigned int fd, struct file *filp,
			  int __user *argp)
{
	unsigned int flag;
	int on, error;

	error = get_user(on, argp);
	if (error)
		return error;
	flag = on ? FASYNC : 0;

	/* Did FASYNC state change ? */
	if ((flag ^ filp->f_flags) & FASYNC) {
		if (filp->f_op->fasync)
			/* fasync() adjusts filp->f_flags */
			error = filp->f_op->fasync(fd, filp, on);
		else
			error = -ENOTTY;
	}
	return error < 0 ? error : 0;
}

static int ioctl_fsfreeze(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* If filesystem doesn't support freeze feature, return. */
	if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL)
		return -EOPNOTSUPP;

	/* Freeze */
	if (sb->s_op->freeze_super)
		return sb->s_op->freeze_super(sb);
	return freeze_super(sb);
}

static int ioctl_fsthaw(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* Thaw */
	if (sb->s_op->thaw_super)
		return sb->s_op->thaw_super(sb);
	return thaw_super(sb);
}

static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
{
	struct file_dedupe_range __user *argp = arg;
	struct file_dedupe_range *same = NULL;
	int ret;
	unsigned long size;
	u16 count;

	if (get_user(count, &argp->dest_count)) {
		ret = -EFAULT;
		goto out;
	}

	size = offsetof(struct file_dedupe_range __user, info[count]);
	if (size > PAGE_SIZE) {
		ret = -ENOMEM;
		goto out;
	}

	same = memdup_user(argp, size);
	if (IS_ERR(same)) {
		ret = PTR_ERR(same);
		same = NULL;
		goto out;
	}

	same->dest_count = count;
	ret = vfs_dedupe_file_range(file, same);
	if (ret)
		goto out;

	ret = copy_to_user(argp, same, size);
	if (ret)
		ret = -EFAULT;

out:
	kfree(same);
	return ret;
}

/*
 * When you add any new common ioctls to the switches above and below
 * please update compat_sys_ioctl() too.
 *
 * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
 * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
 */
int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
		 unsigned long arg)
{
	int error = 0;
	int __user *argp = (int __user *)arg;
	struct inode *inode = file_inode(filp);

	switch (cmd) {
	case FIOCLEX:
		set_close_on_exec(fd, 1);
		break;

	case FIONCLEX:
		set_close_on_exec(fd, 0);
		break;

	case FIONBIO:
		error = ioctl_fionbio(filp, argp);
		break;

	case FIOASYNC:
		error = ioctl_fioasync(fd, filp, argp);
		break;

	case FIOQSIZE:
		if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
		    S_ISLNK(inode->i_mode)) {
			loff_t res = inode_get_bytes(inode);
			error = copy_to_user(argp, &res, sizeof(res)) ?
					-EFAULT : 0;
		} else
			error = -ENOTTY;
		break;

	case FIFREEZE:
		error = ioctl_fsfreeze(filp);
		break;

	case FITHAW:
		error = ioctl_fsthaw(filp);
		break;

	case FS_IOC_FIEMAP:
		return ioctl_fiemap(filp, arg);

	case FIGETBSZ:
		/* anon_bdev filesystems may not have a block size */
		if (!inode->i_sb->s_blocksize)
			return -EINVAL;
		return put_user(inode->i_sb->s_blocksize, argp);

	case FICLONE:
		return ioctl_file_clone(filp, arg, 0, 0, 0);

	case FICLONERANGE:
		return ioctl_file_clone_range(filp, argp);

	case FIDEDUPERANGE:
		return ioctl_file_dedupe_range(filp, argp);

	default:
		if (S_ISREG(inode->i_mode))
			error = file_ioctl(filp, cmd, arg);
		else
			error = vfs_ioctl(filp, cmd, arg);
		break;
	}
	return error;
}

int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	int error;
	struct fd f = fdget(fd);

	if (!f.file)
		return -EBADF;
	error = security_file_ioctl(f.file, cmd, arg);
	if (!error)
		error = do_vfs_ioctl(f.file, fd, cmd, arg);
	fdput(f);
	return error;
}

SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	return ksys_ioctl(fd, cmd, arg);
}

#ifdef CONFIG_COMPAT
/**
 * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
 *
 * This is not normally called as a function, but instead set in struct
 * file_operations as
 *
 *     .compat_ioctl = compat_ptr_ioctl,
 *
 * On most architectures, the compat_ptr_ioctl() just passes all arguments
 * to the corresponding ->ioctl handler. The exception is arch/s390, where
 * compat_ptr() clears the top bit of a 32-bit pointer value, so user space
 * pointers to the second 2GB alias the first 2GB, as is the case for
 * native 32-bit s390 user space.
 *
 * The compat_ptr_ioctl() function must therefore be used only with ioctl
 * functions that either ignore the argument or pass a pointer to a
 * compatible data type.
 *
 * If any ioctl command handled by fops->unlocked_ioctl passes a plain
 * integer instead of a pointer, or any of the passed data types
 * is incompatible between 32-bit and 64-bit architectures, a proper
 * handler is required instead of compat_ptr_ioctl.
 */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
EXPORT_SYMBOL(compat_ptr_ioctl);
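
/*
 * Illustrative sketch only: the typical way a driver wires up
 * compat_ptr_ioctl.  "example_fops" and "example_ioctl" are hypothetical
 * names; because every command argument is a pointer to a layout-compatible
 * structure, the same native handler can serve 32-bit callers once the
 * pointer has been converted with compat_ptr():
 *
 *	static const struct file_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.unlocked_ioctl	= example_ioctl,
 *		.compat_ioctl	= compat_ptr_ioctl,
 *	};
 */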
#endif