// SPDX-License-Identifier: GPL-2.0
 | 2 | /* | 
 | 3 |  *  linux/fs/stat.c | 
 | 4 |  * | 
 | 5 |  *  Copyright (C) 1991, 1992  Linus Torvalds | 
 | 6 |  */ | 
 | 7 |  | 
 | 8 | #include <linux/export.h> | 
 | 9 | #include <linux/mm.h> | 
 | 10 | #include <linux/errno.h> | 
 | 11 | #include <linux/file.h> | 
 | 12 | #include <linux/highuid.h> | 
 | 13 | #include <linux/fs.h> | 
 | 14 | #include <linux/namei.h> | 
 | 15 | #include <linux/security.h> | 
 | 16 | #include <linux/cred.h> | 
 | 17 | #include <linux/syscalls.h> | 
 | 18 | #include <linux/pagemap.h> | 
 | 19 | #include <linux/compat.h> | 
 | 20 |  | 
 | 21 | #include <linux/uaccess.h> | 
 | 22 | #include <asm/unistd.h> | 
 | 23 |  | 
 | 24 | /** | 
 | 25 |  * generic_fillattr - Fill in the basic attributes from the inode struct | 
 | 26 |  * @inode: Inode to use as the source | 
 | 27 |  * @stat: Where to fill in the attributes | 
 | 28 |  * | 
 | 29 |  * Fill in the basic attributes in the kstat structure from data that's to be | 
 | 30 |  * found on the VFS inode structure.  This is the default if no getattr inode | 
 | 31 |  * operation is supplied. | 
 | 32 |  */ | 
 | 33 | void generic_fillattr(struct inode *inode, struct kstat *stat) | 
 | 34 | { | 
 | 35 | 	stat->dev = inode->i_sb->s_dev; | 
 | 36 | 	stat->ino = inode->i_ino; | 
 | 37 | 	stat->mode = inode->i_mode; | 
 | 38 | 	stat->nlink = inode->i_nlink; | 
 | 39 | 	stat->uid = inode->i_uid; | 
 | 40 | 	stat->gid = inode->i_gid; | 
 | 41 | 	stat->rdev = inode->i_rdev; | 
 | 42 | 	stat->size = i_size_read(inode); | 
 | 43 | 	stat->atime = inode->i_atime; | 
 | 44 | 	stat->mtime = inode->i_mtime; | 
 | 45 | 	stat->ctime = inode->i_ctime; | 
 | 46 | 	stat->blksize = i_blocksize(inode); | 
 | 47 | 	stat->blocks = inode->i_blocks; | 
 | 48 |  | 
 | 49 | 	if (IS_NOATIME(inode)) | 
 | 50 | 		stat->result_mask &= ~STATX_ATIME; | 
 | 51 | 	if (IS_AUTOMOUNT(inode)) | 
 | 52 | 		stat->attributes |= STATX_ATTR_AUTOMOUNT; | 
 | 53 | } | 
 | 54 | EXPORT_SYMBOL(generic_fillattr); | 
 | 55 |  | 
 | 56 | /** | 
 | 57 |  * vfs_getattr_nosec - getattr without security checks | 
 | 58 |  * @path: file to get attributes from | 
 | 59 |  * @stat: structure to return attributes in | 
 | 60 |  * @request_mask: STATX_xxx flags indicating what the caller wants | 
 | 61 |  * @query_flags: Query mode (KSTAT_QUERY_FLAGS) | 
 | 62 |  * | 
 | 63 |  * Get attributes without calling security_inode_getattr. | 
 | 64 |  * | 
 | 65 |  * Currently the only caller other than vfs_getattr is internal to the | 
 | 66 |  * filehandle lookup code, which uses only the inode number and returns no | 
 | 67 |  * attributes to any user.  Any other code probably wants vfs_getattr. | 
 | 68 |  */ | 
 | 69 | int vfs_getattr_nosec(const struct path *path, struct kstat *stat, | 
 | 70 | 		      u32 request_mask, unsigned int query_flags) | 
 | 71 | { | 
 | 72 | 	struct inode *inode = d_backing_inode(path->dentry); | 
 | 73 |  | 
 | 74 | 	memset(stat, 0, sizeof(*stat)); | 
 | 75 | 	stat->result_mask |= STATX_BASIC_STATS; | 
 | 76 | 	request_mask &= STATX_ALL; | 
 | 77 | 	query_flags &= KSTAT_QUERY_FLAGS; | 
 | 78 | 	if (inode->i_op->getattr) | 
 | 79 | 		return inode->i_op->getattr(path, stat, request_mask, | 
 | 80 | 					    query_flags); | 
 | 81 |  | 
 | 82 | 	generic_fillattr(inode, stat); | 
 | 83 | 	return 0; | 
 | 84 | } | 
 | 85 | EXPORT_SYMBOL(vfs_getattr_nosec); | 
 | 86 |  | 
 | 87 | /* | 
 | 88 |  * vfs_getattr - Get the enhanced basic attributes of a file | 
 | 89 |  * @path: The file of interest | 
 | 90 |  * @stat: Where to return the statistics | 
 | 91 |  * @request_mask: STATX_xxx flags indicating what the caller wants | 
 | 92 |  * @query_flags: Query mode (KSTAT_QUERY_FLAGS) | 
 | 93 |  * | 
 | 94 |  * Ask the filesystem for a file's attributes.  The caller must indicate in | 
 | 95 |  * request_mask and query_flags to indicate what they want. | 
 | 96 |  * | 
 | 97 |  * If the file is remote, the filesystem can be forced to update the attributes | 
 | 98 |  * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can | 
 | 99 |  * suppress the update by passing AT_STATX_DONT_SYNC. | 
 | 100 |  * | 
 | 101 |  * Bits must have been set in request_mask to indicate which attributes the | 
 | 102 |  * caller wants retrieving.  Any such attribute not requested may be returned | 
 | 103 |  * anyway, but the value may be approximate, and, if remote, may not have been | 
 | 104 |  * synchronised with the server. | 
 | 105 |  * | 
 | 106 |  * 0 will be returned on success, and a -ve error code if unsuccessful. | 
 | 107 |  */ | 
 | 108 | int vfs_getattr(const struct path *path, struct kstat *stat, | 
 | 109 | 		u32 request_mask, unsigned int query_flags) | 
 | 110 | { | 
 | 111 | 	int retval; | 
 | 112 |  | 
 | 113 | 	retval = security_inode_getattr(path); | 
 | 114 | 	if (retval) | 
 | 115 | 		return retval; | 
 | 116 | 	return vfs_getattr_nosec(path, stat, request_mask, query_flags); | 
 | 117 | } | 
 | 118 | EXPORT_SYMBOL(vfs_getattr); | 
 | 119 |  | 
 | 120 | /** | 
 | 121 |  * vfs_statx_fd - Get the enhanced basic attributes by file descriptor | 
 | 122 |  * @fd: The file descriptor referring to the file of interest | 
 | 123 |  * @stat: The result structure to fill in. | 
 | 124 |  * @request_mask: STATX_xxx flags indicating what the caller wants | 
 | 125 |  * @query_flags: Query mode (KSTAT_QUERY_FLAGS) | 
 | 126 |  * | 
 | 127 |  * This function is a wrapper around vfs_getattr().  The main difference is | 
 | 128 |  * that it uses a file descriptor to determine the file location. | 
 | 129 |  * | 
 | 130 |  * 0 will be returned on success, and a -ve error code if unsuccessful. | 
 | 131 |  */ | 
 | 132 | int vfs_statx_fd(unsigned int fd, struct kstat *stat, | 
 | 133 | 		 u32 request_mask, unsigned int query_flags) | 
 | 134 | { | 
 | 135 | 	struct fd f; | 
 | 136 | 	int error = -EBADF; | 
 | 137 |  | 
 | 138 | 	if (query_flags & ~KSTAT_QUERY_FLAGS) | 
 | 139 | 		return -EINVAL; | 
 | 140 |  | 
 | 141 | 	f = fdget_raw(fd); | 
 | 142 | 	if (f.file) { | 
 | 143 | 		error = vfs_getattr(&f.file->f_path, stat, | 
 | 144 | 				    request_mask, query_flags); | 
 | 145 | 		fdput(f); | 
 | 146 | 	} | 
 | 147 | 	return error; | 
 | 148 | } | 
 | 149 | EXPORT_SYMBOL(vfs_statx_fd); | 
 | 150 |  | 
 | 151 | /** | 
 | 152 |  * vfs_statx - Get basic and extra attributes by filename | 
 | 153 |  * @dfd: A file descriptor representing the base dir for a relative filename | 
 | 154 |  * @filename: The name of the file of interest | 
 | 155 |  * @flags: Flags to control the query | 
 | 156 |  * @stat: The result structure to fill in. | 
 | 157 |  * @request_mask: STATX_xxx flags indicating what the caller wants | 
 | 158 |  * | 
 | 159 |  * This function is a wrapper around vfs_getattr().  The main difference is | 
 | 160 |  * that it uses a filename and base directory to determine the file location. | 
 | 161 |  * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink | 
 | 162 |  * at the given name from being referenced. | 
 | 163 |  * | 
 | 164 |  * 0 will be returned on success, and a -ve error code if unsuccessful. | 
 | 165 |  */ | 
 | 166 | int vfs_statx(int dfd, const char __user *filename, int flags, | 
 | 167 | 	      struct kstat *stat, u32 request_mask) | 
 | 168 | { | 
 | 169 | 	struct path path; | 
 | 170 | 	int error = -EINVAL; | 
 | 171 | 	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT; | 
 | 172 |  | 
 | 173 | 	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | | 
 | 174 | 		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0) | 
 | 175 | 		return -EINVAL; | 
 | 176 |  | 
 | 177 | 	if (flags & AT_SYMLINK_NOFOLLOW) | 
 | 178 | 		lookup_flags &= ~LOOKUP_FOLLOW; | 
 | 179 | 	if (flags & AT_NO_AUTOMOUNT) | 
 | 180 | 		lookup_flags &= ~LOOKUP_AUTOMOUNT; | 
 | 181 | 	if (flags & AT_EMPTY_PATH) | 
 | 182 | 		lookup_flags |= LOOKUP_EMPTY; | 
 | 183 |  | 
 | 184 | retry: | 
 | 185 | 	error = user_path_at(dfd, filename, lookup_flags, &path); | 
 | 186 | 	if (error) | 
 | 187 | 		goto out; | 
 | 188 |  | 
 | 189 | 	error = vfs_getattr(&path, stat, request_mask, flags); | 
 | 190 | 	path_put(&path); | 
 | 191 | 	if (retry_estale(error, lookup_flags)) { | 
 | 192 | 		lookup_flags |= LOOKUP_REVAL; | 
 | 193 | 		goto retry; | 
 | 194 | 	} | 
 | 195 | out: | 
 | 196 | 	return error; | 
 | 197 | } | 
 | 198 | EXPORT_SYMBOL(vfs_statx); | 
 | 199 |  | 
 | 200 |  | 
 | 201 | #ifdef __ARCH_WANT_OLD_STAT | 
 | 202 |  | 
/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 *
 * Copy a kstat out to userspace in the obsolete __old_kernel_stat layout.
 * Returns 0, -EOVERFLOW if a value does not fit the old narrow fields, or
 * -EFAULT on a failed copy.
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	/* Nag (at most 5 times per boot) that this ABI is obsolete. */
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	/* Zero first so struct padding never leaks kernel stack to userspace. */
	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a too-wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same truncation check for the link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	/* Old 32-bit ABI cannot represent files beyond the non-LFS limit. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	/* Old struct carries whole seconds only; nanoseconds are dropped. */
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
 | 243 |  | 
 | 244 | SYSCALL_DEFINE2(stat, const char __user *, filename, | 
 | 245 | 		struct __old_kernel_stat __user *, statbuf) | 
 | 246 | { | 
 | 247 | 	struct kstat stat; | 
 | 248 | 	int error; | 
 | 249 |  | 
 | 250 | 	error = vfs_stat(filename, &stat); | 
 | 251 | 	if (error) | 
 | 252 | 		return error; | 
 | 253 |  | 
 | 254 | 	return cp_old_stat(&stat, statbuf); | 
 | 255 | } | 
 | 256 |  | 
 | 257 | SYSCALL_DEFINE2(lstat, const char __user *, filename, | 
 | 258 | 		struct __old_kernel_stat __user *, statbuf) | 
 | 259 | { | 
 | 260 | 	struct kstat stat; | 
 | 261 | 	int error; | 
 | 262 |  | 
 | 263 | 	error = vfs_lstat(filename, &stat); | 
 | 264 | 	if (error) | 
 | 265 | 		return error; | 
 | 266 |  | 
 | 267 | 	return cp_old_stat(&stat, statbuf); | 
 | 268 | } | 
 | 269 |  | 
 | 270 | SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf) | 
 | 271 | { | 
 | 272 | 	struct kstat stat; | 
 | 273 | 	int error = vfs_fstat(fd, &stat); | 
 | 274 |  | 
 | 275 | 	if (!error) | 
 | 276 | 		error = cp_old_stat(&stat, statbuf); | 
 | 277 |  | 
 | 278 | 	return error; | 
 | 279 | } | 
 | 280 |  | 
 | 281 | #endif /* __ARCH_WANT_OLD_STAT */ | 
 | 282 |  | 
/* Pick the 32-bit or 64-bit flavour of a helper at compile time. */
#if BITS_PER_LONG == 32
#  define choose_32_64(a,b) a
#else
#  define choose_32_64(a,b) b
#endif

/* On 32-bit the old narrow dev_t encoding can overflow; 64-bit always fits. */
#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

/* An arch may zero struct stat padding itself; default to a full memset. */
#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
 | 295 |  | 
/*
 * Copy a kstat out to userspace in the native struct stat layout.
 * Returns 0, -EOVERFLOW if a value does not fit the target fields, or
 * -EFAULT on a failed copy.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	/* Device numbers must be representable in this ABI's dev_t. */
	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	/* 32-bit struct stat cannot hold sizes beyond the non-LFS limit. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	/* Zero padding so no kernel stack contents leak to userspace. */
	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a too-wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same truncation check for the link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	/* Nanosecond fields exist only where the arch's struct stat has them. */
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
 | 332 |  | 
 | 333 | SYSCALL_DEFINE2(newstat, const char __user *, filename, | 
 | 334 | 		struct stat __user *, statbuf) | 
 | 335 | { | 
 | 336 | 	struct kstat stat; | 
 | 337 | 	int error = vfs_stat(filename, &stat); | 
 | 338 |  | 
 | 339 | 	if (error) | 
 | 340 | 		return error; | 
 | 341 | 	return cp_new_stat(&stat, statbuf); | 
 | 342 | } | 
 | 343 |  | 
 | 344 | SYSCALL_DEFINE2(newlstat, const char __user *, filename, | 
 | 345 | 		struct stat __user *, statbuf) | 
 | 346 | { | 
 | 347 | 	struct kstat stat; | 
 | 348 | 	int error; | 
 | 349 |  | 
 | 350 | 	error = vfs_lstat(filename, &stat); | 
 | 351 | 	if (error) | 
 | 352 | 		return error; | 
 | 353 |  | 
 | 354 | 	return cp_new_stat(&stat, statbuf); | 
 | 355 | } | 
 | 356 |  | 
 | 357 | #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT) | 
 | 358 | SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename, | 
 | 359 | 		struct stat __user *, statbuf, int, flag) | 
 | 360 | { | 
 | 361 | 	struct kstat stat; | 
 | 362 | 	int error; | 
 | 363 |  | 
 | 364 | 	error = vfs_fstatat(dfd, filename, &stat, flag); | 
 | 365 | 	if (error) | 
 | 366 | 		return error; | 
 | 367 | 	return cp_new_stat(&stat, statbuf); | 
 | 368 | } | 
 | 369 | #endif | 
 | 370 |  | 
 | 371 | SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf) | 
 | 372 | { | 
 | 373 | 	struct kstat stat; | 
 | 374 | 	int error = vfs_fstat(fd, &stat); | 
 | 375 |  | 
 | 376 | 	if (!error) | 
 | 377 | 		error = cp_new_stat(&stat, statbuf); | 
 | 378 |  | 
 | 379 | 	return error; | 
 | 380 | } | 
 | 381 |  | 
/*
 * Core of readlink(2)/readlinkat(2): resolve @pathname relative to @dfd
 * without following the final symlink, and copy the link target into the
 * userspace buffer @buf (at most @bufsiz bytes, not NUL-terminated).
 * Returns the number of bytes placed in @buf or a -ve errno.
 */
static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	/* Allow "" so that AT_EMPTY_PATH-style callers resolve cleanly. */
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		/* An empty path yields ENOENT; a non-symlink yields EINVAL. */
		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		/* Stale NFS handle: retry once with revalidation forced. */
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}
 | 417 |  | 
/* readlinkat(2): thin syscall wrapper; all work happens in do_readlinkat(). */
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}
 | 423 |  | 
/* readlink(2): same as readlinkat(2) relative to the current directory. */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
 | 429 |  | 
 | 430 |  | 
 | 431 | /* ---------- LFS-64 ----------- */ | 
 | 432 | #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) | 
 | 433 |  | 
/* An arch may zero struct stat64 padding itself; default to a full memset. */
#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/*
 * Copy a kstat out to userspace in the LFS struct stat64 layout.
 * Returns 0, -EOVERFLOW on an unrepresentable inode number, or -EFAULT on
 * a failed copy.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	/* Zero padding so no kernel stack contents leak to userspace. */
	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a too-wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	/* Some arches duplicate the inode number in a legacy 32-bit field. */
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
 | 472 |  | 
 | 473 | SYSCALL_DEFINE2(stat64, const char __user *, filename, | 
 | 474 | 		struct stat64 __user *, statbuf) | 
 | 475 | { | 
 | 476 | 	struct kstat stat; | 
 | 477 | 	int error = vfs_stat(filename, &stat); | 
 | 478 |  | 
 | 479 | 	if (!error) | 
 | 480 | 		error = cp_new_stat64(&stat, statbuf); | 
 | 481 |  | 
 | 482 | 	return error; | 
 | 483 | } | 
 | 484 |  | 
 | 485 | SYSCALL_DEFINE2(lstat64, const char __user *, filename, | 
 | 486 | 		struct stat64 __user *, statbuf) | 
 | 487 | { | 
 | 488 | 	struct kstat stat; | 
 | 489 | 	int error = vfs_lstat(filename, &stat); | 
 | 490 |  | 
 | 491 | 	if (!error) | 
 | 492 | 		error = cp_new_stat64(&stat, statbuf); | 
 | 493 |  | 
 | 494 | 	return error; | 
 | 495 | } | 
 | 496 |  | 
 | 497 | SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf) | 
 | 498 | { | 
 | 499 | 	struct kstat stat; | 
 | 500 | 	int error = vfs_fstat(fd, &stat); | 
 | 501 |  | 
 | 502 | 	if (!error) | 
 | 503 | 		error = cp_new_stat64(&stat, statbuf); | 
 | 504 |  | 
 | 505 | 	return error; | 
 | 506 | } | 
 | 507 |  | 
 | 508 | SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, | 
 | 509 | 		struct stat64 __user *, statbuf, int, flag) | 
 | 510 | { | 
 | 511 | 	struct kstat stat; | 
 | 512 | 	int error; | 
 | 513 |  | 
 | 514 | 	error = vfs_fstatat(dfd, filename, &stat, flag); | 
 | 515 | 	if (error) | 
 | 516 | 		return error; | 
 | 517 | 	return cp_new_stat64(&stat, statbuf); | 
 | 518 | } | 
 | 519 | #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ | 
 | 520 |  | 
/*
 * Transcribe a kstat into the userspace statx buffer.  The struct is built
 * in a zeroed kernel-side temporary first, so reserved and padding fields
 * reach userspace as zero, then copied out in a single copy_to_user().
 * noinline_for_stack keeps the sizeable temporary out of the caller's
 * stack frame.
 */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	/* Translate k[ug]ids into the caller's user namespace. */
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	/* statx splits device numbers into explicit major/minor halves. */
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
 | 554 |  | 
 | 555 | /** | 
 | 556 |  * sys_statx - System call to get enhanced stats | 
 | 557 |  * @dfd: Base directory to pathwalk from *or* fd to stat. | 
 | 558 |  * @filename: File to stat or "" with AT_EMPTY_PATH | 
 | 559 |  * @flags: AT_* flags to control pathwalk. | 
 | 560 |  * @mask: Parts of statx struct actually required. | 
 | 561 |  * @buffer: Result buffer. | 
 | 562 |  * | 
 | 563 |  * Note that fstat() can be emulated by setting dfd to the fd of interest, | 
 | 564 |  * supplying "" as the filename and setting AT_EMPTY_PATH in the flags. | 
 | 565 |  */ | 
 | 566 | SYSCALL_DEFINE5(statx, | 
 | 567 | 		int, dfd, const char __user *, filename, unsigned, flags, | 
 | 568 | 		unsigned int, mask, | 
 | 569 | 		struct statx __user *, buffer) | 
 | 570 | { | 
 | 571 | 	struct kstat stat; | 
 | 572 | 	int error; | 
 | 573 |  | 
 | 574 | 	if (mask & STATX__RESERVED) | 
 | 575 | 		return -EINVAL; | 
 | 576 | 	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) | 
 | 577 | 		return -EINVAL; | 
 | 578 |  | 
 | 579 | 	error = vfs_statx(dfd, filename, flags, &stat, mask); | 
 | 580 | 	if (error) | 
 | 581 | 		return error; | 
 | 582 |  | 
 | 583 | 	return cp_statx(&stat, buffer); | 
 | 584 | } | 
 | 585 |  | 
 | 586 | #ifdef CONFIG_COMPAT | 
/*
 * Copy a kstat out to a 32-bit compat task in its struct compat_stat
 * layout.  Returns 0, -EOVERFLOW if a value does not fit the narrow
 * compat fields, or -EFAULT on a failed copy.
 */
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	/* The compat ABI uses the old narrow dev_t encoding. */
	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	/* Zero first so struct padding never leaks kernel stack to userspace. */
	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Fail rather than silently truncate a too-wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same truncation check for the link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
	/* Compat struct stat cannot hold sizes beyond the non-LFS limit. */
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
 | 619 |  | 
 | 620 | COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename, | 
 | 621 | 		       struct compat_stat __user *, statbuf) | 
 | 622 | { | 
 | 623 | 	struct kstat stat; | 
 | 624 | 	int error; | 
 | 625 |  | 
 | 626 | 	error = vfs_stat(filename, &stat); | 
 | 627 | 	if (error) | 
 | 628 | 		return error; | 
 | 629 | 	return cp_compat_stat(&stat, statbuf); | 
 | 630 | } | 
 | 631 |  | 
 | 632 | COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename, | 
 | 633 | 		       struct compat_stat __user *, statbuf) | 
 | 634 | { | 
 | 635 | 	struct kstat stat; | 
 | 636 | 	int error; | 
 | 637 |  | 
 | 638 | 	error = vfs_lstat(filename, &stat); | 
 | 639 | 	if (error) | 
 | 640 | 		return error; | 
 | 641 | 	return cp_compat_stat(&stat, statbuf); | 
 | 642 | } | 
 | 643 |  | 
 | 644 | #ifndef __ARCH_WANT_STAT64 | 
 | 645 | COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd, | 
 | 646 | 		       const char __user *, filename, | 
 | 647 | 		       struct compat_stat __user *, statbuf, int, flag) | 
 | 648 | { | 
 | 649 | 	struct kstat stat; | 
 | 650 | 	int error; | 
 | 651 |  | 
 | 652 | 	error = vfs_fstatat(dfd, filename, &stat, flag); | 
 | 653 | 	if (error) | 
 | 654 | 		return error; | 
 | 655 | 	return cp_compat_stat(&stat, statbuf); | 
 | 656 | } | 
 | 657 | #endif | 
 | 658 |  | 
 | 659 | COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd, | 
 | 660 | 		       struct compat_stat __user *, statbuf) | 
 | 661 | { | 
 | 662 | 	struct kstat stat; | 
 | 663 | 	int error = vfs_fstat(fd, &stat); | 
 | 664 |  | 
 | 665 | 	if (!error) | 
 | 666 | 		error = cp_compat_stat(&stat, statbuf); | 
 | 667 | 	return error; | 
 | 668 | } | 
 | 669 | #endif | 
 | 670 |  | 
 | 671 | /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ | 
 | 672 | void __inode_add_bytes(struct inode *inode, loff_t bytes) | 
 | 673 | { | 
 | 674 | 	inode->i_blocks += bytes >> 9; | 
 | 675 | 	bytes &= 511; | 
 | 676 | 	inode->i_bytes += bytes; | 
 | 677 | 	if (inode->i_bytes >= 512) { | 
 | 678 | 		inode->i_blocks++; | 
 | 679 | 		inode->i_bytes -= 512; | 
 | 680 | 	} | 
 | 681 | } | 
 | 682 | EXPORT_SYMBOL(__inode_add_bytes); | 
 | 683 |  | 
 | 684 | void inode_add_bytes(struct inode *inode, loff_t bytes) | 
 | 685 | { | 
 | 686 | 	spin_lock(&inode->i_lock); | 
 | 687 | 	__inode_add_bytes(inode, bytes); | 
 | 688 | 	spin_unlock(&inode->i_lock); | 
 | 689 | } | 
 | 690 |  | 
 | 691 | EXPORT_SYMBOL(inode_add_bytes); | 
 | 692 |  | 
 | 693 | void __inode_sub_bytes(struct inode *inode, loff_t bytes) | 
 | 694 | { | 
 | 695 | 	inode->i_blocks -= bytes >> 9; | 
 | 696 | 	bytes &= 511; | 
 | 697 | 	if (inode->i_bytes < bytes) { | 
 | 698 | 		inode->i_blocks--; | 
 | 699 | 		inode->i_bytes += 512; | 
 | 700 | 	} | 
 | 701 | 	inode->i_bytes -= bytes; | 
 | 702 | } | 
 | 703 |  | 
 | 704 | EXPORT_SYMBOL(__inode_sub_bytes); | 
 | 705 |  | 
 | 706 | void inode_sub_bytes(struct inode *inode, loff_t bytes) | 
 | 707 | { | 
 | 708 | 	spin_lock(&inode->i_lock); | 
 | 709 | 	__inode_sub_bytes(inode, bytes); | 
 | 710 | 	spin_unlock(&inode->i_lock); | 
 | 711 | } | 
 | 712 |  | 
 | 713 | EXPORT_SYMBOL(inode_sub_bytes); | 
 | 714 |  | 
/* Read the inode's byte usage under i_lock for a consistent snapshot. */
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);
 | 726 |  | 
 | 727 | void inode_set_bytes(struct inode *inode, loff_t bytes) | 
 | 728 | { | 
 | 729 | 	/* Caller is here responsible for sufficient locking | 
 | 730 | 	 * (ie. inode->i_lock) */ | 
 | 731 | 	inode->i_blocks = bytes >> 9; | 
 | 732 | 	inode->i_bytes = bytes & 511; | 
 | 733 | } | 
 | 734 |  | 
 | 735 | EXPORT_SYMBOL(inode_set_bytes); |