/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>

#include <linux/atomic.h>

#include "internal.h"
/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

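/*
 * Final teardown of a struct file: drop the cred reference and return the
 * object to the filp slab cache.  file_free() defers this through an RCU
 * callback so that RCU-protected lockless lookups of struct file never see
 * freed memory.
 */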
static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (!(f->f_mode & FMODE_NOACCOUNT))
		percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

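/*
 * Allocate a file and set up the fields every caller needs: credentials,
 * reference count, locks, eventpoll state and the f_flags/f_mode pair.
 * The caller is responsible for nr_files accounting.
 */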
static struct file *__alloc_file(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free_rcu(&f->f_u.fu_rcuhead);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	f->f_flags = flags;
	f->f_mode = OPEN_FMODE(flags);
	/* f->f_version: 0 */

	return f;
}
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if something went wrong, e.g. we are over the
 * file structures limit, ran out of memory, or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will get an imbalance in the mount's writer count
 * and a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = __alloc_file(flags, cred);
	if (!IS_ERR(f))
		percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * Should not be used unless there's a very good reason to do so.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f = __alloc_file(flags, cred);

	if (!IS_ERR(f))
		f->f_mode |= FMODE_NOACCOUNT;

	return f;
}
/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
		const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	if ((file->f_mode & FMODE_READ) &&
	    likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	    likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
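/*
 * Open a file over an inode that has no path of its own: allocate a
 * pseudo dentry named @name on @mnt, instantiate it with @inode and open
 * it with @fops.  Used for kernel-internal files that never appear in a
 * directory.
 */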
struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
				const char *name, int flags,
				const struct file_operations *fops)
{
	static const struct dentry_operations anon_ops = {
		.d_dname = simple_dname
	};
	struct qstr this = QSTR_INIT(name, strlen(name));
	struct path path;
	struct file *file;

	path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
	if (!path.dentry)
		return ERR_PTR(-ENOMEM);
	if (!mnt->mnt_sb->s_d_op)
		d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(mnt);
	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
	}
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);

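/*
 * Open a new file that shares the path (and hence dentry, mount and inode)
 * of an already opened file.  The clone also inherits base's address_space
 * so both files see the same page cache state.
 */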
struct file *alloc_file_clone(struct file *base, int flags,
				const struct file_operations *fops)
{
	struct file *f = alloc_file(&base->f_path, flags, fops);
	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}
/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	ima_file_free(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	dput(dentry);
	mntput(mnt);
out:
	file_free(file);
}
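/*
 * Final fput() done from interrupt context or from kernel threads (which
 * never return to userspace and so never run task_work) cannot call
 * __fput() in place; such files are queued on this lockless list and the
 * real cleanup is done later from a workqueue.
 */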
static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
		__fput(f);
}
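/*
 * task_work callback: runs __fput() in process context of the task that
 * dropped the last reference, before it returns to userspace.
 */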
static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}
/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is boot - we *do* need
 * to make sure our writes to binaries on initramfs have not left us
 * with opened struct files waiting for __fput() - execve() won't work
 * without that.  Please don't add more callers without very good
 * reasons; in particular, never call this with locks held and never
 * call it from a thread that might need to do some work on any kind
 * of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

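/*
 * Drop a reference to @file.  When the last reference goes away, the real
 * cleanup (__fput()) is deferred: to task_work if the caller is a normal
 * process, otherwise to the delayed_fput workqueue.  fput() itself never
 * sleeps.
 */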
void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}
/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() calls made by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

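/*
 * Set up the slab cache for struct file and the nr_files counter.
 * Called once during early boot, before any files can be opened.
 */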
void __init files_init(void)
{
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By default,
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, totalram_pages - 1);
	n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}