#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, const struct path *path)
{
	struct path old_root;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

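/*
 * Illustrative sketch (not part of this file): a chroot-style caller
 * resolves a directory, hands it to set_fs_root(), and then drops its
 * own reference, since set_fs_root() takes one of its own via
 * path_get().  Omitting the permission checks the real chroot(2) in
 * fs/open.c performs:
 *
 *	struct path path;
 *	int error = kern_path(name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
 *
 *	if (error)
 *		return error;
 *	set_fs_root(current->fs, &path);
 *	path_put(&path);
 *	return 0;
 */
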
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}
EXPORT_SYMBOL_GPL(set_fs_pwd);

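/*
 * Illustrative sketch (not part of this file): chdir-style callers
 * follow the same pattern - resolve a directory, verify it is
 * searchable (the real chdir(2) in fs/open.c checks MAY_EXEC |
 * MAY_CHDIR), then let set_fs_pwd() take its own reference:
 *
 *	struct path path;
 *	int error = kern_path(dir_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
 *
 *	if (error)
 *		return error;
 *	set_fs_pwd(current->fs, &path);
 *	path_put(&path);
 *	return 0;
 */
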
/*
 * If *p still points at *old, switch it to *new and report the swap;
 * the caller is responsible for fixing up the path reference counts.
 */
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
		return 0;
	*p = *new;
	return 1;
}

/*
 * Walk every task in the system and retarget any fs->root or fs->pwd
 * that still points at old_root so that it points at new_root instead,
 * taking a reference on new_root for each swap and dropping the
 * corresponding references on old_root once the tasklist walk is done.
 */
void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			int hits = 0;
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			hits += replace_path(&fs->root, old_root, new_root);
			hits += replace_path(&fs->pwd, old_root, new_root);
			write_seqcount_end(&fs->seq);
			while (hits--) {
				count++;
				path_get(new_root);
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

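/*
 * Illustrative note (not part of this file): a typical caller is
 * pivot_root(2) in fs/namespace.c, which retargets every task still
 * rooted at the old root onto the new one:
 *
 *	chroot_fs_refs(&root, &new);
 *
 * The reference counting above relies on replace_path() not taking any
 * references itself: one path_get(new_root) is issued per successful
 * swap while fs->lock is still held, and the matching path_put() calls
 * on old_root are deferred until tasklist_lock has been dropped,
 * because path_put() may block.
 */
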
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}
EXPORT_SYMBOL_GPL(free_fs_struct);

/*
 * Detach tsk from its fs_struct on exit and free the structure once the
 * last user is gone.
 */
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_init(&fs->seq);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get(&fs->root);
		fs->pwd = old->pwd;
		path_get(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}
EXPORT_SYMBOL_GPL(copy_fs_struct);

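/*
 * Illustrative sketch (not part of this file): fork() decides between
 * sharing and copying roughly as copy_fs() in kernel/fork.c does -
 * CLONE_FS bumps the user count of the existing fs_struct, anything
 * else gets a private copy via copy_fs_struct():
 *
 *	if (clone_flags & CLONE_FS) {
 *		spin_lock(&fs->lock);
 *		if (fs->in_exec) {
 *			spin_unlock(&fs->lock);
 *			return -EAGAIN;
 *		}
 *		fs->users++;
 *		spin_unlock(&fs->lock);
 *		return 0;
 *	}
 *	tsk->fs = copy_fs_struct(fs);
 *	if (!tsk->fs)
 *		return -ENOMEM;
 *	return 0;
 */
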
/*
 * Give the calling task its own private copy of its fs_struct, dropping
 * its reference on the (possibly shared) one it had before.
 */
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

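/*
 * Illustrative sketch (not part of this file): kernel threads start out
 * sharing init's fs_struct, so a thread that wants, say, a private
 * umask detaches first.  The nfsd service threads do roughly:
 *
 *	if (unshare_fs_struct())
 *		goto out;
 *	current->fs->umask = 0;
 */
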
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

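/*
 * Illustrative sketch (not part of this file): the VFS applies the
 * umask at create time only when the filesystem does not manage it
 * itself through POSIX ACLs, along the lines of the checks in
 * fs/namei.c:
 *
 *	if (!IS_POSIXACL(dir_inode))
 *		mode &= ~current_umask();
 */
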
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_ZERO(init_fs.seq),
	.umask		= 0022,
};