/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
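
/*
 * Illustration (not part of this file): the per-user counters are what
 * per-user resource limits are checked against elsewhere in the kernel.
 * The RLIMIT_NPROC check done at fork time looks roughly like this
 * (exact call site and field access vary by kernel version):
 *
 *	if (atomic_read(&user->processes) >= task_rlimit(p, RLIMIT_NPROC))
 *		return -EAGAIN;   (user already owns too many processes)
 */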

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
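
/*
 * Illustration (not part of this file): the count above is a kref, and
 * the namespace is pinned/released through thin wrappers, roughly:
 *
 *	get_user_ns(ns)  ->  kref_get(&ns->kref), returns ns
 *	put_user_ns(ns)  ->  kref_put(&ns->kref, release-callback)
 *
 * (the exact wrapper definitions live in the user_namespace headers
 * and vary by kernel version).
 */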

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
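
/*
 * Worked example (assuming UIDHASH_BITS == 7, i.e. UIDHASH_SZ == 128;
 * the real values are configuration dependent): for uid 1000,
 * __uidhashfn() folds the high bits into the low ones before masking:
 *
 *	(1000 >> 7) + 1000 = 7 + 1000 = 1007
 *	1007 & 127 = 111
 *
 * so uid 1000 lands in bucket 111.  The fold keeps uids that differ
 * only above bit 6 from all colliding in the same bucket.
 */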

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
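
/*
 * Illustration (not part of this file): because the lock can also be
 * taken from softirq context, a plain spin_lock() from process context
 * would be deadlock-prone (a softirq could interrupt the lock holder
 * on the same CPU and spin on the lock forever).  Lookups therefore
 * use the irq-disabling form, as find_user() below does:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... walk or modify the hash ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * spin_lock_bh() would also keep softirqs out, but the comment above
 * explains why free_uid() cannot use the bh variants here.
 */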

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.user_ns	= &init_user_ns,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
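
/*
 * Illustration (not part of this file): a typical caller pairs the
 * helpers above with the lock, as find_user() below does:
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	up = uid_hash_find(uid, uidhashentry(ns, uid));
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * Taking the reference inside uid_hash_find() while the lock is still
 * held is what keeps a concurrent free_uid()/free_user() from freeing
 * the entry between lookup and use.
 */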

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}
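
/*
 * Illustration (not part of this file): __releases() is a sparse
 * annotation documenting that free_user() is entered with uidhash_lock
 * held and returns with it dropped.  The matching annotation for a
 * function that exits holding a lock would be, schematically:
 *
 *	static void grab(void) __acquires(&uidhash_lock)
 *	{
 *		spin_lock_irq(&uidhash_lock);
 *	}
 *
 * (grab() is a made-up name, for illustration only.)
 */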

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
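
/*
 * Illustration (not part of this file): callers must balance the
 * reference find_user() takes, e.g.:
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... use user ...
 *		free_uid(user);
 *	}
 */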

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save_nort(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore_nort(flags);
}
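
/*
 * Illustration (not part of this file): atomic_dec_and_lock(&cnt, &lock)
 * decrements cnt and, only when it reaches zero, returns true with the
 * lock held.  The generic version is roughly:
 *
 *	if (atomic_add_unless(&cnt, -1, 1))
 *		return 0;                 (not the last ref: no lock taken)
 *	spin_lock(&lock);
 *	if (atomic_dec_and_test(&cnt))
 *		return 1;                 (dropped 1 -> 0 under the lock)
 *	spin_unlock(&lock);
 *	return 0;
 *
 * Doing the final 1 -> 0 transition under uidhash_lock is what lets
 * uid_hash_find() safely take new references under the same lock.
 */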

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		new->user_ns = get_user_ns(ns);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* lost the race: drop our copy, use the winner's */
			put_user_ns(ns);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out:
	/* no lock is held here; only the allocation-failure path lands here */
	return NULL;
}
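
/*
 * Illustration (not part of this file): alloc_uid() above is the
 * classic optimistic-allocation pattern for using a sleeping allocator
 * under a non-sleeping lock:
 *
 *	1. look up under the lock; if found, done
 *	2. drop the lock and allocate with GFP_KERNEL (may sleep)
 *	3. retake the lock and look up again: if someone inserted the
 *	   same uid while we slept, free our copy and use theirs;
 *	   otherwise insert ours
 *
 * The second lookup is what makes the race in step 2 harmless.
 */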

static int __init uid_cache_init(void)
{
	int n;

	/* SLAB_PANIC: no failure check needed, we panic on failure */
	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);