/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

struct kmem_cache *key_jar;
struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 1000000;	 /* root's key count quota */
unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 */
struct key_user *key_user_lookup(kuid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent, **p;

try_again:
	parent = NULL;
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid_lt(uid, user->uid))
			p = &(*p)->rb_left;
		else if (uid_gt(uid, user->uid))
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	refcount_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	refcount_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;
}

/*
 * Dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}
}

/*
 * Allocate a serial number for a key.  These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 * @restrict_link: Optional link restriction for new keyrings.
 *
 * Allocate a key of the specified type with the attributes given.  The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
 *
 * The restrict_link structure (if not NULL) will be freed when the
 * keyring is destroyed, so it must be dynamically allocated.
 *
 * The user's key count quota is updated to reflect the creation of the key and
 * the user's key data quota has the default for the key type reserved.  The
 * instantiation function should amend this as necessary.  If insufficient
 * quota is available, -EDQUOT will be returned.
 *
 * The LSM security modules can prevent a key being created, in which case
 * -EACCES will be returned.
 *
 * Returns a pointer to the new key if successful and an error code otherwise.
 *
 * Note that the caller needs to ensure the key type isn't uninstantiated.
 * Internally this can be done by locking key_types_sem.  Externally, this can
 * be done by either never unregistering the key type, or making sure
 * key_alloc() calls don't race with module unloading.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      kuid_t uid, kgid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags,
		      struct key_restriction *restrict_link)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	if (type->vet_description) {
		ret = type->vet_description(desc);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	desclen = strlen(desc);
	quotalen = desclen + 1 + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 > maxkeys ||
			    user->qnbytes + quotalen > maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	key->index_key.desc_len = desclen;
	key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
	if (!key->index_key.description)
		goto no_memory_3;

	refcount_set(&key->usage, 1);
	init_rwsem(&key->sem);
	lockdep_set_class(&key->sem, &type->lock_class);
	key->index_key.type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->restrict_link = restrict_link;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
	if (flags & KEY_ALLOC_BUILT_IN)
		key->flags |= 1 << KEY_FLAG_BUILTIN;
	if (flags & KEY_ALLOC_UID_KEYRING)
		key->flags |= 1 << KEY_FLAG_UID_KEYRING;

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;
}
EXPORT_SYMBOL(key_alloc);
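
/*
 * Illustrative sketch (a hypothetical in-kernel caller, not code from this
 * file): key_alloc() only produces an uninstantiated key, so a caller would
 * typically instantiate it, or key_put() it on failure, along these lines.
 * The "user" key type, description and payload below are assumptions made
 * purely for the example.
 *
 *	const struct cred *cred = current_cred();
 *	struct key *key;
 *	int ret;
 *
 *	key = key_alloc(&key_type_user, "example:desc",
 *			cred->fsuid, cred->fsgid, cred,
 *			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
 *			KEY_ALLOC_NOT_IN_QUOTA, NULL);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, "payload", 7, NULL, NULL);
 *	if (ret < 0) {
 *		key_put(key);
 *		return ret;
 *	}
 */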

/**
 * key_payload_reserve - Adjust data quota reservation for the key's payload
 * @key: The key to make the reservation for.
 * @datalen: The amount of data payload the caller now wants.
 *
 * Adjust the amount of the owning user's key data quota that a key reserves.
 * If the amount is increased, then -EDQUOT may be returned if there isn't
 * enough free quota available.
 *
 * If successful, 0 is returned.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int)datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
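
/*
 * Illustrative sketch (not code from this file): a key type's ->instantiate()
 * op is expected to call key_payload_reserve() for the real payload size
 * before committing the payload, roughly as follows.  The function name and
 * the kmemdup()'d copy are assumptions made for the example.
 *
 *	static int example_instantiate(struct key *key,
 *				       struct key_preparsed_payload *prep)
 *	{
 *		void *copy;
 *		int ret;
 *
 *		ret = key_payload_reserve(key, prep->datalen);
 *		if (ret < 0)
 *			return ret;
 *
 *		copy = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
 *		if (!copy)
 *			return -ENOMEM;
 *
 *		rcu_assign_keypointer(key, copy);
 *		return 0;
 *	}
 */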

/*
 * Change the key state to being instantiated.
 */
static void mark_key_instantiated(struct key *key, int reject_error)
{
	/* Commit the payload before setting the state; barrier versus
	 * key_read_state().
	 */
	smp_store_release(&key->state,
			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
}

/*
 * Instantiate a key and link it into the target keyring atomically.  Must be
 * called with the target keyring's semaphore writelocked.  The target key's
 * semaphore need not be locked as instantiation is serialised by
 * key_construction_mutex.
 */
static int __key_instantiate_and_link(struct key *key,
				      struct key_preparsed_payload *prep,
				      struct key *keyring,
				      struct key *authkey,
				      struct assoc_array_edit **_edit)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (key->state == KEY_IS_UNINSTANTIATED) {
		/* instantiate the key */
		ret = key->type->instantiate(key, prep);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			mark_key_instantiated(key, 0);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring) {
				if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
					set_bit(KEY_FLAG_KEEP, &key->flags);

				__key_link(key, _edit);
			}

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);

			if (prep->expiry != TIME64_MAX) {
				key->expiry = prep->expiry;
				key_schedule_gc(prep->expiry + key_gc_delay);
			}
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the keyring.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit;
	int ret;

	memset(&prep, 0, sizeof(prep));
	prep.data = data;
	prep.datalen = datalen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME64_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	if (keyring) {
		ret = __key_link_begin(keyring, &key->index_key, &edit);
		if (ret < 0)
			goto error;

		if (keyring->restrict_link && keyring->restrict_link->check) {
			struct key_restriction *keyres = keyring->restrict_link;

			ret = keyres->check(keyring, key->type, &prep.payload,
					    keyres->key);
			if (ret < 0)
				goto error_link_end;
		}
	}

	ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);

error_link_end:
	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);

error:
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}

EXPORT_SYMBOL(key_instantiate_and_link);

/**
 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key.
 * @error: The error to return when the key is hit.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Negatively instantiate a key that's in the uninstantiated state and, if
 * successful, set its timeout and stored error and link it in to the
 * destination keyring if one is supplied.  The key and any links to the key
 * will be automatically garbage collected after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return the stored error code (typically ENOKEY) until the negative
 * key expires.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	struct assoc_array_edit *edit;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring) {
		if (keyring->restrict_link)
			return -EPERM;

		link_ret = __key_link_begin(keyring, &key->index_key, &edit);
	}

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (key->state == KEY_IS_UNINSTANTIATED) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		mark_key_instantiated(key, -error);
		key->expiry = ktime_get_real_seconds() + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(key, &edit);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring && link_ret == 0)
		__key_link_end(keyring, &key->index_key, edit);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);

/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (refcount_dec_and_test(&key->usage))
			schedule_work(&key_gc_work);
	}
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* A key is allowed to be looked up only if someone still owns a
	 * reference to it - otherwise it's awaiting the gc.
	 */
	if (!refcount_inc_not_zero(&key->usage))
		goto not_found;

error:
	spin_unlock(&key_serial_lock);
	return key;
}

/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if successful.  If the type wasn't
 * available -ENOKEY is returned instead.
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;
}

void key_set_timeout(struct key *key, unsigned timeout)
{
	time64_t expiry = 0;

	/* make the changes with the locks held to prevent races */
	down_write(&key->sem);

	if (timeout > 0)
		expiry = ktime_get_real_seconds() + timeout;

	key->expiry = expiry;
	key_schedule_gc(key->expiry + key_gc_delay);

	up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}

/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     struct key_preparsed_payload *prep)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, prep);
	if (ret == 0)
		/* Updating a negative key positively instantiates it */
		mark_key_instantiated(key, 0);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}

/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create a
 * link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct keyring_index_key index_key = {
		.description	= description,
	};
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit;
	const struct cred *cred = current_cred();
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;
	struct key_restriction *restrict_link = NULL;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	index_key.type = key_type_lookup(type);
	if (IS_ERR(index_key.type)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!index_key.type->instantiate ||
	    (!index_key.description && !index_key.type->preparse))
		goto error_put_type;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
		restrict_link = keyring->restrict_link;

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_put_type;

	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = index_key.type->def_datalen;
	prep.expiry = TIME64_MAX;
	if (index_key.type->preparse) {
		ret = index_key.type->preparse(&prep);
		if (ret < 0) {
			key_ref = ERR_PTR(ret);
			goto error_free_prep;
		}
		if (!index_key.description)
			index_key.description = prep.description;
		key_ref = ERR_PTR(-EINVAL);
		if (!index_key.description)
			goto error_free_prep;
	}
	index_key.desc_len = strlen(index_key.description);

	ret = __key_link_begin(keyring, &index_key, &edit);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_free_prep;
	}

	if (restrict_link && restrict_link->check) {
		ret = restrict_link->check(keyring, index_key.type,
					   &prep.payload, restrict_link->key);
		if (ret < 0) {
			key_ref = ERR_PTR(ret);
			goto error_link_end;
		}
	}

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_NEED_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (index_key.type->update) {
		key_ref = find_key_to_update(keyring_ref, &index_key);
		if (key_ref)
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW;

		if (index_key.type->read)
			perm |= KEY_POS_READ;

		if (index_key.type == &key_type_keyring ||
		    index_key.type->update)
			perm |= KEY_POS_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(index_key.type, index_key.description,
			cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_link_end;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_link_end:
	__key_link_end(keyring, &index_key, edit);
error_free_prep:
	if (index_key.type->preparse)
		index_key.type->free_preparse(&prep);
error_put_type:
	key_type_put(index_key.type);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, &index_key, edit);

	key = key_ref_to_ptr(key_ref);
	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
		ret = wait_for_key_construction(key, true);
		if (ret < 0) {
			key_ref_put(key_ref);
			key_ref = ERR_PTR(ret);
			goto error_free_prep;
		}
	}

	key_ref = __key_update(key_ref, &prep);
	goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
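
/*
 * Illustrative sketch (not code from this file): an add_key(2)-style caller
 * would typically drive key_create_or_update() roughly as below, letting it
 * concoct the permissions via KEY_PERM_UNDEF.  The keyring, "user" type name,
 * payload buffer and possession flag are assumptions made for the example.
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(make_key_ref(keyring, true),
 *				    "user", "example:desc",
 *				    payload, plen,
 *				    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *	key_ref_put(kref);
 */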

/**
 * key_update - Update a key's contents.
 * @key_ref: The pointer (plus possession flag) to the key.
 * @payload: The data to be used to update the key.
 * @plen: The length of @payload.
 *
 * Attempt to update the contents of a key with the given payload data.  The
 * caller must be granted Write permission on the key.  Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating.  The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key_preparsed_payload prep;
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		return ret;

	/* attempt to update it if supported */
	if (!key->type->update)
		return -EOPNOTSUPP;

	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME64_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	down_write(&key->sem);

	ret = key->type->update(key, &prep);
	if (ret == 0)
		/* Updating a negative key positively instantiates it */
		mark_key_instantiated(key, 0);

	up_write(&key->sem);

error:
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}
EXPORT_SYMBOL(key_update);

/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources.  The
 * revocation timeout is set and the key and all its links will be
 * automatically garbage collected after key_gc_delay amount of time if they
 * are not manually dealt with first.
 */
void key_revoke(struct key *key)
{
	time64_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	time = ktime_get_real_seconds();
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * key_invalidate - Invalidate a key.
 * @key: The key to be invalidated.
 *
 * Mark a key as being invalidated and have it cleaned up immediately.  The key
 * is ignored by all searches and other operations from this point.
 */
void key_invalidate(struct key *key)
{
	kenter("%d", key_serial(key));

	key_check(key);

	if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
		down_write_nested(&key->sem, 1);
		if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
			key_schedule_gc_links();
		up_write(&key->sem);
	}
}
EXPORT_SYMBOL(key_invalidate);

/**
 * generic_key_instantiate - Simple instantiation of a key from preparsed data
 * @key: The key to be instantiated
 * @prep: The preparsed data to load.
 *
 * Instantiate a key from preparsed data.  We assume we can just copy the data
 * in directly and clear the old pointers.
 *
 * This can be pointed to directly by the key type instantiate op pointer.
 */
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	int ret;

	pr_devel("==>%s()\n", __func__);

	ret = key_payload_reserve(key, prep->quotalen);
	if (ret == 0) {
		rcu_assign_keypointer(key, prep->payload.data[0]);
		key->payload.data[1] = prep->payload.data[1];
		key->payload.data[2] = prep->payload.data[2];
		key->payload.data[3] = prep->payload.data[3];
		prep->payload.data[0] = NULL;
		prep->payload.data[1] = NULL;
		prep->payload.data[2] = NULL;
		prep->payload.data[3] = NULL;
	}
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);

/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);

	pr_notice("Key type %s registered\n", ktype->name);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;
}
EXPORT_SYMBOL(register_key_type);

/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed to get rid of their payloads and
 * they and their links will be garbage collected as soon as possible.
 */
void unregister_key_type(struct key_type *ktype)
{
	down_write(&key_types_sem);
	list_del_init(&ktype->link);
	downgrade_write(&key_types_sem);
	key_gc_keytype(ktype);
	pr_notice("Key type %s unregistered\n", ktype->name);
	up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
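
/*
 * Illustrative sketch (not code from this file): a minimal loadable key type
 * might be defined and registered roughly as follows, pointing ->instantiate
 * at generic_key_instantiate().  The "example" type name and the reuse of the
 * user-defined key type's helpers from include/keys/user-type.h are
 * assumptions made for the example.
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.preparse	= user_preparse,
 *		.free_preparse	= user_free_preparse,
 *		.instantiate	= generic_key_instantiate,
 *		.revoke		= user_revoke,
 *		.destroy	= user_destroy,
 *		.describe	= user_describe,
 *		.read		= user_read,
 *	};
 *
 *	static int __init example_key_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 *	static void __exit example_key_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 *
 *	module_init(example_key_init);
 *	module_exit(example_key_exit);
 */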

/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	list_add_tail(&key_type_logon.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}