/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000

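/*
 * Each possible CPU gets its own request queue and work item: a request
 * is queued on the CPU that submits it and is later processed by a work
 * item running on that same CPU (see cryptd_enqueue_request() below).
 */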
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

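/*
 * cryptd_enqueue_request() obtains the transform's refcount via
 * crypto_tfm_ctx(), so every cryptd context structure above must have
 * its atomic_t refcnt as the first member.  A transform whose refcount
 * is zero is never pinned; otherwise each queued request takes an extra
 * reference that is dropped again when the request completes.
 */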
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;
	bool may_backlog;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);
	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

	if (err == -EBUSY && !may_backlog)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/* Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents the worker from being
	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
	 * prevents cryptd_enqueue_request() from being entered from softirq
	 * context while the queue is being manipulated.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

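/*
 * Propagate the CRYPTO_ALG_INTERNAL bit from the template parameters so
 * that a cryptd instance wrapping an internal-only algorithm is itself
 * marked internal and stays hidden from general algorithm lookups.
 */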
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

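/*
 * The completion callbacks below run twice for backlogged requests:
 * first with -EINPROGRESS when the request leaves the backlog, and then
 * again with the final result.  The -EINPROGRESS case therefore skips
 * the actual crypto work and goes straight to notifying the original
 * completion function.
 */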
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

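/*
 * The actual encryption/decryption runs synchronously in the worker: the
 * request is replayed against the child transform through an on-stack
 * subrequest, which is zeroed again before completion is reported.
 */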
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

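/*
 * Import state directly into the shash descriptor embedded in the
 * request context; the descriptor has to be (re)bound to the child
 * transform first, since the importing request may not have gone
 * through cryptd_hash_init() on this context.
 */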
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

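/*
 * Note that the transform and completion callback are read before the
 * -EINPROGRESS check: the final completion may drop the last reference
 * and free the tfm, so nothing derived from the request may be touched
 * after compl() has run.
 */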
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

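/*
 * Instances are created on demand by asking the crypto API for
 * "cryptd(<driver-name>)"; the resulting algorithm keeps the original
 * cra_name but advertises a priority 50 above the wrapped algorithm
 * (see cryptd_init_instance() above).
 */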
static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

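/*
 * The refcount is 1 for an idle transform and is bumped for every
 * request still sitting in a queue, so refcnt - 1 != 0 means requests
 * are pending.  The same convention is used by the skcipher, ahash and
 * aead variants below.
 */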
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

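/*
 * Illustrative use only (not taken from any particular caller): a
 * driver wanting an async handle to a synchronous cipher might do
 * something like
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_skcipher(ctfm);
 *
 * The returned handle wraps "cryptd(cbc(aes))" and is refcounted as
 * described above.
 */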
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

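/*
 * Illustrative use only: SIMD hash drivers typically wrap an internal
 * implementation, e.g.
 *
 *	cryptd_tfm = cryptd_alloc_ahash("__driver-hash",
 *					CRYPTO_ALG_INTERNAL,
 *					CRYPTO_ALG_INTERNAL);
 *
 * where "__driver-hash" is a placeholder for the internal driver name;
 * the type/mask pair lets the lookup match CRYPTO_ALG_INTERNAL
 * algorithms that are otherwise hidden.
 */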
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

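/*
 * Illustrative use only, mirroring the ahash case above:
 *
 *	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes",
 *				       CRYPTO_ALG_INTERNAL,
 *				       CRYPTO_ALG_INTERNAL);
 *
 * "__driver-gcm-aes" stands in for an internal AEAD driver name.
 */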
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");