From f441873642eebf20566c18d2966a8cd4b433ec1c Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Tue, 5 Nov 2019 14:28:17 +0100
Subject: [PATCH] crypto: qce - switch to skcipher API

Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API, which is intended to
replace both blkcipher and ablkcipher. While all consumers of the API were
converted long ago, some producers of ablkciphers remain, forcing us to
keep the ablkcipher support routines alive, along with the matching code
to expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the
blkcipher code in the near future.

Reviewed-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Backported-to-4.19-by: Eneas U de Queiroz <cotequeiroz@gmail.com>

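[Editor's note] As background for the diff below, here is a minimal sketch
contrasting how one algorithm is described under the two APIs. It is
illustrative only: the my_* handlers are placeholders, not symbols from this
driver, and several mandatory fields (cra_ctxsize, cra_blocksize, ...) are
trimmed. Field and registration names follow <linux/crypto.h> and
<crypto/internal/skcipher.h> from kernels of this era.

	/* Legacy producer: an ablkcipher hangs off a generic crypto_alg. */
	static struct crypto_alg old_alg = {
		.cra_name = "cbc(aes)",
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_type = &crypto_ablkcipher_type,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = my_ablk_setkey,	/* takes a crypto_ablkcipher */
			.encrypt = my_ablk_encrypt,	/* takes an ablkcipher_request */
			.decrypt = my_ablk_decrypt,
		},
	};	/* registered with crypto_register_alg() */

	/* skcipher producer: a first-class skcipher_alg wrapping a base alg. */
	static struct skcipher_alg new_alg = {
		.base.cra_name = "cbc(aes)",
		.base.cra_flags = CRYPTO_ALG_ASYNC,	/* type flag now implicit */
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = my_skc_setkey,	/* takes a crypto_skcipher */
		.encrypt = my_skc_encrypt,	/* takes an skcipher_request */
		.decrypt = my_skc_decrypt,
		.init = my_skc_init,		/* per-tfm setup, replaces cra_init */
		.exit = my_skc_exit,
	};	/* registered with crypto_register_skcipher() */
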
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
 		common.o \
 		dma.o \
 		sha.o \
-		ablkcipher.o
+		skcipher.o
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
 	unsigned int cryptlen;
 };
 
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	return container_of(alg, struct qce_alg_template, alg.crypto);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	return container_of(alg, struct qce_alg_template, alg.skcipher);
 }
 
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
 
 #endif /* _CIPHER_H_ */
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -304,13 +304,13 @@ go_proc:
 	return 0;
 }
 
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 				     u32 totallen, u32 offset)
 {
-	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
-	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+	struct skcipher_request *req = skcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
 	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
-	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
 	struct qce_device *qce = tmpl->qce;
 	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
 	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_reques
 	      u32 offset)
 {
 	switch (type) {
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		return qce_setup_regs_skcipher(async_req, totallen, offset);
 	case CRYPTO_ALG_TYPE_AHASH:
 		return qce_setup_regs_ahash(async_req, totallen, offset);
 	default:
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <crypto/aes.h>
 #include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
 
 /* key size in bytes */
 #define QCE_SHA_HMAC_KEY_SIZE		64
@@ -79,7 +80,7 @@ struct qce_alg_template {
 	unsigned long alg_flags;
 	const u32 *std_iv;
 	union {
-		struct crypto_alg crypto;
+		struct skcipher_alg skcipher;
 		struct ahash_alg ahash;
 	} alg;
 	struct qce_device *qce;
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,7 +22,7 @@
 #define QCE_QUEUE_LENGTH	1
 
 static const struct qce_algo_ops *qce_ops[] = {
-	&ablkcipher_ops,
+	&skcipher_ops,
 	&ahash_ops,
 };
 
--- a/drivers/crypto/qce/ablkcipher.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-
-#include "cipher.h"
-
-static LIST_HEAD(ablkcipher_algs);
-
-static void qce_ablkcipher_done(void *data)
-{
-	struct crypto_async_request *async_req = data;
-	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
-	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
-	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
-	struct qce_device *qce = tmpl->qce;
-	enum dma_data_direction dir_src, dir_dst;
-	u32 status;
-	int error;
-	bool diff_dst;
-
-	diff_dst = (req->src != req->dst) ? true : false;
-	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
-
-	error = qce_dma_terminate_all(&qce->dma);
-	if (error)
-		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
-			error);
-
-	if (diff_dst)
-		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
-	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-
-	sg_free_table(&rctx->dst_tbl);
-
-	error = qce_check_status(qce, &status);
-	if (error < 0)
-		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
-
-	qce->async_req_done(tmpl->qce, error);
-}
-
-static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
-{
-	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
-	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
-	struct qce_device *qce = tmpl->qce;
-	enum dma_data_direction dir_src, dir_dst;
-	struct scatterlist *sg;
-	bool diff_dst;
-	gfp_t gfp;
-	int ret;
-
-	rctx->iv = req->info;
-	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	rctx->cryptlen = req->nbytes;
-
-	diff_dst = (req->src != req->dst) ? true : false;
-	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
-
-	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
-	if (diff_dst)
-		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
-	else
-		rctx->dst_nents = rctx->src_nents;
-	if (rctx->src_nents < 0) {
-		dev_err(qce->dev, "Invalid numbers of src SG.\n");
-		return rctx->src_nents;
-	}
-	if (rctx->dst_nents < 0) {
-		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
-		return -rctx->dst_nents;
-	}
-
-	rctx->dst_nents += 1;
-
-	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-						GFP_KERNEL : GFP_ATOMIC;
-
-	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
-	if (ret)
-		return ret;
-
-	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
-
-	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto error_free;
-	}
-
-	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto error_free;
-	}
-
-	sg_mark_end(sg);
-	rctx->dst_sg = rctx->dst_tbl.sgl;
-
-	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-	if (ret < 0)
-		goto error_free;
-
-	if (diff_dst) {
-		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
-		if (ret < 0)
-			goto error_unmap_dst;
-		rctx->src_sg = req->src;
-	} else {
-		rctx->src_sg = rctx->dst_sg;
-	}
-
-	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
-			       rctx->dst_sg, rctx->dst_nents,
-			       qce_ablkcipher_done, async_req);
-	if (ret)
-		goto error_unmap_src;
-
-	qce_dma_issue_pending(&qce->dma);
-
-	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
-	if (ret)
-		goto error_terminate;
-
-	return 0;
-
-error_terminate:
-	qce_dma_terminate_all(&qce->dma);
-error_unmap_src:
-	if (diff_dst)
-		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
-error_unmap_dst:
-	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-error_free:
-	sg_free_table(&rctx->dst_tbl);
-	return ret;
-}
-
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
-				 unsigned int keylen)
-{
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
-	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	int ret;
-
-	if (!key || !keylen)
-		return -EINVAL;
-
-	switch (keylen) {
-	case AES_KEYSIZE_128:
-	case AES_KEYSIZE_256:
-		break;
-	default:
-		goto fallback;
-	}
-
-	ctx->enc_keylen = keylen;
-	memcpy(ctx->enc_key, key, keylen);
-	return 0;
-fallback:
-	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-	if (!ret)
-		ctx->enc_keylen = keylen;
-	return ret;
-}
-
-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
-			  unsigned int keylen)
-{
-	struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
-	int err;
-
-	err = verify_ablkcipher_des_key(ablk, key);
-	if (err)
-		return err;
-
-	ctx->enc_keylen = keylen;
-	memcpy(ctx->enc_key, key, keylen);
-	return 0;
-}
-
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
-			   unsigned int keylen)
-{
-	struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
-	int err;
-
-	err = verify_ablkcipher_des3_key(ablk, key);
-	if (err)
-		return err;
-
-	ctx->enc_keylen = keylen;
-	memcpy(ctx->enc_key, key, keylen);
-	return 0;
-}
-
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
-{
-	struct crypto_tfm *tfm =
-			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
-	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
-	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
-	int ret;
-
-	rctx->flags = tmpl->alg_flags;
-	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
-
-	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
-	    ctx->enc_keylen != AES_KEYSIZE_256) {
-		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
-		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
-		skcipher_request_set_callback(subreq, req->base.flags,
-					      NULL, NULL);
-		skcipher_request_set_crypt(subreq, req->src, req->dst,
-					   req->nbytes, req->info);
-		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
-				crypto_skcipher_decrypt(subreq);
-		skcipher_request_zero(subreq);
-		return ret;
-	}
-
-	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
-}
-
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
-	return qce_ablkcipher_crypt(req, 1);
-}
-
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
-	return qce_ablkcipher_crypt(req, 0);
-}
-
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
-{
-	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	memset(ctx, 0, sizeof(*ctx));
-	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
-
-	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
-						   0, CRYPTO_ALG_NEED_FALLBACK);
-	return PTR_ERR_OR_ZERO(ctx->fallback);
-}
-
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
-{
-	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_sync_skcipher(ctx->fallback);
-}
-
-struct qce_ablkcipher_def {
-	unsigned long flags;
-	const char *name;
-	const char *drv_name;
-	unsigned int blocksize;
-	unsigned int ivsize;
-	unsigned int min_keysize;
-	unsigned int max_keysize;
-};
-
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
-	{
-		.flags = QCE_ALG_AES | QCE_MODE_ECB,
-		.name = "ecb(aes)",
-		.drv_name = "ecb-aes-qce",
-		.blocksize = AES_BLOCK_SIZE,
-		.ivsize = AES_BLOCK_SIZE,
-		.min_keysize = AES_MIN_KEY_SIZE,
-		.max_keysize = AES_MAX_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_AES | QCE_MODE_CBC,
-		.name = "cbc(aes)",
-		.drv_name = "cbc-aes-qce",
-		.blocksize = AES_BLOCK_SIZE,
-		.ivsize = AES_BLOCK_SIZE,
-		.min_keysize = AES_MIN_KEY_SIZE,
-		.max_keysize = AES_MAX_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_AES | QCE_MODE_CTR,
-		.name = "ctr(aes)",
-		.drv_name = "ctr-aes-qce",
-		.blocksize = AES_BLOCK_SIZE,
-		.ivsize = AES_BLOCK_SIZE,
-		.min_keysize = AES_MIN_KEY_SIZE,
-		.max_keysize = AES_MAX_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_AES | QCE_MODE_XTS,
-		.name = "xts(aes)",
-		.drv_name = "xts-aes-qce",
-		.blocksize = AES_BLOCK_SIZE,
-		.ivsize = AES_BLOCK_SIZE,
-		.min_keysize = AES_MIN_KEY_SIZE,
-		.max_keysize = AES_MAX_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_DES | QCE_MODE_ECB,
-		.name = "ecb(des)",
-		.drv_name = "ecb-des-qce",
-		.blocksize = DES_BLOCK_SIZE,
-		.ivsize = 0,
-		.min_keysize = DES_KEY_SIZE,
-		.max_keysize = DES_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_DES | QCE_MODE_CBC,
-		.name = "cbc(des)",
-		.drv_name = "cbc-des-qce",
-		.blocksize = DES_BLOCK_SIZE,
-		.ivsize = DES_BLOCK_SIZE,
-		.min_keysize = DES_KEY_SIZE,
-		.max_keysize = DES_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
-		.name = "ecb(des3_ede)",
-		.drv_name = "ecb-3des-qce",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.ivsize = 0,
-		.min_keysize = DES3_EDE_KEY_SIZE,
-		.max_keysize = DES3_EDE_KEY_SIZE,
-	},
-	{
-		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
-		.name = "cbc(des3_ede)",
-		.drv_name = "cbc-3des-qce",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.ivsize = DES3_EDE_BLOCK_SIZE,
-		.min_keysize = DES3_EDE_KEY_SIZE,
-		.max_keysize = DES3_EDE_KEY_SIZE,
-	},
-};
-
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
-				       struct qce_device *qce)
-{
-	struct qce_alg_template *tmpl;
-	struct crypto_alg *alg;
-	int ret;
-
-	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
-	if (!tmpl)
-		return -ENOMEM;
-
-	alg = &tmpl->alg.crypto;
-
-	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
-	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-		 def->drv_name);
-
-	alg->cra_blocksize = def->blocksize;
-	alg->cra_ablkcipher.ivsize = def->ivsize;
-	alg->cra_ablkcipher.min_keysize = def->min_keysize;
-	alg->cra_ablkcipher.max_keysize = def->max_keysize;
-	alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
-				     IS_DES(def->flags) ? qce_des_setkey :
-				     qce_ablkcipher_setkey;
-	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
-	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
-	alg->cra_priority = 300;
-	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
-			 CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY;
-	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
-	alg->cra_alignmask = 0;
-	alg->cra_type = &crypto_ablkcipher_type;
-	alg->cra_module = THIS_MODULE;
-	alg->cra_init = qce_ablkcipher_init;
-	alg->cra_exit = qce_ablkcipher_exit;
-
-	INIT_LIST_HEAD(&tmpl->entry);
-	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
-	tmpl->alg_flags = def->flags;
-	tmpl->qce = qce;
-
-	ret = crypto_register_alg(alg);
-	if (ret) {
-		kfree(tmpl);
-		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
-		return ret;
-	}
-
-	list_add_tail(&tmpl->entry, &ablkcipher_algs);
-	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
-	return 0;
-}
-
-static void qce_ablkcipher_unregister(struct qce_device *qce)
-{
-	struct qce_alg_template *tmpl, *n;
-
-	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
-		crypto_unregister_alg(&tmpl->alg.crypto);
-		list_del(&tmpl->entry);
-		kfree(tmpl);
-	}
-}
-
-static int qce_ablkcipher_register(struct qce_device *qce)
-{
-	int ret, i;
-
-	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
-		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
-		if (ret)
-			goto err;
-	}
-
-	return 0;
-err:
-	qce_ablkcipher_unregister(qce);
-	return ret;
-}
-
-const struct qce_algo_ops ablkcipher_ops = {
-	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
-	.register_algs = qce_ablkcipher_register,
-	.unregister_algs = qce_ablkcipher_unregister,
-	.async_req_handle = qce_ablkcipher_async_req_handle,
-};
--- /dev/null
+++ b/drivers/crypto/qce/skcipher.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(skcipher_algs);
+
+static void qce_skcipher_done(void *data)
+{
+	struct crypto_async_request *async_req = data;
+	struct skcipher_request *req = skcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+	struct qce_device *qce = tmpl->qce;
+	enum dma_data_direction dir_src, dir_dst;
+	u32 status;
+	int error;
+	bool diff_dst;
+
+	diff_dst = (req->src != req->dst) ? true : false;
+	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+	error = qce_dma_terminate_all(&qce->dma);
+	if (error)
+		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
+			error);
+
+	if (diff_dst)
+		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+
+	sg_free_table(&rctx->dst_tbl);
+
+	error = qce_check_status(qce, &status);
+	if (error < 0)
+		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+
+	qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+	struct skcipher_request *req = skcipher_request_cast(async_req);
+	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+	struct qce_device *qce = tmpl->qce;
+	enum dma_data_direction dir_src, dir_dst;
+	struct scatterlist *sg;
+	bool diff_dst;
+	gfp_t gfp;
+	int ret;
+
+	rctx->iv = req->iv;
+	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+	rctx->cryptlen = req->cryptlen;
+
+	diff_dst = (req->src != req->dst) ? true : false;
+	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+	if (diff_dst)
+		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+	else
+		rctx->dst_nents = rctx->src_nents;
+	if (rctx->src_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of src SG.\n");
+		return rctx->src_nents;
+	}
+	if (rctx->dst_nents < 0) {
+		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+		return -rctx->dst_nents;
+	}
+
+	rctx->dst_nents += 1;
+
+	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+						GFP_KERNEL : GFP_ATOMIC;
+
+	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+	if (ret)
+		return ret;
+
+	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto error_free;
+	}
+
+	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto error_free;
+	}
+
+	sg_mark_end(sg);
+	rctx->dst_sg = rctx->dst_tbl.sgl;
+
+	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+	if (ret < 0)
+		goto error_free;
+
+	if (diff_dst) {
+		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+		if (ret < 0)
+			goto error_unmap_dst;
+		rctx->src_sg = req->src;
+	} else {
+		rctx->src_sg = rctx->dst_sg;
+	}
+
+	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+			       rctx->dst_sg, rctx->dst_nents,
+			       qce_skcipher_done, async_req);
+	if (ret)
+		goto error_unmap_src;
+
+	qce_dma_issue_pending(&qce->dma);
+
+	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+	if (ret)
+		goto error_terminate;
+
+	return 0;
+
+error_terminate:
+	qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+	if (diff_dst)
+		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+error_unmap_dst:
+	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+error_free:
+	sg_free_table(&rctx->dst_tbl);
+	return ret;
+}
+
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
+			       unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
+	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	if (!key || !keylen)
+		return -EINVAL;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	default:
+		goto fallback;
+	}
+
+	ctx->enc_keylen = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	return 0;
+fallback:
+	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+	if (!ret)
+		ctx->enc_keylen = keylen;
+	return ret;
+}
+
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
+			  unsigned int keylen)
+{
+	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+	int err;
+
+	err = verify_skcipher_des_key(ablk, key);
+	if (err)
+		return err;
+
+	ctx->enc_keylen = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	return 0;
+}
+
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
+			   unsigned int keylen)
+{
+	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+	int err;
+
+	err = verify_skcipher_des3_key(ablk, key);
+	if (err)
+		return err;
+
+	ctx->enc_keylen = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	return 0;
+}
+
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+	int ret;
+
+	rctx->flags = tmpl->alg_flags;
+	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+	    ctx->enc_keylen != AES_KEYSIZE_256) {
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
+		skcipher_request_set_callback(subreq, req->base.flags,
+					      NULL, NULL);
+		skcipher_request_set_crypt(subreq, req->src, req->dst,
+					   req->cryptlen, req->iv);
+		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+				crypto_skcipher_decrypt(subreq);
+		skcipher_request_zero(subreq);
+		return ret;
+	}
+
+	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_skcipher_encrypt(struct skcipher_request *req)
+{
+	return qce_skcipher_crypt(req, 1);
+}
+
+static int qce_skcipher_decrypt(struct skcipher_request *req)
+{
+	return qce_skcipher_crypt(req, 0);
+}
+
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
+{
+	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	memset(ctx, 0, sizeof(*ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+
+	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+						   0, CRYPTO_ALG_NEED_FALLBACK);
+	return PTR_ERR_OR_ZERO(ctx->fallback);
+}
+
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_sync_skcipher(ctx->fallback);
+}
+
+struct qce_skcipher_def {
+	unsigned long flags;
+	const char *name;
+	const char *drv_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	unsigned int min_keysize;
+	unsigned int max_keysize;
+};
+
+static const struct qce_skcipher_def skcipher_def[] = {
+	{
+		.flags = QCE_ALG_AES | QCE_MODE_ECB,
+		.name = "ecb(aes)",
+		.drv_name = "ecb-aes-qce",
+		.blocksize = AES_BLOCK_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_AES | QCE_MODE_CBC,
+		.name = "cbc(aes)",
+		.drv_name = "cbc-aes-qce",
+		.blocksize = AES_BLOCK_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_AES | QCE_MODE_CTR,
+		.name = "ctr(aes)",
+		.drv_name = "ctr-aes-qce",
+		.blocksize = AES_BLOCK_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_AES | QCE_MODE_XTS,
+		.name = "xts(aes)",
+		.drv_name = "xts-aes-qce",
+		.blocksize = AES_BLOCK_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_DES | QCE_MODE_ECB,
+		.name = "ecb(des)",
+		.drv_name = "ecb-des-qce",
+		.blocksize = DES_BLOCK_SIZE,
+		.ivsize = 0,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_DES | QCE_MODE_CBC,
+		.name = "cbc(des)",
+		.drv_name = "cbc-des-qce",
+		.blocksize = DES_BLOCK_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
+		.name = "ecb(des3_ede)",
+		.drv_name = "ecb-3des-qce",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.ivsize = 0,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+	},
+	{
+		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
+		.name = "cbc(des3_ede)",
+		.drv_name = "cbc-3des-qce",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+	},
+};
+
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
+				     struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl;
+	struct skcipher_alg *alg;
+	int ret;
+
+	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+	if (!tmpl)
+		return -ENOMEM;
+
+	alg = &tmpl->alg.skcipher;
+
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->drv_name);
+
+	alg->base.cra_blocksize = def->blocksize;
+	alg->ivsize = def->ivsize;
+	alg->min_keysize = def->min_keysize;
+	alg->max_keysize = def->max_keysize;
+	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+		      IS_DES(def->flags) ? qce_des_setkey :
+		      qce_skcipher_setkey;
+	alg->encrypt = qce_skcipher_encrypt;
+	alg->decrypt = qce_skcipher_decrypt;
+
+	alg->base.cra_priority = 300;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+			      CRYPTO_ALG_NEED_FALLBACK |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+	alg->base.cra_alignmask = 0;
+	alg->base.cra_module = THIS_MODULE;
+
+	alg->init = qce_skcipher_init;
+	alg->exit = qce_skcipher_exit;
+
+	INIT_LIST_HEAD(&tmpl->entry);
+	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
+	tmpl->alg_flags = def->flags;
+	tmpl->qce = qce;
+
+	ret = crypto_register_skcipher(alg);
+	if (ret) {
+		kfree(tmpl);
+		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+		return ret;
+	}
+
+	list_add_tail(&tmpl->entry, &skcipher_algs);
+	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
+	return 0;
+}
+
+static void qce_skcipher_unregister(struct qce_device *qce)
+{
+	struct qce_alg_template *tmpl, *n;
+
+	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+		crypto_unregister_skcipher(&tmpl->alg.skcipher);
+		list_del(&tmpl->entry);
+		kfree(tmpl);
+	}
+}
+
+static int qce_skcipher_register(struct qce_device *qce)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+err:
+	qce_skcipher_unregister(qce);
+	return ret;
+}
+
+const struct qce_algo_ops skcipher_ops = {
+	.type = CRYPTO_ALG_TYPE_SKCIPHER,
+	.register_algs = qce_skcipher_register,
+	.unregister_algs = qce_skcipher_unregister,
+	.async_req_handle = qce_skcipher_async_req_handle,
+};
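
[Editor's note] For completeness, a consumer-side sketch of the skcipher API
that the converted driver now serves. This trailer is not part of the applied
diff; demo_cbc_aes() is a hypothetical helper with terse error handling, but
every call is the standard <crypto/skcipher.h> interface.

	#include <crypto/aes.h>
	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	static int demo_cbc_aes(struct scatterlist *src, struct scatterlist *dst,
				unsigned int len, const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		/* May bind to cbc-aes-qce if it is the highest-priority provider. */
		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (err)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		/* Drive the async request synchronously via crypto_wait_req(). */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}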