// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023 ASR Micro Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_TEE
#include <linux/tee_drv.h>
#endif
#include <linux/crypto.h>
#include <linux/cputype.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include "asr-aes-optee.h"
#include "asr-geu-optee.h"

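/* Single driver-wide device instance, set once in asr_geu_aes_register(). */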
struct asr_geu_aes *asr_aes_local;

static struct teec_uuid pta_aes_uuid = ASR_AES_ACCESS_UUID;

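/*
 * Ask the AES pseudo-TA in OP-TEE whether the hardware key (RKEK) has been
 * provisioned and whether software access to it has been disabled.
 */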
static int asr_optee_aes_get_rkek_state(u32 *state)
{
	return asrgeu_optee_acquire_ta_data(&pta_aes_uuid, CMD_AES_HWKEY_STATUS, state);
}

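/*
 * Run one ECB/CBC hardware-key operation in OP-TEE; the src/dst scatterlists
 * are handed unchanged to the GEU OP-TEE DMA helper.
 */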
static int asr_optee_aes_hwkey_process(uint32_t aes_mode, uint32_t op_mode,
				       struct scatterlist *src, struct scatterlist *dst,
				       size_t len, uint32_t key_size,
				       u8 *iv, uint32_t ivsize)
{
	return asrgeu_optee_aes_acquire_ta_dma(&pta_aes_uuid, aes_mode,
				src, dst, len, len, key_size, op_mode, iv, ivsize);
}

static inline void asr_aes_set_mode(struct asr_geu_aes *dd,
				    const struct asr_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}

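/*
 * The skcipher API expects req->iv to hold the last ciphertext block when a
 * CBC request completes, so a follow-up request can continue the chain. For
 * in-place decryption that block was saved in rctx->lastc before it was
 * overwritten.
 */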
static void asr_aes_set_iv_as_last_ciphertext_block(struct asr_geu_aes *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct asr_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & AES_FLAGS_ENCRYPT) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	} else {
		if (req->src == req->dst)
			memcpy(req->iv, rctx->lastc, ivsize);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}
}

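/*
 * Queue handling: enqueue the new request (if any) and, unless the engine is
 * already busy, dequeue and start the next one. If the dequeued request is
 * the caller's own, it runs synchronously and the start result is returned;
 * otherwise the enqueue status is returned.
 */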
static int asr_aes_handle_queue(struct asr_geu_aes *dd,
				struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct asr_aes_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	start_async = (areq != new_areq);
	dd->is_async = start_async;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

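/* Finish the current request, update the IV, and kick the queue tasklet. */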
static inline int asr_aes_complete(struct asr_geu_aes *dd, int err)
{
	dd->flags &= ~AES_FLAGS_BUSY;

	asr_aes_set_iv_as_last_ciphertext_block(dd);

	if (dd->is_async)
		dd->areq->complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}

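/*
 * Start the dequeued request: translate the request flags into the PTA
 * command (ECB or CBC, encrypt or decrypt), process the whole request in a
 * single OP-TEE call, then complete it immediately.
 */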
static int asr_aes_start(struct asr_geu_aes *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct asr_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	u8 *iv;
	u32 flags, aes_mode, op_mode, keylen, ivsize;
	int err;

	asr_aes_set_mode(dd, rctx);

	flags = dd->flags & AES_FLAGS_MODE_MASK;

	if ((flags & AES_FLAGS_OPMODE_MASK) == AES_FLAGS_CBC) {
		aes_mode = CMD_AES_HWKEY_CBC;
		ivsize = crypto_skcipher_ivsize(cipher);
		iv = req->iv;
	} else {
		iv = NULL;
		ivsize = 0;
		aes_mode = CMD_AES_HWKEY_ECB;
	}

	if (flags & AES_FLAGS_ENCRYPT)
		op_mode = AES_ENCRYPT;
	else
		op_mode = AES_DECRYPT;

	keylen = dd->ctx->keylen;

	err = asr_optee_aes_hwkey_process(aes_mode, op_mode, req->src,
				req->dst, req->cryptlen, keylen, iv, ivsize);

	return asr_aes_complete(dd, err);
}

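/*
 * Common entry point for the encrypt/decrypt callbacks. For in-place CBC
 * decryption the last ciphertext block is saved into rctx->lastc here,
 * because the destination overwrites it before the IV is updated in
 * asr_aes_complete().
 */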
static int asr_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct asr_aes_reqctx *rctx;
	struct asr_geu_aes *dd = asr_aes_local;

	ctx->block_size = AES_BLOCK_SIZE;
	ctx->dd = dd;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;
	rctx->use_rkek = ctx->use_rkek;

	if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
		unsigned int ivsize = crypto_skcipher_ivsize(cipher);

		if (req->cryptlen >= ivsize) {
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

	return asr_aes_handle_queue(dd, &req->base);
}

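/*
 * The caller-supplied key material is deliberately ignored: these algorithms
 * always encrypt with the on-chip RKEK. Only the requested key length is
 * recorded, and setkey fails if the RKEK has not been burned.
 */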
static int asr_aes_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct asr_geu_aes *dd = asr_aes_local;

	(void)key; /* ignore the sw key */

	if (!dd->rkek_burned)
		return -EPERM;

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = keylen;

	return 0;
}

static int asr_aes_ecb_encrypt(struct skcipher_request *req)
{
	return asr_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int asr_aes_ecb_decrypt(struct skcipher_request *req)
{
	return asr_aes_crypt(req, AES_FLAGS_ECB);
}

static int asr_aes_cbc_encrypt(struct skcipher_request *req)
{
	return asr_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int asr_aes_cbc_decrypt(struct skcipher_request *req)
{
	return asr_aes_crypt(req, AES_FLAGS_CBC);
}

static int asr_aes_hwkey_init(struct crypto_skcipher *tfm)
{
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct asr_aes_reqctx);
	ctx->start = asr_aes_start;

	return 0;
}

static void asr_aes_exit(struct crypto_skcipher *tfm)
{
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void asr_aes_queue_task(unsigned long data)
{
	struct asr_geu_aes *dd = (struct asr_geu_aes *)data;

	asr_aes_handle_queue(dd, NULL);
}

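/*
 * Hardware-key skcipher algorithms. The "aes-hwkey" cra_name keeps them from
 * matching plain "ecb(aes)"/"cbc(aes)" lookups, so they are only selected
 * when a caller explicitly asks for the RKEK-backed transform.
 */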
static struct skcipher_alg aes_algs[] = {
	/* AES - ECB, using hardware key, a.k.a. RKEK */
	{
		.base = {
			.cra_name = "ecb(aes-hwkey)",
			.cra_driver_name = "asr-ecb-aes-hwkey",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct asr_aes_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
		.setkey = asr_aes_set_hwkey,
		.encrypt = asr_aes_ecb_encrypt,
		.decrypt = asr_aes_ecb_decrypt,
		.init = asr_aes_hwkey_init,
		.exit = asr_aes_exit,
	},
	/* AES - CBC, using hardware key, a.k.a. RKEK */
	{
		.base = {
			.cra_name = "cbc(aes-hwkey)",
			.cra_driver_name = "asr-cbc-aes-hwkey",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct asr_aes_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
		.setkey = asr_aes_set_hwkey,
		.encrypt = asr_aes_cbc_encrypt,
		.decrypt = asr_aes_cbc_decrypt,
		.init = asr_aes_hwkey_init,
		.exit = asr_aes_exit,
		.ivsize = AES_BLOCK_SIZE,
	},
};

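/*
 * Allocate the AES context, query the RKEK provisioning state once, and
 * register the hardware-key skcipher algorithms with the crypto API.
 */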
int asr_geu_aes_register(struct asr_geu_dev *geu_dd)
{
	int i, j, err;
	struct asr_geu_aes *aes_dd = NULL;
	struct device *dev = geu_dd->dev;
	u32 rkek_state;

	aes_dd = devm_kzalloc(dev, sizeof(struct asr_geu_aes), GFP_KERNEL);
	if (!aes_dd)
		return -ENOMEM;

	asr_aes_local = aes_dd;
	geu_dd->asr_aes = aes_dd;

	err = asr_optee_aes_get_rkek_state(&rkek_state);
	if (err) {
		dev_warn(dev, "can't get hwkey(rkek) state\n");
		aes_dd->rkek_burned = 0;
	} else {
		aes_dd->rkek_burned = rkek_state ? 1 : 0;
		switch (rkek_state) {
		case 2:
			dev_warn(dev, "hwkey(rkek) burned, SW access not disabled\n");
			break;
		case 1:
			dev_warn(dev, "hwkey(rkek) burned, SW access disabled\n");
			break;
		case 0:
			dev_warn(dev, "hwkey(rkek) not burned\n");
			break;
		}
	}

	spin_lock_init(&aes_dd->lock);
	tasklet_init(&aes_dd->queue_task, asr_aes_queue_task,
		     (unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ASR_AES_QUEUE_LENGTH);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err) {
			for (j = 0; j < i; j++)
				crypto_unregister_skcipher(&aes_algs[j]);
			return err;
		}
	}

	return 0;
}

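/* Unregister the algorithms and tear down the queue tasklet. */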
int asr_geu_aes_unregister(struct asr_geu_dev *geu_dd)
{
	int i;
	struct asr_geu_aes *aes_dd = geu_dd->asr_aes;
	struct device *dev = geu_dd->dev;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);

	tasklet_kill(&aes_dd->queue_task);

	asr_aes_local = NULL;	/* don't leave a dangling pointer after free */
	devm_kfree(dev, aes_dd);

	return 0;
}

MODULE_DESCRIPTION("ASR HWKey AES driver with optee-os.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yu Zhang");