/*
 * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}

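/* CCM building blocks implemented in assembly using the ARMv8 Crypto Extensions */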
asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				     u32 *macp, u32 const rk[], u32 rounds);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);

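/* scalar AES encryption routine, used on the paths where NEON may not be used */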
asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = ce_aes_expandkey(ctx, in_key, key_len);
	if (!ret)
		return 0;

	tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return -EINVAL;
}

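/*
 * CCM requires an even tag size of at least 4 bytes; the upper bound of
 * 16 bytes is enforced via .maxauthsize in the aead_alg definition below.
 */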
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}

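/*
 * Construct the initial CBC-MAC block (B_0 in RFC 3610) from the IV, the
 * message length and the flags byte.
 */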
static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *		  (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}

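/*
 * Fold 'abytes' bytes of input into the CBC-MAC; *macp tracks how many
 * bytes of the current MAC block have been filled so far.
 */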
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
{
	if (may_use_simd()) {
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
		kernel_neon_end();
	} else {
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

		while (abytes >= AES_BLOCK_SIZE) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

		if (abytes > 0) {
			__aes_arm64_encrypt(key->key_enc, mac, mac,
					    num_rounds(key));
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		}
	}
}

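/*
 * RFC 3610 encodes the associated data length as two bytes when it is
 * below 0xff00, and as 0xff 0xfe followed by four bytes otherwise.
 */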
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = 0;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
	scatterwalk_start(&walk, req->src);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);
		ccm_update_mac(ctx, mac, p, n, &macp);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}

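/*
 * Scalar fallback: CTR encryption and the CBC-MAC computed one block at
 * a time using the generic AES cipher, for contexts where the NEON unit
 * may not be used.
 */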
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

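		/*
		 * If this is the last chunk of input, process the partial
		 * tail block here rather than handing it back to the walker.
		 */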
		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			__aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
					    num_rounds(ctx));
			__aes_arm64_encrypt(ctx->key_enc, mac, mac,
					    num_rounds(ctx));
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		__aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
		__aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}

static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, true);

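	/*
	 * A partial tail block is only passed to the core routine once the
	 * end of the input has been reached (walk.nbytes == walk.total).
	 */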
	if (may_use_simd()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

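/*
 * On decryption, req->cryptlen covers both the ciphertext and the appended
 * auth tag, so the tag length is subtracted before the MAC is initialized.
 */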
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, true);

	if (may_use_simd()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_decrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
	}

	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;
	return 0;
}

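/*
 * The priority of 300 makes this implementation preferred over generic,
 * lower-priority implementations of "ccm(aes)".
 */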
static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};

static int __init aes_mod_init(void)
{
	if (!(elf_hwcap & HWCAP_AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");