// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

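/*
 * Callers describe their cipher with a struct common_glue_ctx that lists
 * implementations from the widest batch down to a one-block fallback.  A
 * sketch of such a table, adapted from the serpent-avx glue code (the
 * symbol names are illustrative; they are not defined in this file):
 *
 *	static const struct common_glue_ctx serpent_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,
 *
 *		.funcs = { {
 *			.num_blocks = SERPENT_PARALLEL_BLOCKS,
 *			.fn_u = { .ecb = serpent_ecb_enc_8way_avx }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = __serpent_encrypt }
 *		} }
 *	};
 */

/*
 * ECB: walk the request and, within each chunk, try the widest batch
 * function first, falling back to narrower batches (ultimately the
 * one-block routine) for whatever remains.
 */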
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

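/*
 * CBC encryption is inherently serial (each block's input depends on the
 * previous ciphertext block), so only a one-block function is taken here
 * and no FPU batching applies.
 */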
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src++;
			dst++;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

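/*
 * CBC decryption parallelizes: each plaintext block needs only the
 * preceding *ciphertext* block for its final XOR.  Each chunk is
 * processed from the last block backwards so that wide batch functions
 * remain safe for in-place requests.
 */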
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/* Start of the last block. */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

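		/*
		 * Save the last ciphertext block: it becomes the next IV,
		 * and an in-place decryption would otherwise overwrite it.
		 */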
		last_iv = *src;

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
							(const u8 *)src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

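/*
 * CTR: walk.iv holds a big-endian counter, but the batch functions take
 * it as le128, so it is converted on entry to and exit from each chunk.
 * A trailing partial block is handled after the main loop.
 */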
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= bsize) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		le128 ctrblk;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);

		be128_to_le128(&ctrblk, (be128 *)walk.iv);

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
							(const u8 *)src,
							&ctrblk);
				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}

		le128_to_be128((be128 *)walk.iv, &ctrblk);
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

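	/*
	 * CTR is a stream cipher, so a trailing partial block is valid:
	 * run the last (one-block) routine on a stack copy and write back
	 * only the bytes actually present.
	 */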
	if (nbytes) {
		le128 ctrblk;
		u128 tmp;

		be128_to_le128(&ctrblk, (be128 *)walk.iv);
		memcpy(&tmp, walk.src.virt.addr, nbytes);
		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
							  (const u8 *)&tmp,
							  &ctrblk);
		memcpy(walk.dst.virt.addr, &tmp, nbytes);
		le128_to_be128((be128 *)walk.iv, &ctrblk);

		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

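/*
 * Core XTS loop for one walk chunk: apply the widest batch function that
 * still fits, then narrower ones, with the tweak advanced in walk->iv as
 * each batch runs.  Returns the number of bytes left unprocessed.
 */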
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
							(const u8 *)src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

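/*
 * XTS entry point: the initial tweak T is computed by encrypting the IV
 * with the tweak key (tweak_fn/tweak_ctx); the data itself is then
 * processed with crypt_ctx.  Requests that are not a multiple of the
 * block size are shortened so the last two blocks can be finished with
 * ciphertext stealing.
 */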
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx, bool decrypt)
{
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const unsigned int bsize = 128 / 8;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes, tail;
	int err;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

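	/*
	 * For a ragged tail, shrink the request to its block-aligned
	 * prefix; the final XTS_BLOCK_SIZE + remainder bytes are handled
	 * by the ciphertext-stealing path below.
	 */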
	if (unlikely(cts)) {
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

		tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      crypto_skcipher_get_flags(tfm),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   req->cryptlen - tail, req->iv);
		req = &subreq;
	}

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (err)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     &walk, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

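	/*
	 * Ciphertext stealing: process the penultimate block, swap its
	 * trailing bytes with the partial final block, then process the
	 * reassembled block.  Decryption must consume the two tweak values
	 * in the opposite order, hence the gf128mul_x_ble() step below.
	 */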
	if (unlikely(cts)) {
		u8 *next_tweak, *final_tweak = req->iv;
		struct scatterlist *src, *dst;
		struct scatterlist s[2], d[2];
		le128 b[2];

		dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);

		if (decrypt) {
			next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
			gf128mul_x_ble(b, b);
		} else {
			next_tweak = req->iv;
		}

		skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE,
					   next_tweak);

		err = skcipher_walk_virt(&walk, req, false) ?:
		      skcipher_walk_done(&walk,
				__glue_xts_req_128bit(gctx, crypt_ctx, &walk));
		if (err)
			goto out;

		scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0);
		memcpy(b + 1, b, tail - XTS_BLOCK_SIZE);
		scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE,
					 tail - XTS_BLOCK_SIZE, 0);
		scatterwalk_map_and_copy(b, dst, 0, tail, 1);

		skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE,
					   final_tweak);

		err = skcipher_walk_virt(&walk, req, false) ?:
		      skcipher_walk_done(&walk,
				__glue_xts_req_128bit(gctx, crypt_ctx, &walk));
	}

out:
	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

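/*
 * One block of XTS (IEEE P1619): out = fn(in ^ T) ^ T, where fn is the
 * ECB encrypt or decrypt primitive keyed with the data key.  The tweak T
 * in *iv is advanced for the next block by multiplying by x in GF(2^128)
 * (the "ble" bit ordering of gf128mul_x_ble()).
 */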
void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
			       le128 *iv, common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, dst, dst);

	/* P <- T xor PP */
	u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");