// SPDX-License-Identifier: GPL-2.0
/**
 * GHASH routines supporting VMX instructions on the Power 8
 *
 * Copyright (C) 2015, 2019 International Business Machines Inc.
 *
 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
 *
 * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
 * mechanism. The new approach is based on arm64 code, which is:
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/ghash.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <crypto/b128ops.h>

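/*
 * The vector unit cannot be used from interrupt context, so the helpers
 * below check in_interrupt() and fall back to a generic software
 * GF(2^128) multiply when it is set.
 */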
#define IN_INTERRUPT in_interrupt()

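/*
 * GHASH primitives implemented in POWER8 vector/VSX assembly: Xi is the
 * 128-bit accumulator and htable holds the precomputed tables derived
 * from the hash key by gcm_init_p8().
 */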
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
		  const u8 *in, size_t len);

struct p8_ghash_ctx {
	/* key used by vector asm */
	u128 htable[16];
	/* key used by software fallback */
	be128 key;
};

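/*
 * Per-request state: the running digest (Xi), a buffer for a partial
 * input block, and the number of bytes currently held in that buffer.
 */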
struct p8_ghash_desc_ctx {
	u64 shash[2];
	u8 buffer[GHASH_DIGEST_SIZE];
	int bytes;
};

static int p8_ghash_init(struct shash_desc *desc)
{
	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->bytes = 0;
	memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
	return 0;
}

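/*
 * Derive both forms of the hash key: the precomputed table used by the
 * vector routines and the raw key kept for the software fallback.  Any
 * use of the vector unit must be bracketed by enable_kernel_vsx() /
 * disable_kernel_vsx() with preemption and page faults disabled.
 */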
static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));

	if (keylen != GHASH_BLOCK_SIZE)
		return -EINVAL;

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	gcm_init_p8(ctx->htable, (const u64 *) key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);

	return 0;
}

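/*
 * Fold the buffered block into the digest: use the VSX routine when the
 * vector unit is available, otherwise fall back to the generic
 * gf128mul_lle() multiply on the raw key.
 */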
static inline void __ghash_block(struct p8_ghash_ctx *ctx,
				 struct p8_ghash_desc_ctx *dctx)
{
	if (!IN_INTERRUPT) {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		gcm_ghash_p8(dctx->shash, ctx->htable,
			     dctx->buffer, GHASH_DIGEST_SIZE);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else {
		crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
		gf128mul_lle((be128 *)dctx->shash, &ctx->key);
	}
}

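/*
 * As __ghash_block(), but consumes a run of complete blocks directly
 * from src; the caller passes a srclen that is a multiple of the block
 * size on the vector path.
 */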
static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
				  struct p8_ghash_desc_ctx *dctx,
				  const u8 *src, unsigned int srclen)
{
	if (!IN_INTERRUPT) {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		gcm_ghash_p8(dctx->shash, ctx->htable,
			     src, srclen);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();
	} else {
		while (srclen >= GHASH_BLOCK_SIZE) {
			crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
			gf128mul_lle((be128 *)dctx->shash, &ctx->key);
			srclen -= GHASH_BLOCK_SIZE;
			src += GHASH_BLOCK_SIZE;
		}
	}
}

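/*
 * Buffer incoming data until a full block is available, hash any run of
 * complete blocks, and stash the remainder for the next update or for
 * final().
 */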
static int p8_ghash_update(struct shash_desc *desc,
			   const u8 *src, unsigned int srclen)
{
	unsigned int len;
	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	if (dctx->bytes) {
		if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
			memcpy(dctx->buffer + dctx->bytes, src,
			       srclen);
			dctx->bytes += srclen;
			return 0;
		}
		memcpy(dctx->buffer + dctx->bytes, src,
		       GHASH_DIGEST_SIZE - dctx->bytes);

		__ghash_block(ctx, dctx);

		src += GHASH_DIGEST_SIZE - dctx->bytes;
		srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
		dctx->bytes = 0;
	}
	len = srclen & ~(GHASH_DIGEST_SIZE - 1);
	if (len) {
		__ghash_blocks(ctx, dctx, src, len);
		src += len;
		srclen -= len;
	}
	if (srclen) {
		memcpy(dctx->buffer, src, srclen);
		dctx->bytes = srclen;
	}
	return 0;
}

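/*
 * Zero-pad and hash any buffered partial block, then copy out the
 * 16-byte digest.
 */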
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
{
	int i;
	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	if (dctx->bytes) {
		for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
			dctx->buffer[i] = 0;
		__ghash_block(ctx, dctx);
		dctx->bytes = 0;
	}
	memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
	return 0;
}

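/*
 * The high cra_priority makes this implementation preferred over
 * lower-priority "ghash" providers such as the generic C one.
 */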
struct shash_alg p8_ghash_alg = {
	.digestsize = GHASH_DIGEST_SIZE,
	.init = p8_ghash_init,
	.update = p8_ghash_update,
	.final = p8_ghash_final,
	.setkey = p8_ghash_setkey,
	.descsize = sizeof(struct p8_ghash_desc_ctx)
		+ sizeof(struct ghash_desc_ctx),
	.base = {
		.cra_name = "ghash",
		.cra_driver_name = "p8_ghash",
		.cra_priority = 1000,
		.cra_blocksize = GHASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct p8_ghash_ctx),
		.cra_module = THIS_MODULE,
	},
};