/*
 * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
 * including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on:
 * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>

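/*
 * Low-level NEON routines, implemented in assembly: single-block and
 * four-block ChaCha keystream XOR helpers, plus the HChaCha block
 * function used below to derive the XChaCha subkey.
 */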
asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
				      int nrounds);
asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
				       int nrounds);
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);

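/*
 * XOR the ChaCha keystream into 'bytes' bytes of 'src', writing the result
 * to 'dst': four blocks at a time while possible, then single blocks, and
 * finally a stack bounce buffer for any partial trailing block.  state[12]
 * is the block counter and is advanced as blocks are consumed.
 */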
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
			  unsigned int bytes, int nrounds)
{
	u8 buf[CHACHA_BLOCK_SIZE];

	while (bytes >= CHACHA_BLOCK_SIZE * 4) {
		chacha_4block_xor_neon(state, dst, src, nrounds);
		bytes -= CHACHA_BLOCK_SIZE * 4;
		src += CHACHA_BLOCK_SIZE * 4;
		dst += CHACHA_BLOCK_SIZE * 4;
		state[12] += 4;
	}
	while (bytes >= CHACHA_BLOCK_SIZE) {
		chacha_block_xor_neon(state, dst, src, nrounds);
		bytes -= CHACHA_BLOCK_SIZE;
		src += CHACHA_BLOCK_SIZE;
		dst += CHACHA_BLOCK_SIZE;
		state[12]++;
	}
	if (bytes) {
		memcpy(buf, src, bytes);
		chacha_block_xor_neon(state, buf, buf, nrounds);
		memcpy(dst, buf, bytes);
	}
}

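/*
 * Walk the skcipher request and process it with the NEON routines.  Every
 * step except the last is rounded down to the walk stride so that only
 * whole blocks are handed to chacha_doneon(); each call is bracketed by
 * kernel_neon_begin()/kernel_neon_end() as required for NEON use in
 * kernel mode.
 */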
static int chacha_neon_stream_xor(struct skcipher_request *req,
				  struct chacha_ctx *ctx, u8 *iv)
{
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	crypto_chacha_init(state, ctx, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		kernel_neon_begin();
		chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes, ctx->nrounds);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

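/*
 * ChaCha20 entry point.  Requests no longer than one block, or contexts
 * where NEON may not be used, fall back to the generic implementation.
 */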
static int chacha_neon(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
		return crypto_chacha_crypt(req);

	return chacha_neon_stream_xor(req, ctx, req->iv);
}

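/*
 * XChaCha entry point.  The first 128 bits of the IV are run through
 * HChaCha to derive a one-off ChaCha subkey; the remaining nonce bits are
 * then used for a regular ChaCha pass.  Short requests and contexts where
 * NEON may not be used fall back to the generic implementation.
 */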
static int xchacha_neon(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct chacha_ctx subctx;
	u32 state[16];
	u8 real_iv[16];

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
		return crypto_xchacha_crypt(req);

	crypto_chacha_init(state, ctx, req->iv);

	kernel_neon_begin();
	hchacha_block_neon(state, subctx.key, ctx->nrounds);
	kernel_neon_end();
	subctx.nrounds = ctx->nrounds;

	memcpy(&real_iv[0], req->iv + 24, 8);	/* stream position */
	memcpy(&real_iv[8], req->iv + 16, 8);	/* remaining 64 nonce bits */
	return chacha_neon_stream_xor(req, &subctx, real_iv);
}

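/*
 * NEON-accelerated chacha20, xchacha20 and xchacha12 skciphers, registered
 * at priority 300 so they are preferred over the lower-priority generic C
 * implementations.
 */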
static struct skcipher_alg algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-neon",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.walksize		= 4 * CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= chacha_neon,
		.decrypt		= chacha_neon,
	}, {
		.base.cra_name		= "xchacha20",
		.base.cra_driver_name	= "xchacha20-neon",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.walksize		= 4 * CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= xchacha_neon,
		.decrypt		= xchacha_neon,
	}, {
		.base.cra_name		= "xchacha12",
		.base.cra_driver_name	= "xchacha12-neon",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.walksize		= 4 * CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha12_setkey,
		.encrypt		= xchacha_neon,
		.decrypt		= xchacha_neon,
	}
};

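/*
 * Only register the algorithms if the CPU actually advertises NEON;
 * otherwise loading the module fails with -ENODEV and the generic
 * implementations remain in use.
 */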
static int __init chacha_simd_mod_init(void)
{
	if (!(elf_hwcap & HWCAP_NEON))
		return -ENODEV;

	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static void __exit chacha_simd_mod_fini(void)
{
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);

MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-neon");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-neon");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-neon");