// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
 *
 * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                        unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

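/*
 * Thin wrappers adapting the OpenSSL-derived assembly routines to the
 * block-function signature expected by the sha256_base_do_update() and
 * sha256_base_do_finalize() helpers.
 */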
static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
                                      int blocks)
{
        return sha256_block_data_order(sst->state, src, blocks);
}

asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                  unsigned int num_blks);

static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
                                int blocks)
{
        return sha256_block_neon(sst->state, src, blocks);
}

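/*
 * Scalar entry points: safe to call in any context, since the generic
 * assembly code does not touch the NEON register file.
 */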
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
                                      unsigned int len)
{
        return sha256_base_do_update(desc, data, len,
                                     __sha256_block_data_order);
}

static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
                                     unsigned int len, u8 *out)
{
        if (len)
                sha256_base_do_update(desc, data, len,
                                      __sha256_block_data_order);
        sha256_base_do_finalize(desc, __sha256_block_data_order);

        return sha256_base_finish(desc, out);
}

static int crypto_sha256_arm64_final(struct shash_desc *desc, u8 *out)
{
        return crypto_sha256_arm64_finup(desc, NULL, 0, out);
}

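/*
 * SHA-224 differs from SHA-256 only in its initial state and truncated
 * digest, so both algorithms share the update/final/finup handlers and
 * differ only in .init and .digestsize.
 */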
static struct shash_alg algs[] = { {
        .digestsize             = SHA256_DIGEST_SIZE,
        .init                   = sha256_base_init,
        .update                 = crypto_sha256_arm64_update,
        .final                  = crypto_sha256_arm64_final,
        .finup                  = crypto_sha256_arm64_finup,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha256",
        .base.cra_driver_name   = "sha256-arm64",
        .base.cra_priority      = 125,
        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
}, {
        .digestsize             = SHA224_DIGEST_SIZE,
        .init                   = sha224_base_init,
        .update                 = crypto_sha256_arm64_update,
        .final                  = crypto_sha256_arm64_final,
        .finup                  = crypto_sha256_arm64_finup,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha224",
        .base.cra_driver_name   = "sha224-arm64",
        .base.cra_priority      = 125,
        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
} };

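/*
 * NEON entry points: the NEON register file may only be clobbered when
 * crypto_simd_usable() reports that it is safe to do so; otherwise fall
 * back to the scalar block function.
 */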
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
                                __sha256_block_data_order);

        while (len > 0) {
                unsigned int chunk = len;

                /*
                 * Don't hog the CPU for the entire time it takes to process all
                 * input when running on a preemptible kernel, but process the
                 * data block by block instead.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT) &&
                    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                        chunk = SHA256_BLOCK_SIZE -
                                sctx->count % SHA256_BLOCK_SIZE;

                kernel_neon_begin();
                sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
                kernel_neon_end();
                data += chunk;
                len -= chunk;
        }
        return 0;
}

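/*
 * Finalization mirrors the update path: without usable SIMD, pad and finish
 * using the scalar block function; otherwise the remaining data and padding
 * are processed inside a single kernel_neon_begin()/kernel_neon_end() section.
 */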
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
                             unsigned int len, u8 *out)
{
        if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
                                __sha256_block_data_order);
                sha256_base_do_finalize(desc, __sha256_block_data_order);
        } else {
                if (len)
                        sha256_update_neon(desc, data, len);
                kernel_neon_begin();
                sha256_base_do_finalize(desc, __sha256_block_neon);
                kernel_neon_end();
        }
        return sha256_base_finish(desc, out);
}

static int sha256_final_neon(struct shash_desc *desc, u8 *out)
{
        return sha256_finup_neon(desc, NULL, 0, out);
}

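/*
 * The NEON variants register at a higher cra_priority (150 vs 125), so the
 * crypto API prefers them over the scalar versions whenever both are
 * available.
 */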
static struct shash_alg neon_algs[] = { {
        .digestsize             = SHA256_DIGEST_SIZE,
        .init                   = sha256_base_init,
        .update                 = sha256_update_neon,
        .final                  = sha256_final_neon,
        .finup                  = sha256_finup_neon,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha256",
        .base.cra_driver_name   = "sha256-arm64-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
}, {
        .digestsize             = SHA224_DIGEST_SIZE,
        .init                   = sha224_base_init,
        .update                 = sha256_update_neon,
        .final                  = sha256_final_neon,
        .finup                  = sha256_finup_neon,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha224",
        .base.cra_driver_name   = "sha224-arm64-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
} };

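/*
 * The scalar algorithms are always registered; the NEON variants are
 * registered only when the CPU advertises Advanced SIMD (ASIMD).
 */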
static int __init sha256_mod_init(void)
{
        int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));

        if (ret)
                return ret;

        if (cpu_have_named_feature(ASIMD)) {
                ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
                if (ret)
                        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
        }
        return ret;
}

static void __exit sha256_mod_fini(void)
{
        if (cpu_have_named_feature(ASIMD))
                crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_mod_init);
module_exit(sha256_mod_fini);
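
/*
 * Usage sketch (illustrative only, not part of this driver): kernel users
 * reach these implementations through the generic shash API rather than by
 * calling the functions above directly. Something along these lines, with
 * data/len being the caller's buffer, would pick the highest-priority
 * "sha256" provider, i.e. the NEON variant when it is registered:
 *
 *      struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *      u8 digest[SHA256_DIGEST_SIZE];
 *
 *      if (!IS_ERR(tfm)) {
 *              SHASH_DESC_ON_STACK(desc, tfm);
 *
 *              desc->tfm = tfm;
 *              crypto_shash_digest(desc, data, len, digest);
 *              crypto_free_shash(tfm);
 *      }
 */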