From 59e056cda4beb5412e3653e6360c2eb0fa770baa Mon Sep 17 00:00:00 2001
From: Eneas U de Queiroz <cotequeiroz@gmail.com>
Date: Fri, 20 Dec 2019 16:02:18 -0300
Subject: [PATCH 07/11] crypto: qce - allow building only hashes/ciphers

Allow the user to choose whether to build support for all algorithms
(default), hashes-only, or skciphers-only.

The QCE engine does not appear to scale as well as the CPU to handle
multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
QCE handles only 2 requests in parallel.

IPsec throughput seems to improve when disabling either family of
algorithms, sharing the load with the CPU. Enabling skciphers-only
appears to work best.

Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

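[Editor's illustration, not part of the upstream commit; text in this region,
between the "---" scissors line and the first hunk, is ignored when the patch
is applied. With the patch in place, a kernel .config fragment selecting the
skciphers-only variant that the commit message reports as fastest for IPsec
on ipq40xx would look like this (option names taken from the Kconfig hunk
below):

    CONFIG_CRYPTO_DEV_QCE=m
    CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER=y
    # CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL is not set
    # CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set

The choice block guarantees exactly one ENABLE_* option is set; the selected
one pulls in the hidden bool (CRYPTO_DEV_QCE_SKCIPHER here) that actually
gates the build.]
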
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -617,6 +617,14 @@ config CRYPTO_DEV_QCE
 	tristate "Qualcomm crypto engine accelerator"
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on HAS_IOMEM
+	help
+	  This driver supports Qualcomm crypto engine accelerator
+	  hardware. To compile this driver as a module, choose M here. The
+	  module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+	bool
+	depends on CRYPTO_DEV_QCE
 	select CRYPTO_AES
 	select CRYPTO_LIB_DES
 	select CRYPTO_ECB
@@ -624,10 +632,57 @@ config CRYPTO_DEV_QCE
 	select CRYPTO_XTS
 	select CRYPTO_CTR
 	select CRYPTO_BLKCIPHER
+
+config CRYPTO_DEV_QCE_SHA
+	bool
+	depends on CRYPTO_DEV_QCE
+
+choice
+	prompt "Algorithms enabled for QCE acceleration"
+	default CRYPTO_DEV_QCE_ENABLE_ALL
+	depends on CRYPTO_DEV_QCE
 	help
-	  This driver supports Qualcomm crypto engine accelerator
-	  hardware. To compile this driver as a module, choose M here. The
-	  module will be called qcrypto.
+	  This option allows you to choose whether to build support for all
+	  algorithms
+	  (default), hashes-only, or skciphers-only.
+
+	  The QCE engine does not appear to scale as well as the CPU to handle
+	  multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
+	  QCE handles only 2 requests in parallel.
+
+	  IPsec throughput seems to improve when disabling either family of
+	  algorithms, sharing the load with the CPU. Enabling skciphers-only
+	  appears to work best.
+
+	config CRYPTO_DEV_QCE_ENABLE_ALL
+		bool "All supported algorithms"
+		select CRYPTO_DEV_QCE_SKCIPHER
+		select CRYPTO_DEV_QCE_SHA
+		help
+		  Enable all supported algorithms:
+			- AES (CBC, CTR, ECB, XTS)
+			- 3DES (CBC, ECB)
+			- DES (CBC, ECB)
+			- SHA1, HMAC-SHA1
+			- SHA256, HMAC-SHA256
+
+	config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+		bool "Symmetric-key ciphers only"
+		select CRYPTO_DEV_QCE_SKCIPHER
+		help
+		  Enable symmetric-key ciphers only:
+			- AES (CBC, CTR, ECB, XTS)
+			- 3DES (ECB, CBC)
+			- DES (ECB, CBC)
+
+	config CRYPTO_DEV_QCE_ENABLE_SHA
+		bool "Hash/HMAC only"
+		select CRYPTO_DEV_QCE_SHA
+		help
+		  Enable hashes/HMAC algorithms only:
+			- SHA1, HMAC-SHA1
+			- SHA256, HMAC-SHA256
+
+endchoice
 
 config CRYPTO_DEV_QCOM_RNG
 	tristate "Qualcomm Random Number Generator Driver"
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
 qcrypto-objs := core.o \
 		common.o \
-		dma.o \
-		sha.o \
-		skcipher.o
+		dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce,
 		qce_write(qce, offset + i * sizeof(u32), 0);
 }
 
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
 {
-	u32 cfg = 0;
+	u32 beats = (qce->burst_size >> 3) - 1;
+	u32 pipe_pair = qce->pipe_pair_id;
+	u32 config;
 
-	if (IS_AES(flags)) {
-		if (aes_key_size == AES_KEYSIZE_128)
-			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
-		else if (aes_key_size == AES_KEYSIZE_256)
-			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
-	}
+	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+	config &= ~HIGH_SPD_EN_N_SHIFT;
 
-	if (IS_AES(flags))
-		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
-	else if (IS_DES(flags) || IS_3DES(flags))
-		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+	if (little)
+		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
 
-	if (IS_DES(flags))
-		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+	return config;
+}
 
-	if (IS_3DES(flags))
-		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+	__be32 *d = dst;
+	const u8 *s = src;
+	unsigned int n;
 
-	switch (flags & QCE_MODE_MASK) {
-	case QCE_MODE_ECB:
-		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_CBC:
-		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_CTR:
-		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_XTS:
-		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_CCM:
-		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
-		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
-		break;
-	default:
-		return ~0;
+	n = len / sizeof(u32);
+	for (; n > 0; n--) {
+		*d = cpu_to_be32p((const __u32 *) s);
+		s += sizeof(__u32);
+		d++;
 	}
+}
 
-	return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+	u32 config;
+
+	/* get big endianness */
+	config = qce_config_reg(qce, 0);
+
+	/* clear status */
+	qce_write(qce, REG_STATUS, 0);
+	qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
 }
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
 {
 	u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long fl
 	return cfg;
 }
 
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
-	u32 beats = (qce->burst_size >> 3) - 1;
-	u32 pipe_pair = qce->pipe_pair_id;
-	u32 config;
-
-	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
-	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
-		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
-	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
-	config &= ~HIGH_SPD_EN_N_SHIFT;
-
-	if (little)
-		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
-	return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
-	__be32 *d = dst;
-	const u8 *s = src;
-	unsigned int n;
-
-	n = len / sizeof(u32);
-	for (; n > 0; n--) {
-		*d = cpu_to_be32p((const __u32 *) s);
-		s += sizeof(__u32);
-		d++;
-	}
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
-	u8 swap[QCE_AES_IV_LENGTH];
-	u32 i, j;
-
-	if (ivsize > QCE_AES_IV_LENGTH)
-		return;
-
-	memset(swap, 0, QCE_AES_IV_LENGTH);
-
-	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
-	     i < QCE_AES_IV_LENGTH; i++, j--)
-		swap[i] = src[j];
-
-	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
-		       unsigned int enckeylen, unsigned int cryptlen)
-{
-	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
-	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-	unsigned int xtsdusize;
-
-	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
-			       enckeylen / 2);
-	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
-	/* xts du size 512B */
-	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
-	u32 config;
-
-	/* get big endianness */
-	config = qce_config_reg(qce, 0);
-
-	/* clear status */
-	qce_write(qce, REG_STATUS, 0);
-	qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
-	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
 static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
 				u32 totallen, u32 offset)
 {
@@ -303,6 +225,87 @@ go_proc:
 
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+	u32 cfg = 0;
+
+	if (IS_AES(flags)) {
+		if (aes_key_size == AES_KEYSIZE_128)
+			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+		else if (aes_key_size == AES_KEYSIZE_256)
+			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+	}
+
+	if (IS_AES(flags))
+		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+	else if (IS_DES(flags) || IS_3DES(flags))
+		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+	if (IS_DES(flags))
+		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+	if (IS_3DES(flags))
+		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+	switch (flags & QCE_MODE_MASK) {
+	case QCE_MODE_ECB:
+		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CBC:
+		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CTR:
+		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_XTS:
+		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CCM:
+		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+		break;
+	default:
+		return ~0;
+	}
+
+	return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+	u8 swap[QCE_AES_IV_LENGTH];
+	u32 i, j;
+
+	if (ivsize > QCE_AES_IV_LENGTH)
+		return;
+
+	memset(swap, 0, QCE_AES_IV_LENGTH);
+
+	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+	     i < QCE_AES_IV_LENGTH; i++, j--)
+		swap[i] = src[j];
+
+	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+		       unsigned int enckeylen, unsigned int cryptlen)
+{
+	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+	unsigned int xtsdusize;
+
+	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+			       enckeylen / 2);
+	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+	/* xts du size 512B */
+	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
 
 static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 				   u32 totallen, u32 offset)
@@ -384,15 +387,20 @@ static int qce_setup_regs_skcipher(struc
 
 	return 0;
 }
+#endif
 
 int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
 	      u32 offset)
 {
 	switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 	case CRYPTO_ALG_TYPE_SKCIPHER:
 		return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 	case CRYPTO_ALG_TYPE_AHASH:
 		return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
 	default:
 		return -EINVAL;
 	}
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
 #define QCE_QUEUE_LENGTH 1
 
 static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 	&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 	&ahash_ops,
+#endif
 };
 
 static void qce_unregister_algs(struct qce_device *qce)
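
[Editor's note, appended after the patch body and not part of the upstream
commit. The Makefile hunk relies on the standard kbuild conditional-object
idiom: because CRYPTO_DEV_QCE_SHA and CRYPTO_DEV_QCE_SKCIPHER are hidden
bools, each line expands to either the linked or the discarded object list:

    # CONFIG_CRYPTO_DEV_QCE_SHA=y  ->  qcrypto-y += sha.o  (linked into qcrypto)
    # option unset                 ->  qcrypto-  += sha.o  (discarded by kbuild)
    qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o

Since a disabled family's helpers (e.g. qce_setup_regs_ahash()) are then not
compiled at all, their call sites in qce_start() and the qce_ops[] table are
wrapped in #ifdef; an unguarded reference would not even compile, and the
alternative IS_ENABLED() idiom would additionally require stub definitions.]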