// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */
#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

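/*
 * Requests must be a multiple of the transform's alignment size or the
 * engine cannot process them; anything else is rejected up front.
 */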
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct ablkcipher_request *req)
{
	if (!IS_ALIGNED(req->nbytes, dev->align_size))
		return -EINVAL;
	else
		return dev->enqueue(dev, &req->base);
}

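/*
 * AES only accepts 128/192/256-bit keys; the key is written directly
 * into the engine's key registers over MMIO.
 */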
static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
	return 0;
}

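/*
 * (T)DES keys are run through the generic weak-key checks before being
 * written to the TDES key registers.
 */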
static int rk_des_setkey(struct crypto_ablkcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	int err;

	err = verify_ablkcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	int err;

	err = verify_ablkcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

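/*
 * The encrypt/decrypt entry points below only select the mode bits for
 * the hardware; rk_handle_req() does the actual queueing.
 */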
static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		    RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

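/*
 * Program the engine for the current request: pick the TDES or AES
 * register block based on the transform's block size, load the IV, and
 * set up FIFO mode, byte swapping and the DMA-done/error interrupts.
 */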
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);
	ivsize = crypto_ablkcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

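/*
 * Kick off one DMA transfer. BRDMAL appears to take the length in
 * 32-bit words, hence dev->count / 4.
 */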
static void crypto_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

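/*
 * Prepare the next chunk of the scatterlists for DMA. For decryption
 * the ciphertext block needed as the next IV is saved first, since an
 * in-place operation would otherwise overwrite it.
 */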
static int rk_set_data_start(struct rk_crypto_info *dev)
{
	int err;
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
			   dev->sg_src->offset + dev->sg_src->length - ivsize;

	/*
	 * Store the IV that needs to be updated in chain mode, and update
	 * the IV buffer to contain the next IV for decryption mode.
	 */
	if (ctx->mode & RK_CRYPTO_DEC) {
		memcpy(ctx->iv, src_last_blk, ivsize);
		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
				   ivsize, dev->total - ivsize);
	}

	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
	if (!err)
		crypto_dma_start(dev);
	return err;
}

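/*
 * Begin a new request: capture its scatterlists and byte counts in the
 * device state, then program the hardware and start the first DMA
 * chunk under the device lock.
 */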
static int rk_ablk_start(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	unsigned long flags;
	int err = 0;

	dev->left_bytes = req->nbytes;
	dev->total = req->nbytes;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	dev->sg_dst = req->dst;
	dev->dst_nents = sg_nents(req->dst);
	dev->aligned = 1;

	spin_lock_irqsave(&dev->lock, flags);
	rk_ablk_hw_init(dev);
	err = rk_set_data_start(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return err;
}

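/*
 * For encryption the next IV is the last ciphertext block produced;
 * copy it back into the request's IV buffer once the request is done.
 */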
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);

	/* Update the IV buffer to contain the next IV for encryption mode. */
	if (!(ctx->mode & RK_CRYPTO_DEC)) {
		if (dev->aligned) {
			memcpy(req->info, sg_virt(dev->sg_dst) +
			       dev->sg_dst->length - ivsize, ivsize);
		} else {
			memcpy(req->info, dev->addr_vir +
			       dev->count - ivsize, ivsize);
		}
	}
}

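/*
 * Reload the hardware IV registers between chunks of a request: for
 * decryption use the ciphertext block saved in rk_set_data_start(),
 * for encryption the last ciphertext block just written out.
 */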
static void rk_update_iv(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
	u8 *new_iv = NULL;

	if (ctx->mode & RK_CRYPTO_DEC) {
		new_iv = ctx->iv;
	} else {
		new_iv = page_address(sg_page(dev->sg_dst)) +
			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
	}

	if (ivsize == DES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
}

/*
 * Return: non-zero if an error occurred;
 *         zero if there was no error and processing continues.
 */
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);

	dev->unload_data(dev);
	if (!dev->aligned) {
		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
					  dev->addr_vir, dev->count,
					  dev->total - dev->left_bytes -
					  dev->count)) {
			err = -EINVAL;
			goto out_rx;
		}
	}
	if (dev->left_bytes) {
		rk_update_iv(dev);
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_err(dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
			dev->sg_dst = sg_next(dev->sg_dst);
		}
		err = rk_set_data_start(dev);
	} else {
		rk_iv_copyback(dev);
		/* the whole calculation finished without any error */
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}
out_rx:
	return err;
}

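/*
 * Per-transform setup: bind the transform to the device, install the
 * driver callbacks, and allocate one bounce page for unaligned data.
 */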
static int rk_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);

	ctx->dev = algt->dev;
	ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
	ctx->dev->start = rk_ablk_start;
	ctx->dev->update = rk_ablk_rx;
	ctx->dev->complete = rk_crypto_complete;
	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);

	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
}

static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	free_page((unsigned long)ctx->dev->addr_vir);
	ctx->dev->disable_clk(ctx->dev);
}

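/*
 * Algorithm templates exported to the core driver, which presumably
 * registers them with the crypto API at probe time.
 */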
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-rk",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = rk_ablk_cra_init,
		.cra_exit = rk_ablk_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = rk_aes_setkey,
			.encrypt = rk_aes_ecb_encrypt,
			.decrypt = rk_aes_ecb_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-rk",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.cra_alignmask = 0x0f,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = rk_ablk_cra_init,
		.cra_exit = rk_ablk_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = rk_aes_setkey,
			.encrypt = rk_aes_cbc_encrypt,
			.decrypt = rk_aes_cbc_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "ecb-des-rk",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.cra_alignmask = 0x07,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = rk_ablk_cra_init,
		.cra_exit = rk_ablk_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = rk_des_setkey,
			.encrypt = rk_des_ecb_encrypt,
			.decrypt = rk_des_ecb_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "cbc-des-rk",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.cra_alignmask = 0x07,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = rk_ablk_cra_init,
		.cra_exit = rk_ablk_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = rk_des_setkey,
			.encrypt = rk_des_cbc_encrypt,
			.decrypt = rk_des_cbc_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-des3-ede-rk",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.cra_alignmask = 0x07,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = rk_ablk_cra_init,
		.cra_exit = rk_ablk_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			/* ECB takes no IV, so no .ivsize here */
			.setkey = rk_tdes_setkey,
			.encrypt = rk_des3_ede_ecb_encrypt,
			.decrypt = rk_des3_ede_ecb_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-des3-ede-rk",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.cra_alignmask = 0x07,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = rk_ablk_cra_init,
		.cra_exit = rk_ablk_cra_exit,
		.cra_u.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = rk_tdes_setkey,
			.encrypt = rk_des3_ede_cbc_encrypt,
			.decrypt = rk_des3_ede_cbc_decrypt,
		}
	}
};