// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define _SBF(s, v)			((v) << (s))
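/* set bit field: e.g. _SBF(4, 0x02) expands to (0x02 << 4) == 0x20 (SSS_AES_KEY_SIZE_256) */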

/* Feed control registers */
#define SSS_REG_FCINTSTAT		0x0000
#define SSS_FCINTSTAT_HPARTINT		BIT(7)
#define SSS_FCINTSTAT_HDONEINT		BIT(5)
#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
#define SSS_FCINTSTAT_PKDMAINT		BIT(0)

#define SSS_REG_FCINTENSET		0x0004
#define SSS_FCINTENSET_HPARTINTENSET	BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET	BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)

#define SSS_REG_FCINTENCLR		0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR	BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR	BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)

#define SSS_REG_FCINTPEND		0x000C
#define SSS_FCINTPEND_HPARTINTP		BIT(7)
#define SSS_FCINTPEND_HDONEINTP		BIT(5)
#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
#define SSS_FCINTPEND_PKDMAINTP		BIT(0)

#define SSS_REG_FCFIFOSTAT		0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)

#define SSS_REG_FCFIFOCTRL		0x0014
#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)
#define SSS_HASHIN_MASK			_SBF(0, 0x03)

#define SSS_REG_FCBRDMAS		0x0020
#define SSS_REG_FCBRDMAL		0x0024
#define SSS_REG_FCBRDMAC		0x0028
#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
#define SSS_FCBRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCBTDMAS		0x0030
#define SSS_REG_FCBTDMAL		0x0034
#define SSS_REG_FCBTDMAC		0x0038
#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
#define SSS_FCBTDMAC_FLUSH		BIT(0)

#define SSS_REG_FCHRDMAS		0x0040
#define SSS_REG_FCHRDMAL		0x0044
#define SSS_REG_FCHRDMAC		0x0048
#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
#define SSS_FCHRDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAS		0x0050
#define SSS_REG_FCPKDMAL		0x0054
#define SSS_REG_FCPKDMAC		0x0058
#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
#define SSS_FCPKDMAC_DESCEND		BIT(2)
#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
#define SSS_FCPKDMAC_FLUSH		BIT(0)

#define SSS_REG_FCPKDMAO		0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL		0x00
#define SSS_AES_BYTESWAP_DI		BIT(11)
#define SSS_AES_BYTESWAP_DO		BIT(10)
#define SSS_AES_BYTESWAP_IV		BIT(9)
#define SSS_AES_BYTESWAP_CNT		BIT(8)
#define SSS_AES_BYTESWAP_KEY		BIT(7)
#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
#define SSS_AES_FIFO_MODE		BIT(3)
#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT		BIT(0)

#define SSS_REG_AES_STATUS		0x04
#define SSS_AES_BUSY			BIT(2)
#define SSS_AES_INPUT_READY		BIT(1)
#define SSS_AES_OUTPUT_READY		BIT(0)

#define SSS_REG_AES_IN_DATA(s)		(0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + ((s) << 2))

#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))
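
/*
 * Example: SSS_WRITE(dev, FCINTPEND, val) expands to
 * __raw_writel(val, dev->ioaddr + SSS_REG_FCINTPEND).
 */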

#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)	__raw_writel((val), \
						     SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT		BIT(0)
#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
#define FLAGS_AES_CBC			_SBF(1, 0x01)
#define FLAGS_AES_CTR			_SBF(1, 0x02)
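
/*
 * Example: s5p_aes_cbc_decrypt() below calls
 * s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC); these software
 * mode flags are translated into SSS_AES_* control bits in
 * s5p_aes_crypt_start().
 */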

#define AES_KEY_LEN			16
#define CRYPTO_QUEUE_LEN		1

/* HASH registers */
#define SSS_REG_HASH_CTRL		0x00

#define SSS_HASH_USER_IV_EN		BIT(5)
#define SSS_HASH_INIT_BIT		BIT(4)
#define SSS_HASH_ENGINE_SHA1		_SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5		_SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256		_SBF(1, 0x02)

#define SSS_HASH_ENGINE_MASK		_SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE		0x04

#define SSS_HASH_PAUSE			BIT(0)

#define SSS_REG_HASH_CTRL_FIFO		0x08

#define SSS_HASH_FIFO_MODE_DMA		BIT(0)
#define SSS_HASH_FIFO_MODE_CPU		0

#define SSS_REG_HASH_CTRL_SWAP		0x0C

#define SSS_HASH_BYTESWAP_DI		BIT(3)
#define SSS_HASH_BYTESWAP_DO		BIT(2)
#define SSS_HASH_BYTESWAP_IV		BIT(1)
#define SSS_HASH_BYTESWAP_KEY		BIT(0)

#define SSS_REG_HASH_STATUS		0x10

#define SSS_HASH_STATUS_MSG_DONE	BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE	BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY	BIT(0)

#define SSS_REG_HASH_MSG_SIZE_LOW	0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH	0x24

#define SSS_REG_HASH_PRE_MSG_SIZE_LOW	0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH	0x2C

#define SSS_REG_HASH_IV(s)		(0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s)		(0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE			64
#define HASH_REG_SIZEOF			4
#define HASH_MD5_MAX_REG		(MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG		(SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG		(SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)

/*
 * HASH bit numbers, used by device, set in dev->hash_flags with
 * set_bit() and clear_bit(), and tested with test_bit() or BIT() masks,
 * to keep the HASH state BUSY or FREE, or to signal state from irq_handler
 * to hash_tasklet. The SGS bits keep track of memory allocated for the
 * scatterlist.
 */
#define HASH_FLAGS_BUSY		0
#define HASH_FLAGS_FINAL	1
#define HASH_FLAGS_DMA_ACTIVE	2
#define HASH_FLAGS_OUTPUT_READY	3
#define HASH_FLAGS_DMA_READY	4
#define HASH_FLAGS_SGS_COPIED	5
#define HASH_FLAGS_SGS_ALLOCED	6
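
/*
 * Typical usage, as seen below: the irq handler does
 * set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags) and the hash tasklet
 * consumes it with test_and_clear_bit(); several flags are cleared at once
 * with BIT() masks in s5p_hash_finish_req().
 */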

/* HASH HW constants */
#define BUFLEN			HASH_BLOCK_SIZE

#define SSS_HASH_DMA_LEN_ALIGN	8
#define SSS_HASH_DMA_ALIGN_MASK	(SSS_HASH_DMA_LEN_ALIGN - 1)

#define SSS_HASH_QUEUE_LENGTH	10

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 * @hash_offset: HASH register offset from SSS module's base.
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A structure for driver specific platform data is used for future
 * expansion of its usage.
 */
struct samsung_aes_variant {
	unsigned int			aes_offset;
	unsigned int			hash_offset;
};

struct s5p_aes_reqctx {
	unsigned long			mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev		*dev;

	uint8_t				aes_key[AES_MAX_KEY_SIZE];
	uint8_t				nonce[CTR_RFC3686_NONCE_SIZE];
	int				keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev: Associated device
 * @clk: Clock for accessing hardware
 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
 * @irq_fc: Feed control interrupt line
 * @req: Crypto request currently handled by the device
 * @ctx: Configuration for currently handled crypto request
 * @sg_src: Scatter list with source data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_dst: Scatter list with destination data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_src_cpy: In case of unaligned access, copied scatter list
 *	with source data.
 * @sg_dst_cpy: In case of unaligned access, copied scatter list
 *	with destination data.
 * @tasklet: New request scheduling job
 * @queue: Crypto queue
 * @busy: Indicates whether the device is currently handling some request
 *	and thus uses some of the fields from this state, like:
 *	req, ctx, sg_src/dst (and copies). This essentially
 *	protects against concurrent access to these fields.
 * @lock: Lock for protecting both access to device hardware registers
 *	and fields related to current request (including the busy field).
 * @res: Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
 *	variable.
 * @hash_flags: Flags for current HASH op.
 * @hash_queue: Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req: Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 *
 * @use_hash: true if HASH algs enabled
 */
struct s5p_aes_dev {
	struct device			*dev;
	struct clk			*clk;
	void __iomem			*ioaddr;
	void __iomem			*aes_ioaddr;
	int				irq_fc;

	struct ablkcipher_request	*req;
	struct s5p_aes_ctx		*ctx;
	struct scatterlist		*sg_src;
	struct scatterlist		*sg_dst;

	struct scatterlist		*sg_src_cpy;
	struct scatterlist		*sg_dst_cpy;

	struct tasklet_struct		tasklet;
	struct crypto_queue		queue;
	bool				busy;
	spinlock_t			lock;

	struct resource			*res;
	void __iomem			*io_hash_base;

	spinlock_t			hash_lock; /* protect hash_ vars */
	unsigned long			hash_flags;
	struct crypto_queue		hash_queue;
	struct tasklet_struct		hash_tasklet;

	u8				xmit_buf[BUFLEN];
	struct ahash_request		*hash_req;
	struct scatterlist		*hash_sg_iter;
	unsigned int			hash_sg_cnt;

	bool				use_hash;
};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd: Associated device
 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
 * @digest: Digest message or IV for partial result
 * @nregs: Number of HW registers for digest or IV read/write
 * @engine: Bits for selecting type of HASH in SSS block
 * @sg: sg for DMA transfer
 * @sg_len: Length of sg for DMA transfer
 * @sgl[]: sg for joining buffer and req->src scatterlist
 * @skip: Skip offset in req->src for current op
 * @total: Total number of bytes for current request
 * @finup: Keep state for finup or final.
 * @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
 * @buffer[]: For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {
	struct s5p_aes_dev	*dd;
	bool			op_update;

	u64			digcnt;
	u8			digest[SHA256_DIGEST_SIZE];

	unsigned int		nregs; /* digest_size / sizeof(reg) */
	u32			engine;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	struct scatterlist	sgl[2];
	unsigned int		skip;
	unsigned int		total;
	bool			finup;
	bool			error;

	u32			bufcnt;
	u8			buffer[0];
};

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd: Associated device
 * @flags: Bits for algorithm HASH.
 * @fallback: Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev	*dd;
	unsigned long		flags;
	struct crypto_shash	*fallback;
};

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset	= 0x4000,
	.hash_offset	= 0x6000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset	= 0x200,
	.hash_offset	= 0x400,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
				      pdev->dev.of_node);
		return (const struct samsung_aes_variant *)match->data;
	}
	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_dst = sg;
	err = 0;

exit:
	return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_src = sg;
	err = 0;

exit:
	return err;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}

static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev: device
 * @sg: scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev: device
 *
 * Return:
 * 2	if there is no more data and it is UPDATE op
 * 1	if new receiving (input) data is ready and can be written to device
 * 0	if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct ablkcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	uint32_t status;
	u32 st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt. If there is still data (the scatterlist
	 * did not reach its end), then map the next scatterlist entry.
	 * In case of such a mapping error, s5p_aes_complete() should be
	 * called.
	 *
	 * If there is no more data in the tx scatter list, call
	 * s5p_aes_complete() and schedule a new tasklet.
	 *
	 * Handle hx interrupt. If there is still data, map the next entry.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
				SSS_FCINTSTAT_HRDMAINT);
	/* clear DMA bits */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* clear HASH irq bits */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* cannot have both HPART and HDONE */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
		hx_end = true;
		/* when DONE or PART, do not handle HASH DMA */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev->req, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Note about the else if below: when hash_sg_iter reaches its end
	 * and it is an UPDATE op, issue SSS_HASH_PAUSE and wait for the
	 * HPART irq.
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req: AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd: device
 * @ctx: request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req: AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req: AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev: secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev: secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}

/**
 * s5p_hash_irq_disable() - disable irq HASH signals
 * @dev: secss device
 * @flags: bitfield with irqs to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev: secss device
 * @flags: bitfield with irqs to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev: secss device
 * @hashflow: HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{
	unsigned long flags;
	u32 flow;

	spin_lock_irqsave(&dev->lock, flags);

	flow = SSS_READ(dev, FCFIFOCTRL);
	flow &= ~SSS_HASHIN_MASK;
	flow |= hashflow;
	SSS_WRITE(dev, FCFIFOCTRL, flow);

	spin_unlock_irqrestore(&dev->lock, flags);
}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev: secss device
 * @hashflow: HASH stream flow with/without AES/DES
 *
 * Flush and enable HASH DMA, set the HASH stream flow inside the SecSS HW,
 * and enable the HASH irqs HRDMA, HDONE and HPART.
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Prepare the SSS HASH block for processing bytes in DMA mode. If it is
 * called after previous updates, fill up the IV words. For final, calculate
 * and set the lengths for HASH so SecSS can finalize the hash. For a partial
 * op, set the SSS HASH length to 2^63 so it will never be reached, and set
 * prelow and prehigh to zero.
 *
 * This function does not start the DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* number of bytes for last part */
		low = length;
		high = 0;
		/* total number of bits prev hashed */
		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}
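
/*
 * Worked example for the register values above: a final op with 20 bytes
 * left after 128 bytes already hashed programs MSG_SIZE_LOW = 20,
 * MSG_SIZE_HIGH = 0, PRE_MSG_SIZE_LOW = 1024 (128 * 8 bits) and
 * PRE_MSG_SIZE_HIGH = 0.
 */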

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	ctx->digcnt += length;
	ctx->total -= length;

	/* catch last interrupt */
	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */

	return -EINPROGRESS;
}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate a new buffer and copy the data for HASH into it. If xmit_buf
 * was filled, copy it first, then copy the data from sg after it. Prepare
 * one sgl[0] entry with the allocated buffer.
 *
 * Set the bit in dd->hash_flags so we can free the buffer after the irq
 * ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate a new scatterlist table and copy the data for HASH into it.
 * If xmit_buf was filled, prepare it first, then copy page, length and
 * offset from the source sg into it, adjusting begin and/or end for the
 * skip offset and the hash_later value.
 *
 * The resulting sg table will be assigned to ctx->sg. Set the flag so we
 * can free it after the irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		if (new_len <= 0)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_prepare_sgs() - prepare sg for processing
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 * @final: final flag
 *
 * Check two conditions: (1) if buffers in sg have BUFLEN-aligned data, and
 * (2) if the sg table has properly aligned elements (list_ok). If one of
 * these checks fails, then either (1) allocate a new buffer for the data
 * with s5p_hash_copy_sgs(), copy the data into this buffer and prepare the
 * request in sgl, or (2) allocate a new sg table and prepare its sg
 * elements.
 *
 * For digest or finup all conditions can be good, and we may not need any
 * fixes.
 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;
	struct scatterlist *sg_tmp = sg;

	if (!sg || !sg->length || !new_len)
		return 0;

	if (skip || !final)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;
		if (skip >= sg_tmp->length) {
			skip -= sg_tmp->length;
			if (!sg_tmp->length) {
				aligned = false;
				break;
			}
		} else {
			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
				aligned = false;
				break;
			}

			if (nbytes < sg_tmp->length - skip) {
				list_ok = false;
				break;
			}

			nbytes -= sg_tmp->length - skip;
			skip = 0;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);

	/*
	 * Have aligned data from previous operation and/or current
	 * Note: will enter here only if (digest or finup) and aligned
	 */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}

/**
 * s5p_hash_prepare_request() - prepare request for processing
 * @req: AHASH request
 * @update: true if UPDATE op
 *
 * Note 1: we can have the update flag _and_ the final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), or when
 *	   either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or
 *	   when we have a final op.
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes;
	int ret;

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* bytes left from previous request, so fill up to BUFLEN */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;
	} else {
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);

		hash_later = ctx->total - xmit_len;
		/* copy hash_later bytes from end of req->src */
		/* previous bytes are in xmit_buf, so no overwrite */
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* have buffered data only */
		if (unlikely(!ctx->bufcnt)) {
			/* first update didn't fill up buffer */
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);
		}

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);

		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	if (!final)
		ctx->total = xmit_len;

	return 0;
}
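
/*
 * Worked example for s5p_hash_prepare_request(): with ctx->bufcnt = 5 and
 * an update of req->nbytes = 200, total = 205. The first 59 bytes of
 * req->src top the buffer up to BUFLEN (64), which is copied to xmit_buf.
 * Since this is not a final op, xmit_len is rounded down to 192, so 192
 * bytes are sent to the HW and the trailing hash_later = 13 bytes stay in
 * ctx->buffer for the next op.
 */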

/**
 * s5p_hash_update_dma_stop() - unmap DMA
 * @dd: secss device
 *
 * Unmap scatterlist ctx->sg.
 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
}

/**
 * s5p_hash_finish() - copy calculated digest to crypto layer
 * @req: AHASH request
 */
static void s5p_hash_finish(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;

	if (ctx->digcnt)
		s5p_hash_copy_result(req);

	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
}

/**
 * s5p_hash_finish_req() - finish request
 * @req: AHASH request
 * @err: error
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	unsigned long flags;

	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
		kfree(ctx->sg);

	ctx->sg = NULL;
	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
			    BIT(HASH_FLAGS_SGS_COPIED));

	if (!err && !ctx->error) {
		s5p_hash_read_msg(req);
		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
			s5p_hash_finish(req);
	} else {
		ctx->error = true;
	}

	spin_lock_irqsave(&dd->hash_lock, flags);
	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
			    BIT(HASH_FLAGS_DMA_READY) |
			    BIT(HASH_FLAGS_OUTPUT_READY));
	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

/**
 * s5p_hash_handle_queue() - handle hash queue
 * @dd: device s5p_aes_dev
 * @req: AHASH request
 *
 * If req != NULL, enqueue it on dd->hash_queue. If HASH_FLAGS_BUSY is not
 * set on the device, then process the first request from the queue.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct s5p_hash_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->hash_lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->hash_queue, req);

	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		spin_unlock_irqrestore(&dd->hash_lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->hash_queue);
	async_req = crypto_dequeue_request(&dd->hash_queue);
	if (async_req)
		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);

	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->hash_req = req;
	ctx = ahash_request_ctx(req);

	err = s5p_hash_prepare_request(req, ctx->op_update);
	if (err || !ctx->total)
		goto out;

	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
		ctx->op_update, req->nbytes);

	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
	if (ctx->digcnt)
		s5p_hash_write_iv(req); /* restore hash IV */

	if (ctx->op_update) { /* HASH_OP_UPDATE */
		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
			/* no final() after finup() */
			err = s5p_hash_xmit_dma(dd, ctx->total, true);
	} else { /* HASH_OP_FINAL */
		err = s5p_hash_xmit_dma(dd, ctx->total, true);
	}
out:
	if (err != -EINPROGRESS) {
		/* hash_tasklet_cb will not finish it, so do it here */
		s5p_hash_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}

/**
 * s5p_hash_tasklet_cb() - hash tasklet
 * @data: ptr to s5p_aes_dev
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;

	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		s5p_hash_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
				       &dd->hash_flags)) {
			s5p_hash_update_dma_stop(dd);
		}

		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
				       &dd->hash_flags)) {
			/* hash or semi-hash ready */
			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
			goto finish;
		}
	}

	return;

finish:
	/* finish current request */
	s5p_hash_finish_req(dd->hash_req, 0);

	/* If we are not busy, process next req */
	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
		s5p_hash_handle_queue(dd, NULL);
}

/**
 * s5p_hash_enqueue() - enqueue request
 * @req: AHASH request
 * @op: operation UPDATE (true) or FINAL (false)
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op_update = op;

	return s5p_hash_handle_queue(tctx->dd, req);
}

/**
 * s5p_hash_update() - process the hash input data
 * @req: AHASH request
 *
 * If the request fits into the buffer, copy it and return immediately;
 * otherwise enqueue it with OP_UPDATE.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_update(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
}

/**
 * s5p_hash_shash_digest() - calculate shash digest
 * @tfm: crypto transformation
 * @flags: tfm flags
 * @data: input data
 * @len: length of data
 * @out: output buffer
 */
static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
				 const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(shash, data, len, out);
}

/**
 * s5p_hash_final_shash() - calculate shash digest
 * @req: AHASH request
 */
static int s5p_hash_final_shash(struct ahash_request *req)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
				     ctx->buffer, ctx->bufcnt, req->result);
}

/**
 * s5p_hash_final() - close up hash and calculate digest
 * @req: AHASH request
 *
 * Note: in the final op, req->src does not contain any data; however,
 * req->nbytes can be non-zero.
 *
 * If no input data has been processed yet and the buffered hash data is
 * less than BUFLEN (64), then calculate the final hash immediately by using
 * the SW algorithm fallback.
 *
 * Otherwise enqueue the current AHASH request with the OP_FINAL operation
 * op and finalize the hash message in HW. Note that if digcnt != 0 then
 * there was a previous update op, so there are always some buffered bytes
 * in ctx->buffer, which means that ctx->bufcnt != 0.
 *
 * Returns:
 * 0 if the request has been processed immediately,
 * -EINPROGRESS if the operation has been queued for later execution or is
 * set to processing by HW,
 * -EBUSY if queue is full and request should be resubmitted later,
 * other negative values denote an error.
 */
static int s5p_hash_final(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	ctx->finup = true;
	if (ctx->error)
		return -EINVAL; /* uncompleted hash is not needed */

	if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
		return s5p_hash_final_shash(req);

	return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}

/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed (except for -EINPROGRESS), or to calculate the
	 * digest for a small size.
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}

/**
 * s5p_hash_digest - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

/**
 * s5p_hash_cra_init_alg - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;
	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}

/**
 * s5p_hash_cra_init - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit - exit crypto tfm
 * @tfm: crypto transformation
 *
 * free allocated fallback
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

/**
 * s5p_hash_export - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}

static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "exynos-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
},
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "exynos-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
},
{
	.init		= s5p_hash_init,
	.update		= s5p_hash_update,
	.final		= s5p_hash_final,
	.finup		= s5p_hash_finup,
	.digest		= s5p_hash_digest,
	.export		= s5p_hash_export,
	.import		= s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "exynos-sha256",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= HASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_hash_cra_init,
		.cra_exit		= s5p_hash_cra_exit,
	}
}
};

static void s5p_set_aes(struct s5p_aes_dev *dev,
			const uint8_t *key, const uint8_t *iv,
			unsigned int keylen)
{
	void __iomem *keystart;

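	/*
	 * The key always ends at key register word 7: a 256-bit key starts
	 * at SSS_REG_AES_KEY_DATA(0), a 192-bit key at word 2 and a 128-bit
	 * key at word 4.
	 */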
	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;
	u8 *iv;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->info;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = req->info;
	} else {
		iv = NULL; /* AES_ECB */
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}

static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

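/*
 * s5p_aes_handle_req() - enqueue a request and kick the tasklet
 *
 * The request always goes through the crypto queue.  The tasklet is
 * scheduled only when the device was idle; if it is already busy, the
 * running tasklet will pick the request up later.  Returns the enqueue
 * status (typically -EINPROGRESS, or -EBUSY when the queue is full).
 */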
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

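/*
 * s5p_aes_crypt() - common entry point for all cipher modes
 *
 * The hardware (and this driver) only handles whole AES blocks, so the
 * request length must be a multiple of AES_BLOCK_SIZE.  The requested
 * mode flags are stored in the request context for the tasklet to use.
 */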
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

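/*
 * s5p_aes_setkey() - accept a 128-, 192- or 256-bit AES key
 *
 * The key is only cached in the transform context here; it is written
 * to the hardware by s5p_set_aes() when a request is actually started.
 */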
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

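/* Per-mode entry points: thin wrappers that select the mode flags. */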
static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

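/*
 * s5p_aes_cra_init() - bind the transform to the single device
 * instance and reserve room for the per-request context.
 */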
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}

static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
};

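/*
 * s5p_aes_probe() - map registers, grab the clock and IRQ, and
 * register the AES (and, on Exynos with CONFIG_CRYPTO_DEV_EXYNOS_HASH,
 * the hash) algorithms with the crypto API.  Only one device instance
 * is supported, tracked through the global s5p_dev pointer.
 */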
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/*
	 * Note: HASH and PRNG use the same registers in secss, so they
	 * must not overwrite each other.  HASH is therefore dropped when
	 * CONFIG_EXYNOS_RNG is enabled in the config.  The HASH registers
	 * need a larger MEM resource than the one described for AES/DES
	 * alone, hence the 0x300 extension below.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

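/*
 * Error unwind, in reverse order of setup.  Note that the hash path
 * shrinks the MEM resource back by 0x300, undoing the extension made
 * for the hash register bank above.
 */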
err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

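/*
 * s5p_aes_remove() - tear everything down in reverse order of probe,
 * restoring the MEM resource size when hash support was active.
 */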
static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");