#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/cputype.h>

#include "asr-geu.h"
#include "../../../crypto/asr/asr_aes_clk.h"

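/* MMIO accessors for the GEU register block. */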
static inline u32 asr_geu_read(struct asr_geu_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void asr_geu_write(struct asr_geu_dev *dd,
				 u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static int asr_geu_clk_sync(struct asr_geu_dev *dd)
{
	struct clk *geu_clk;

	if (dd->clk_synced)
		return 0;

	geu_clk = dd->geu_clk;
	/*
	 * The GEU clk can be disabled by the CP core while its enable count
	 * is still 1. Sync the clk enable state here and re-enable the clk.
	 */
	if (!__clk_is_enabled(geu_clk) && __clk_get_enable_count(geu_clk)) {
		asr_aes_clk_put(geu_clk);
		asr_aes_clk_get(geu_clk);
		dd->clk_synced = 1;
		dev_dbg(dd->dev, "sync geu clk done\n");
		return 1;
	}

	return 0;
}

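/*
 * asr_geu_dev_get()/asr_geu_dev_put() bracket hardware access: take the
 * device lock, resync the clock state if the CP core disabled it behind
 * our back, then enable the GEU clock; the put path reverses this.
 */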
static int asr_geu_dev_get(struct asr_geu_dev *dd)
{
	mutex_lock(&dd->geu_lock);

	asr_geu_clk_sync(dd);
	asr_aes_clk_get(dd->geu_clk);

	return 0;
}

static int asr_geu_dev_put(struct asr_geu_dev *dd)
{
	asr_aes_clk_put(dd->geu_clk);
	mutex_unlock(&dd->geu_lock);

	return 0;
}

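/* Reset the GEU to a known state by clearing its config and status registers. */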
static void asr_geu_hw_init(struct asr_geu_dev *dd)
{
	asr_geu_write(dd, GEU_CONFIG, 0);
	asr_geu_write(dd, GEU_STATUS, 0);
}

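/*
 * Shared GEU interrupt handler: read the status register and dispatch it to
 * the AES sub-driver's handler when one is registered.
 */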
static irqreturn_t asr_geu_irq(int irq, void *dev_id)
{
	u32 status;
	irqreturn_t ret = IRQ_NONE;
	struct asr_geu_dev *geu_dd = dev_id;
	struct asr_geu_aes *aes_dd = &geu_dd->asr_aes;

	status = asr_geu_read(geu_dd, GEU_STATUS);

	if (aes_dd->aes_irq)
		ret = aes_dd->aes_irq(status, aes_dd);

	return ret;
}

#if defined(CONFIG_OF)
static const struct of_device_id asr_geu_dt_ids[] = {
	{ .compatible = "asr,asr-geu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, asr_geu_dt_ids);
#endif

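/*
 * Operations handed to the fuse/RNG/AES sub-drivers. The intended usage
 * pattern (a sketch, inferred from dev_get()/dev_put()) is:
 *
 *	geu_dd->geu_ops->dev_get(geu_dd);
 *	... access GEU registers ...
 *	geu_dd->geu_ops->dev_put(geu_dd);
 */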
static struct asr_geu_ops geu_ops = {
	.dev_get = asr_geu_dev_get,
	.dev_put = asr_geu_dev_put,
};

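/*
 * Probe: map the GEU registers, request the shared IRQ, prepare and enable
 * the clock, then register whichever fuse / H/W RNG / AES sub-devices are
 * enabled in the device tree. Fails with -ENODEV if none is registered.
 */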
static int asr_geu_probe(struct platform_device *pdev)
{
	struct asr_geu_dev *geu_dd;
	struct device *dev = &pdev->dev;
	struct resource *geu_res;
	struct device_node *np = NULL;
	int err = 0, devnum = 0;

	geu_dd = devm_kzalloc(&pdev->dev, sizeof(*geu_dd), GFP_KERNEL);
	if (geu_dd == NULL) {
		err = -ENOMEM;
		goto no_mem_err;
	}

	np = dev->of_node;
	geu_dd->dev = dev;
	geu_dd->geu_ops = &geu_ops;

	platform_set_drvdata(pdev, geu_dd);

	mutex_init(&geu_dd->geu_lock);

	/* Get the base address */
	geu_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!geu_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	geu_dd->phys_base = geu_res->start;

	/* Get the IRQ */
	geu_dd->irq = platform_get_irq(pdev, 0);
	if (geu_dd->irq < 0) {
		err = geu_dd->irq;
		goto res_err;
	}
	err = devm_request_irq(&pdev->dev, geu_dd->irq, asr_geu_irq,
			       IRQF_SHARED, "asr-geu", geu_dd);
	if (err) {
		dev_err(dev, "unable to request geu irq.\n");
		goto res_err;
	}

	/* Initialize the clock */
	geu_dd->geu_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(geu_dd->geu_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(geu_dd->geu_clk);
		goto res_err;
	}
	geu_dd->clk_synced = 0;

	geu_dd->io_base = devm_ioremap_resource(&pdev->dev, geu_res);
	if (IS_ERR(geu_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(geu_dd->io_base);
		goto res_err;
	}
	err = clk_prepare(geu_dd->geu_clk);
	if (err)
		goto res_err;

	err = asr_aes_clk_get(geu_dd->geu_clk);
	if (err)
		goto geu_clk_unprepare;

	asr_geu_hw_init(geu_dd);

#ifdef CONFIG_ASR_FUSE
	if (of_get_property(np, "asr,asr-fuse", NULL)) {
		err = asr_geu_fuse_register(geu_dd);
		if (err)
			goto geu_asr_aes_clk_put;
		dev_info(dev, "Fuse is initialized\n");
		devnum++;
	}
#endif

#ifdef CONFIG_ASR_RNG
	if (of_get_property(np, "asr,asr-hwrng", NULL)) {
		err = asr_geu_rng_register(geu_dd);
		if (err)
			goto rng_error;
		dev_info(dev, "H/W RNG is initialized\n");
		devnum++;
	}
#endif

#ifdef CONFIG_ASR_AES
	if (of_get_property(np, "asr,asr-aes", NULL)) {
		if (!cpu_is_asr1903_b0()) {
			err = asr_geu_aes_register(geu_dd);
			if (err)
				goto aes_error;
			dev_info(dev, "AES engine is initialized\n");
			devnum++;
		}
	}
#endif

	if (!devnum) {
		dev_err(dev, "No GEU device enabled\n");
		err = -ENODEV;
		goto geu_asr_aes_clk_put;
	}

	return 0;

aes_error:
#ifdef CONFIG_ASR_RNG
	asr_geu_rng_unregister(geu_dd);
#endif
rng_error:
#ifdef CONFIG_ASR_FUSE
	asr_geu_fuse_unregister(geu_dd);
#endif
geu_asr_aes_clk_put:
	asr_aes_clk_put(geu_dd->geu_clk);
geu_clk_unprepare:
	clk_unprepare(geu_dd->geu_clk);
res_err:
	devm_kfree(dev, geu_dd);
no_mem_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

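/* Unregister the sub-devices and release the GEU clock on driver removal. */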
static int asr_geu_remove(struct platform_device *pdev)
{
	struct asr_geu_dev *geu_dd;

	geu_dd = platform_get_drvdata(pdev);
	if (!geu_dd)
		return -ENODEV;

#ifdef CONFIG_ASR_RNG
	asr_geu_rng_unregister(geu_dd);
#endif

#ifdef CONFIG_ASR_FUSE
	asr_geu_fuse_unregister(geu_dd);
#endif

#ifdef CONFIG_ASR_AES
	asr_geu_aes_unregister(geu_dd);
#endif

	/* Disable the clock before unpreparing it, mirroring probe. */
	asr_aes_clk_put(geu_dd->geu_clk);
	clk_unprepare(geu_dd->geu_clk);

	devm_kfree(geu_dd->dev, geu_dd);

	return 0;
}

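/* System PM: drop the GEU clock reference on suspend, retake it on resume. */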
#ifdef CONFIG_PM
static int asr_geu_suspend(struct device *dev)
{
	struct asr_geu_dev *geu_dd = dev_get_drvdata(dev);

	asr_aes_clk_put(geu_dd->geu_clk);

	return 0;
}

static int asr_geu_resume(struct device *dev)
{
	struct asr_geu_dev *geu_dd = dev_get_drvdata(dev);

	return asr_aes_clk_get(geu_dd->geu_clk);
}

static const struct dev_pm_ops asr_geu_pm_ops = {
	.suspend = asr_geu_suspend,
	.resume = asr_geu_resume,
};
#endif /* CONFIG_PM */

static struct platform_driver asr_geu_driver = {
	.probe = asr_geu_probe,
	.remove = asr_geu_remove,
	.driver = {
		.name = "asr_geu",
#ifdef CONFIG_PM
		.pm = &asr_geu_pm_ops,
#endif
		.of_match_table = of_match_ptr(asr_geu_dt_ids),
	},
};

static int __init asr_geu_init(void)
{
	return platform_driver_register(&asr_geu_driver);
}
device_initcall_sync(asr_geu_init);

MODULE_DESCRIPTION("ASR Generic Encryption Unit support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yu Zhang");