blob: 3c00c6f0eeef30e7744da4174b22622d3fe970bd [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2019 MediaTek Inc.
 */
5
6
7#include <linux/interrupt.h>
8#include <linux/platform_device.h>
9#include <linux/io.h>
10#include <linux/slab.h>
11#include <linux/sched/clock.h>
12#include <linux/soc/mediatek/mtk-mbox.h>
13
14/*
15 * memory copy to tiny
16 * @param dest: dest address
17 * @param src: src address
18 * @param size: memory size
19 */
20void mtk_memcpy_to_tinysys(void __iomem *dest, const void *src, int size)
21{
22 int i;
23 u32 __iomem *t = dest;
24 const u32 *s = src;
25
26 for (i = 0; i < ((size + 3) >> 2); i++)
27 *t++ = *s++;
28}
29
30/*
31 * memory copy from tiny
32 * @param dest: dest address
33 * @param src: src address
34 * @param size: memory size
35 */
36void mtk_memcpy_from_tinysys(void *dest, const void __iomem *src, int size)
37{
38 int i;
39 u32 *t = dest;
40 const u32 __iomem *s = src;
41
42 for (i = 0; i < ((size + 3) >> 2); i++)
43 *t++ = *s++;
44}
45
46/*
47 * write data to mbox with ipi msg header
48 * function must in critical context
49 */
50int mtk_mbox_write_hd(struct mtk_mbox_device *mbdev, unsigned int mbox,
51 unsigned int slot, void *msg)
52{
53 unsigned int slot_ofs, size;
54 struct mtk_mbox_info *minfo;
55 struct mtk_ipi_msg *ipimsg;
56 void __iomem *base;
57 int len;
58 unsigned long flags;
59
60 if (!mbdev) {
61 pr_notice("[MBOX]write header fail, dev null");
62 return MBOX_PLT_ERR;
63 }
64
65 if (mbox >= mbdev->count || !msg) {
66 pr_notice("[MBOX]write header config err");
67 return MBOX_PARA_ERR;
68 }
69
70 minfo = &(mbdev->info_table[mbox]);
71 base = minfo->base;
72 slot_ofs = slot * MBOX_SLOT_SIZE;
73 size = minfo->slot;
74 ipimsg = (struct mtk_ipi_msg *)msg;
75 len = ipimsg->ipihd.len;
76
77 if (len > size * MBOX_SLOT_SIZE)
78 return MBOX_WRITE_SZ_ERR;
79
80 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
81 /*ipi header and payload*/
82 if (mbdev->memcpy_to_tiny) {
83 mbdev->memcpy_to_tiny((void __iomem *)(base + slot_ofs),
84 ipimsg, sizeof(struct mtk_ipi_msg_hd));
85 mbdev->memcpy_to_tiny((void __iomem *)
86 (base + slot_ofs + sizeof(struct mtk_ipi_msg_hd)),
87 ipimsg->data, len);
88 } else {
89 mtk_memcpy_to_tinysys((void __iomem *)(base + slot_ofs),
90 ipimsg, sizeof(struct mtk_ipi_msg_hd));
91 mtk_memcpy_to_tinysys((void __iomem *)
92 (base + slot_ofs + sizeof(struct mtk_ipi_msg_hd)),
93 ipimsg->data, len);
94 }
95
96 minfo->record.write_count++;
97 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
98
99 return MBOX_DONE;
100}
101
102/*
103 * read data from mbox with ipi msg header
104 * function must in critical context
105 */
106int mtk_mbox_read_hd(struct mtk_mbox_device *mbdev, unsigned int mbox,
107 unsigned int slot, void *dest)
108{
109 unsigned int slot_ofs, size;
110 struct mtk_mbox_info *minfo;
111 struct mtk_ipi_msg_hd *ipihd;
112 void __iomem *base;
113 unsigned long flags;
114
115 if (!mbdev) {
116 pr_notice("[MBOX]read header fail, dev null");
117 return MBOX_PLT_ERR;
118 }
119
120 if (mbox >= mbdev->count || !dest) {
121 pr_notice("[MBOX]read header config err");
122 return MBOX_PARA_ERR;
123 }
124
125 minfo = &(mbdev->info_table[mbox]);
126 base = minfo->base;
127 slot_ofs = slot * MBOX_SLOT_SIZE;
128 size = minfo->slot;
129 ipihd = (struct mtk_ipi_msg_hd *)(base + slot_ofs);
130
131 if (ipihd->len > size * MBOX_SLOT_SIZE)
132 return MBOX_READ_SZ_ERR;
133
134 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
135 /*ipi header and payload*/
136 if (mbdev->memcpy_from_tiny)
137 mbdev->memcpy_from_tiny(dest, (void __iomem *)
138 (base + slot_ofs + sizeof(struct mtk_ipi_msg_hd)),
139 ipihd->len);
140 else
141 mtk_memcpy_from_tinysys(dest, (void __iomem *)
142 (base + slot_ofs + sizeof(struct mtk_ipi_msg_hd)),
143 ipihd->len);
144 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
145
146 return MBOX_DONE;
147}
148
149/*
150 * write data to mbox, function must in critical context
151 */
152int mtk_mbox_write(struct mtk_mbox_device *mbdev, unsigned int mbox,
153 unsigned int slot, void *data, unsigned int len)
154{
155 unsigned int slot_ofs, size;
156 struct mtk_mbox_info *minfo;
157 void __iomem *base;
158 unsigned long flags;
159
160 if (!mbdev) {
161 pr_notice("[MBOX]write fail, dev or ptr null");
162 return MBOX_PLT_ERR;
163 }
164
165 if (mbox >= mbdev->count || !data)
166 return MBOX_PARA_ERR;
167
168 minfo = &(mbdev->info_table[mbox]);
169 base = minfo->base;
170 slot_ofs = slot * MBOX_SLOT_SIZE;
171 size = minfo->slot;
172
173 if (slot > size)
174 return MBOX_WRITE_SZ_ERR;
175
176 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
177 if (mbdev->memcpy_to_tiny)
178 mbdev->memcpy_to_tiny((void __iomem *)(base + slot_ofs),
179 data, len);
180 else
181 mtk_memcpy_to_tinysys((void __iomem *)(base + slot_ofs),
182 data, len);
183
184 minfo->record.write_count++;
185 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
186
187 return MBOX_DONE;
188}
189
190/*
191 * read data to user buffer, function must in critical context
192 */
193int mtk_mbox_read(struct mtk_mbox_device *mbdev, unsigned int mbox,
194 unsigned int slot, void *data, unsigned int len)
195{
196 unsigned int slot_ofs, size;
197 struct mtk_mbox_info *minfo;
198 void __iomem *base;
199 unsigned long flags;
200
201 if (!mbdev || !data) {
202 pr_notice("[MBOX]read fail,dev or ptr null");
203 return MBOX_PLT_ERR;
204 }
205
206 if (mbox >= mbdev->count)
207 return MBOX_PARA_ERR;
208
209 minfo = &(mbdev->info_table[mbox]);
210 base = minfo->base;
211 slot_ofs = slot * MBOX_SLOT_SIZE;
212 size = minfo->slot;
213
214 if (slot > size)
215 return MBOX_READ_SZ_ERR;
216
217 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
218 if (mbdev->memcpy_from_tiny)
219 mbdev->memcpy_from_tiny(data,
220 (void __iomem *)(base + slot_ofs), len);
221 else
222 mtk_memcpy_from_tinysys(data,
223 (void __iomem *)(base + slot_ofs), len);
224 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
225
226 return MBOX_DONE;
227}
228
229/*
230 * clear mbox irq,
231 * with read/write function must in critical context
232 */
233int mtk_mbox_clr_irq(struct mtk_mbox_device *mbdev, unsigned int mbox,
234 unsigned int irq)
235{
236 struct mtk_mbox_info *minfo;
237
238 if (!mbdev)
239 return MBOX_PLT_ERR;
240
241 if (mbox >= mbdev->count)
242 return MBOX_IRQ_ERR;
243
244 minfo = &(mbdev->info_table[mbox]);
245 writel(irq, minfo->clr_irq_reg);
246
247 return MBOX_DONE;
248}
249
250/*
251 * trigger mbox irq,
252 * with read/write function must in critical context
253 */
254int mtk_mbox_trigger_irq(struct mtk_mbox_device *mbdev, unsigned int mbox,
255 unsigned int irq)
256{
257 struct mtk_mbox_info *minfo;
258 unsigned long flags;
259
260 if (!mbdev)
261 return MBOX_PLT_ERR;
262
263 if (mbox >= mbdev->count)
264 return MBOX_IRQ_ERR;
265
266 minfo = &(mbdev->info_table[mbox]);
267 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
268 writel(irq, minfo->set_irq_reg);
269 minfo->record.trig_irq_count++;
270 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
271
272 return MBOX_DONE;
273}
274
275/*
276 * check mbox 32bits set irq reg status
277 * with read/write function must in critical context
278 * @return irq status 0: not triggered , other: irq triggered
279 */
280unsigned int mtk_mbox_check_send_irq(struct mtk_mbox_device *mbdev,
281 unsigned int mbox, unsigned int pin_index)
282{
283 struct mtk_mbox_info *minfo;
284 unsigned int reg, irq_state;
285 unsigned long flags;
286
287 if (!mbdev)
288 return 0;
289
290 if (mbox >= mbdev->count)
291 return 0;
292
293 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
294 irq_state = 0;
295 minfo = &(mbdev->info_table[mbox]);
296 if (minfo->send_status_reg)
297 reg = readl(minfo->send_status_reg);
298 else
299 reg = readl(minfo->set_irq_reg);
300
301 irq_state = (reg & (0x1 << pin_index));
302
303 if (irq_state)
304 minfo->record.busy_count++;
305
306 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
307
308 return irq_state;
309}
310
311/*
312 * check mbox 32bits clr irq reg status
313 * with read/write function must in critical context
314 * @return irq status 0: not triggered , other: irq triggered
315 */
316unsigned int mtk_mbox_read_recv_irq(struct mtk_mbox_device *mbdev,
317 unsigned int mbox)
318{
319 struct mtk_mbox_info *minfo;
320 unsigned int reg;
321
322 if (!mbdev)
323 return 0;
324
325 if (mbox >= mbdev->count)
326 return 0;
327
328 minfo = &(mbdev->info_table[mbox]);
329
330 if (minfo->recv_status_reg)
331 reg = readl(minfo->recv_status_reg);
332 else if (minfo->clr_irq_reg)
333 reg = readl(minfo->clr_irq_reg);
334 else {
335 pr_err("[mtk_mbox_read_recv_irq] NULL dev=%s mbox=%u\n",
336 mbdev->name, mbox);
337 reg = 0;
338 }
339
340 return reg;
341}
342
343/*
344 * set mbox base address to init register
345 *
346 */
347int mtk_mbox_set_base_reg(struct mtk_mbox_device *mbdev, unsigned int mbox,
348 unsigned int addr)
349{
350 struct mtk_mbox_info *minfo;
351
352 if (!mbdev)
353 return MBOX_PLT_ERR;
354
355 if (mbox >= mbdev->count)
356 return MBOX_PARA_ERR;
357
358 minfo = &(mbdev->info_table[mbox]);
359 writel(addr, minfo->init_base_reg);
360
361
362 return MBOX_DONE;
363}
364
365/*
366 * set mbox base address, task context
367 *
368 */
369int mtk_mbox_set_base_addr(struct mtk_mbox_device *mbdev, unsigned int mbox,
370 unsigned int addr)
371{
372 struct mtk_mbox_info *minfo;
373 unsigned long flags;
374 int ret;
375
376 if (!mbdev)
377 return MBOX_PLT_ERR;
378
379 if (mbox >= mbdev->count)
380 return MBOX_PARA_ERR;
381
382 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
383 ret = mtk_mbox_set_base_reg(mbdev, mbox, addr);
384
385 if (ret != MBOX_DONE) {
386 spin_unlock_irqrestore(
387 &mbdev->info_table[mbox].mbox_lock, flags);
388 return ret;
389 }
390
391 minfo = &(mbdev->info_table[mbox]);
392 writel(addr, minfo->base);
393 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
394
395 return MBOX_DONE;
396}
397
398/*
399 * mtk_mbox_cb_register, register callback function
400 *
401 */
402int mtk_mbox_cb_register(struct mtk_mbox_device *mbdev, unsigned int pin_offset,
403 mbox_pin_cb_t mbox_pin_cb, void *prdata)
404{
405 struct mtk_mbox_pin_recv *pin_recv;
406
407 if (!mbdev)
408 return MBOX_PLT_ERR;
409
410 pin_recv = &(mbdev->pin_recv_table[pin_offset]);
411 pin_recv->mbox_pin_cb = mbox_pin_cb;
412 pin_recv->prdata = prdata;
413
414 return MBOX_DONE;
415}
416
417/*
418 * mbox polling, context is protected by mbox_lock
419 */
420int mtk_mbox_polling(struct mtk_mbox_device *mbdev, unsigned int mbox,
421 void *data, struct mtk_mbox_pin_recv *pin_recv)
422{
423 struct mtk_mbox_info *minfo;
424 unsigned long flags;
425 unsigned int reg, irq_state;
426 unsigned int recv_pin_index;
427 int ret;
428
429 if (!mbdev)
430 return MBOX_PLT_ERR;
431
432 if (mbox >= mbdev->count)
433 return MBOX_PARA_ERR;
434
435 recv_pin_index = pin_recv->pin_index;
436 minfo = &(mbdev->info_table[mbox]);
437
438 spin_lock_irqsave(&mbdev->info_table[mbox].mbox_lock, flags);
439 /*check lock for */
440 if (pin_recv->lock == MBOX_PIN_BUSY) {
441 spin_unlock_irqrestore(
442 &mbdev->info_table[mbox].mbox_lock, flags);
443 minfo->record.busy_count++;
444 return MBOX_PIN_BUSY;
445 }
446 /*check bit*/
447 reg = mtk_mbox_read_recv_irq(mbdev, mbox);
448 irq_state = (reg & (0x1 << recv_pin_index));
449
450 if (irq_state > 0) {
451 /*clear bit*/
452 ret = mtk_mbox_clr_irq(mbdev, mbox, irq_state);
453 } else {
454 spin_unlock_irqrestore(
455 &mbdev->info_table[mbox].mbox_lock, flags);
456 minfo->record.busy_count++;
457 return MBOX_PIN_BUSY;
458 }
459
460 spin_unlock_irqrestore(&mbdev->info_table[mbox].mbox_lock, flags);
461 /*copy data*/
462 ret = mtk_mbox_read(mbdev, mbox, pin_recv->offset, data,
463 pin_recv->msg_size * MBOX_SLOT_SIZE);
464
465 if (ret != MBOX_DONE)
466 return ret;
467
468 pin_recv->recv_record.poll_count++;
469
470 /*dump recv info*/
471 if (mbdev->log_enable)
472 mtk_mbox_dump_recv_pin(mbdev, pin_recv);
473
474 return MBOX_DONE;
475}
476
477
478/*
479 * set lock status
480 */
481static void mtk_mbox_set_lock(struct mtk_mbox_device *mbdev, unsigned int mbox,
482 unsigned int lock)
483{
484 struct mtk_mbox_pin_recv *pin_recv;
485 int i;
486
487 for (i = 0; i < mbdev->recv_count; i++) {
488 pin_recv = &(mbdev->pin_recv_table[i]);
489 if (pin_recv->mbox != mbox)
490 continue;
491 pin_recv->lock = lock;
492 }
493}
494
495
/*
 * mtk_mbox_isr - mailbox interrupt handler (hard-irq context)
 * @irq:    irq number (unused; status comes from the mbox registers)
 * @dev_id: the struct mtk_mbox_info registered with request_irq()
 *
 * Flow: lock all pins of this mbox, snapshot the receive-irq status, then
 * for each matching pin copy the message out, optionally run the in-ISR
 * callback, clear the serviced irq bits, unlock the pins and finally run
 * the notify (ipi_cb) pass for task-context consumers.
 *
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t mtk_mbox_isr(int irq, void *dev_id)
{
	unsigned int mbox, irq_status, irq_temp;
	struct mtk_mbox_pin_recv *pin_recv;
	struct mtk_mbox_info *minfo = (struct mtk_mbox_info *)dev_id;
	struct mtk_mbox_device *mbdev = minfo->mbdev;
	struct mtk_ipi_msg_hd *ipihead;
	unsigned long flags;
	//void *user_data;
	int ret;
	int i;

	mbox = minfo->id;
	ret = MBOX_DONE;

	spin_lock_irqsave(&minfo->mbox_lock, flags);
	/* lock every recv pin of this mbox so pollers back off */
	mtk_mbox_set_lock(mbdev, mbox, MBOX_PIN_BUSY);
	/* snapshot irq status; irq_temp accumulates the bits we service */
	irq_status = mtk_mbox_read_recv_irq(mbdev, mbox);
	irq_temp = 0;
	spin_unlock_irqrestore(&minfo->mbox_lock, flags);

	if (mbdev->pre_cb)
		mbdev->pre_cb(mbdev->prdata);

	/* execute all receive pin handlers whose irq bit is set */
	for (i = 0; i < mbdev->recv_count; i++) {
		pin_recv = &(mbdev->pin_recv_table[i]);
		if (pin_recv->mbox != mbox)
			continue;
		/* recv irq triggered for this pin? */
		if (((0x1 << pin_recv->pin_index) & irq_status) > 0x0) {
			pin_recv->recv_record.recv_irq_count++;
			irq_temp = irq_temp | (0x1 << pin_recv->pin_index);
			/* a pin without a user buffer is a fatal setup bug */
			if (!pin_recv->pin_buf) {
				pr_err("[MBOX Error]null ptr dev=%s ipi_id=%d",
					mbdev->name, pin_recv->chan_id);
				BUG_ON(1);
			}
			if (minfo->opt == MBOX_OPT_QUEUE_DIR ||
				minfo->opt == MBOX_OPT_QUEUE_SMEM) {
				/*
				 * Queue mode: slot carries an ipi header;
				 * copy payload and (optionally) call the
				 * pin callback directly in ISR context.
				 */
				ipihead = (struct mtk_ipi_msg_hd *)(minfo->base
					+ (pin_recv->offset * MBOX_SLOT_SIZE));
				ret = mtk_mbox_read_hd(mbdev, mbox,
					pin_recv->offset, pin_recv->pin_buf);

				if (pin_recv->recv_opt == MBOX_RECV_MESSAGE
					&& pin_recv->cb_ctx_opt
					== MBOX_CB_IN_ISR
					&& pin_recv->mbox_pin_cb
					&& ret == MBOX_DONE) {
					/* timestamps bracket the callback */
					pin_recv->recv_record.pre_timestamp
						= cpu_clock(0);
					pin_recv->mbox_pin_cb(ipihead->id,
						pin_recv->prdata, pin_recv->pin_buf,
						(unsigned int)ipihead->len);
					pin_recv->recv_record.post_timestamp
						= cpu_clock(0);
					pin_recv->recv_record.cb_count++;
				}
			} else {
				/*
				 * Direct mode: fixed-size copy of
				 * msg_size slots, no ipi header.
				 */
				ret = mtk_mbox_read(mbdev, mbox,
					pin_recv->offset, pin_recv->pin_buf,
					pin_recv->msg_size * MBOX_SLOT_SIZE);

				if (pin_recv->recv_opt == MBOX_RECV_MESSAGE
					&& pin_recv->cb_ctx_opt
					== MBOX_CB_IN_ISR
					&& pin_recv->mbox_pin_cb
					&& ret == MBOX_DONE) {
					pin_recv->recv_record.pre_timestamp
						= cpu_clock(0);
					pin_recv->mbox_pin_cb(pin_recv->chan_id,
						pin_recv->prdata, pin_recv->pin_buf,
						pin_recv->msg_size * MBOX_SLOT_SIZE);
					pin_recv->recv_record.post_timestamp
						= cpu_clock(0);
					pin_recv->recv_record.cb_count++;
				}
			}

			if (ret != MBOX_DONE)
				pr_err("[MBOX ISR]cp to buf fail,dev=%s chan=%d ret=%d",
					mbdev->name, pin_recv->chan_id, ret);

			/* dump recv info when verbose logging is on */
			if (mbdev->log_enable)
				mtk_mbox_dump_recv(mbdev, i);
		}
	}

	if (mbdev->post_cb)
		mbdev->post_cb(mbdev->prdata);

	/* clear only the irq bits we actually serviced */
	spin_lock_irqsave(&minfo->mbox_lock, flags);
	mtk_mbox_clr_irq(mbdev, mbox, irq_temp);
	/* release the recv pins */
	mtk_mbox_set_lock(mbdev, mbox, MBOX_DONE);
	spin_unlock_irqrestore(&minfo->mbox_lock, flags);

	/* status bits with no matching pin indicate a broken pin table */
	if (irq_temp == 0 && irq_status != 0) {
		pr_err("[MBOX ISR]dev=%s pin table err, status=%x",
			mbdev->name, irq_status);
		for (i = 0; i < mbdev->recv_count; i++) {
			pin_recv = &(mbdev->pin_recv_table[i]);
			mtk_mbox_dump_recv_pin(mbdev, pin_recv);
		}
	}

	/* second pass: notify task-context consumers via ipi_cb */
	for (i = 0; i < mbdev->recv_count; i++) {
		pin_recv = &(mbdev->pin_recv_table[i]);
		if (pin_recv->mbox != mbox)
			continue;
		/* recv irq triggered for this pin? */
		if (((0x1 << pin_recv->pin_index) & irq_status) > 0x0) {
			/* notify task */
			if (mbdev->ipi_cb) {
				mbdev->ipi_cb(pin_recv, mbdev->ipi_priv);
				pin_recv->recv_record.notify_count++;
			}
		}
	}

	return IRQ_HANDLED;
}
630
631/*
632 * mtk_smem_init, initial share memory
633 *
634 */
635int mtk_smem_init(struct platform_device *pdev, struct mtk_mbox_device *mbdev,
636 unsigned int mbox, void __iomem *base,
637 void __iomem *set_irq_reg, void __iomem *clr_irq_reg,
638 void __iomem *send_status_reg, void __iomem *recv_status_reg)
639{
640 struct mtk_mbox_info *minfo;
641 char name[32];
642 int ret;
643
644 minfo = &(mbdev->info_table[mbox]);
645
646 minfo->base = base;
647 minfo->set_irq_reg = set_irq_reg;
648 minfo->clr_irq_reg = clr_irq_reg;
649 minfo->send_status_reg = send_status_reg;
650 minfo->recv_status_reg = recv_status_reg;
651 minfo->enable = true;
652 minfo->id = mbox;
653 minfo->mbdev = mbdev;
654 minfo->is64d = 0;
655 spin_lock_init(&minfo->mbox_lock);
656
657 snprintf(name, sizeof(name), "mbox%d", mbox);
658 minfo->irq_num = platform_get_irq_byname(pdev, name);
659 if (minfo->irq_num < 0) {
660 pr_err("MBOX %d can't find IRQ\n", mbox);
661 goto smem_fail;
662 }
663
664 ret = request_irq(minfo->irq_num, mtk_mbox_isr, IRQF_TRIGGER_NONE,
665 "MBOX_ISR", (void *) minfo);
666 if (ret) {
667 pr_err("MBOX %d request irq Failed\n", mbox);
668 goto smem_fail;
669 }
670
671 return MBOX_DONE;
672
673smem_fail:
674 return MBOX_CONFIG_ERR;
675}
676
677/*
678 * mtk_mbox_probe , porbe and initial mbox
679 *
680 */
681int mtk_mbox_probe(struct platform_device *pdev, struct mtk_mbox_device *mbdev,
682 unsigned int mbox)
683{
684 struct mtk_mbox_info *minfo;
685 char name[32];
686 int ret;
687 struct device *dev = &pdev->dev;
688 struct resource *res;
689
690 minfo = &(mbdev->info_table[mbox]);
691
692 if (pdev) {
693 snprintf(name, sizeof(name), "mbox%d_base", mbox);
694 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
695 minfo->base = devm_ioremap_resource(dev, res);
696
697 if (IS_ERR((void const *) minfo->base))
698 pr_err("MBOX %d can't remap base\n", mbox);
699
700 minfo->slot = (unsigned int)resource_size(res)/MBOX_SLOT_SIZE;
701
702 /*init reg*/
703 snprintf(name, sizeof(name), "mbox%d_init", mbox);
704 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
705 minfo->init_base_reg = devm_ioremap_resource(dev, res);
706 if (IS_ERR((void const *) minfo->init_base_reg))
707 pr_err("MBOX %d can't find init reg\n", mbox);
708 /*set irq reg*/
709 snprintf(name, sizeof(name), "mbox%d_set", mbox);
710 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
711 minfo->set_irq_reg = devm_ioremap_resource(dev, res);
712 if (IS_ERR((void const *) minfo->set_irq_reg)) {
713 pr_err("MBOX %d can't find set reg\n", mbox);
714 goto mtk_mbox_probe_fail;
715 }
716 /*clear reg*/
717 snprintf(name, sizeof(name), "mbox%d_clr", mbox);
718 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
719 minfo->clr_irq_reg = devm_ioremap_resource(dev, res);
720 if (IS_ERR((void const *) minfo->clr_irq_reg)) {
721 pr_err("MBOX %d can't find clr reg\n", mbox);
722 goto mtk_mbox_probe_fail;
723 }
724 /*send status reg*/
725 snprintf(name, sizeof(name), "mbox%d_send", mbox);
726 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
727 minfo->send_status_reg = devm_ioremap_resource(dev, res);
728 if (IS_ERR((void const *) minfo->send_status_reg)) {
729 pr_notice("MBOX %d can't find send status reg\n", mbox);
730 minfo->send_status_reg = NULL;
731 }
732 /*recv status reg*/
733 snprintf(name, sizeof(name), "mbox%d_recv", mbox);
734 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
735 minfo->recv_status_reg = devm_ioremap_resource(dev, res);
736 if (IS_ERR((void const *) minfo->recv_status_reg)) {
737 pr_notice("MBOX %d can't find recv status reg\n", mbox);
738 minfo->recv_status_reg = NULL;
739 }
740
741 snprintf(name, sizeof(name), "mbox%d", mbox);
742 minfo->irq_num = platform_get_irq_byname(pdev, name);
743 if (minfo->irq_num < 0) {
744 pr_err("MBOX %d can't find IRQ\n", mbox);
745 goto mtk_mbox_probe_fail;
746 }
747
748 minfo->enable = true;
749 minfo->id = mbox;
750 minfo->mbdev = mbdev;
751 spin_lock_init(&minfo->mbox_lock);
752
753 ret = request_irq(minfo->irq_num, mtk_mbox_isr,
754 IRQF_TRIGGER_NONE, "MBOX_ISR", (void *) minfo);
755 if (ret) {
756 pr_err("MBOX %d request irq Failed\n", mbox);
757 goto mtk_mbox_probe_fail;
758 }
759 }
760
761 return MBOX_DONE;
762
763mtk_mbox_probe_fail:
764 return MBOX_CONFIG_ERR;
765}
766
767/*
768 *mbox print receive pin function
769 */
770void mtk_mbox_print_recv(struct mtk_mbox_device *mbdev,
771 struct mtk_mbox_pin_recv *pin_recv)
772{
773 pr_notice("[MBOX]dev=%s recv mbox=%u off=%u cv_opt=%u ctx_opt=%u mg_sz=%u p_idx=%u id=%u\n"
774 , mbdev->name
775 , pin_recv->mbox
776 , pin_recv->offset
777 , pin_recv->recv_opt
778 , pin_recv->cb_ctx_opt
779 , pin_recv->msg_size
780 , pin_recv->pin_index
781 , pin_recv->chan_id);
782
783 pr_notice("[MBOX]dev=%s recv id=%u poll=%u cv_irq=%u noti=%u cb=%u pre=%lld po=%lld\n"
784 , mbdev->name
785 , pin_recv->chan_id
786 , pin_recv->recv_record.poll_count
787 , pin_recv->recv_record.recv_irq_count
788 , pin_recv->recv_record.notify_count
789 , pin_recv->recv_record.cb_count
790 , pin_recv->recv_record.pre_timestamp
791 , pin_recv->recv_record.post_timestamp);
792}
793
794/*
795 *mbox print send pin function
796 */
797void mtk_mbox_print_send(struct mtk_mbox_device *mbdev,
798 struct mtk_mbox_pin_send *pin_send)
799{
800 pr_notice("[MBOX]dev=%s send mbox=%u off=%u s_opt=%u mg_sz=%u p_idx=%u id=%u\n"
801 , mbdev->name
802 , pin_send->mbox
803 , pin_send->offset
804 , pin_send->send_opt
805 , pin_send->msg_size
806 , pin_send->pin_index
807 , pin_send->chan_id);
808}
809
810/*
811 *mbox print mbox function
812 */
813void mtk_mbox_print_minfo(struct mtk_mbox_device *mbdev,
814 struct mtk_mbox_info *minfo)
815{
816 pr_notice("[MBOX]dev=%s mbox id=%u slot=%u opt=%u base=%p set_reg=%p clr_reg=%p init_reg=%p s_sta=%p cv_sta=%p\n"
817 , mbdev->name
818 , minfo->id
819 , minfo->slot
820 , minfo->opt
821 , minfo->base
822 , minfo->set_irq_reg
823 , minfo->clr_irq_reg
824 , minfo->init_base_reg
825 , minfo->send_status_reg
826 , minfo->recv_status_reg);
827
828 pr_notice("[MBOX]dev=%s write=%u busy=%u tri_irq=%u\n"
829 , mbdev->name
830 , minfo->record.write_count
831 , minfo->record.busy_count
832 , minfo->record.trig_irq_count);
833}
834
835
836/*
837 *mbox information dump
838 */
839void mtk_mbox_dump_all(struct mtk_mbox_device *mbdev)
840{
841 struct mtk_mbox_pin_recv *pin_recv;
842 struct mtk_mbox_pin_send *pin_send;
843 struct mtk_mbox_info *minfo;
844 int i;
845
846 if (!mbdev)
847 return;
848
849 pr_notice("[MBOX]dev=%s recv count=%u send count=%u\n",
850 mbdev->name, mbdev->recv_count, mbdev->send_count);
851
852 for (i = 0; i < mbdev->recv_count; i++) {
853 pin_recv = &(mbdev->pin_recv_table[i]);
854 mtk_mbox_print_recv(mbdev, pin_recv);
855 }
856
857 for (i = 0; i < mbdev->send_count; i++) {
858 pin_send = &(mbdev->pin_send_table[i]);
859 mtk_mbox_print_send(mbdev, pin_send);
860 }
861
862 for (i = 0; i < mbdev->count; i++) {
863 minfo = &(mbdev->info_table[i]);
864 mtk_mbox_print_minfo(mbdev, minfo);
865 }
866}
867
868
869/*
870 *mbox single receive pin information dump
871 */
872void mtk_mbox_dump_recv(struct mtk_mbox_device *mbdev, unsigned int pin)
873{
874 struct mtk_mbox_pin_recv *pin_recv;
875
876 if (mbdev) {
877 if (pin < mbdev->recv_count) {
878 pin_recv = &(mbdev->pin_recv_table[pin]);
879 mtk_mbox_print_recv(mbdev, pin_recv);
880 }
881 }
882}
883
884/*
885 *mbox single receive pin information dump
886 */
887void mtk_mbox_dump_recv_pin(struct mtk_mbox_device *mbdev,
888 struct mtk_mbox_pin_recv *pin_recv)
889{
890 unsigned int irq_reg;
891
892 if (mbdev && pin_recv) {
893 irq_reg = mtk_mbox_read_recv_irq(mbdev, pin_recv->mbox);
894 pr_err("[MBOX]dev=%s mbox=%u recv irq status=%x\n",
895 mbdev->name, pin_recv->mbox, irq_reg);
896 mtk_mbox_print_recv(mbdev, pin_recv);
897 }
898}
899
900/*
901 *mbox single send pin information dump
902 */
903void mtk_mbox_dump_send(struct mtk_mbox_device *mbdev, unsigned int pin)
904{
905 struct mtk_mbox_pin_send *pin_send;
906
907 if (mbdev) {
908 if (pin < mbdev->send_count) {
909 pin_send = &(mbdev->pin_send_table[pin]);
910 mtk_mbox_print_send(mbdev, pin_send);
911 }
912 }
913}
914
915/*
916 *mbox single mbox information dump
917 */
918void mtk_mbox_dump(struct mtk_mbox_device *mbdev, unsigned int mbox)
919{
920 struct mtk_mbox_info *minfo;
921
922 if (mbdev) {
923 if (mbox < mbdev->count) {
924 minfo = &(mbdev->info_table[mbox]);
925 mtk_mbox_print_minfo(mbdev, minfo);
926 }
927 }
928}
929
930/*
931 *mbox log enable function
932 */
933int mtk_mbox_log_enable(struct mtk_mbox_device *mbdev, bool enable)
934{
935 if (!mbdev)
936 return MBOX_PLT_ERR;
937
938 mbdev->log_enable = enable;
939 return MBOX_DONE;
940}
941
942/*
943 *mbox reset record
944 */
945void mtk_mbox_reset_record(struct mtk_mbox_device *mbdev)
946{
947 struct mtk_mbox_pin_recv *pin_recv;
948 struct mtk_mbox_info *minfo;
949 int i;
950
951 if (!mbdev)
952 return;
953
954 for (i = 0; i < mbdev->recv_count; i++) {
955 pin_recv = &(mbdev->pin_recv_table[i]);
956 pin_recv->recv_record.poll_count = 0;
957 pin_recv->recv_record.recv_irq_count = 0;
958 pin_recv->recv_record.notify_count = 0;
959 pin_recv->recv_record.cb_count = 0;
960 pin_recv->recv_record.pre_timestamp = 0;
961 pin_recv->recv_record.post_timestamp = 0;
962 }
963
964 for (i = 0; i < mbdev->count; i++) {
965 minfo = &(mbdev->info_table[i]);
966 minfo->record.write_count = 0;
967 minfo->record.busy_count = 0;
968 minfo->record.trig_irq_count = 0;
969 }
970
971}
972