   1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2015 MediaTek Inc.
4 */
5
6#include <linux/bitops.h>
7#include <linux/clk.h>
8#include <linux/clk-provider.h>
9#include <linux/dma-mapping.h>
10
11#include <linux/errno.h>
12#include <linux/interrupt.h>
13#include <linux/iopoll.h>
14#include <linux/kernel.h>
15#include <linux/mailbox_controller.h>
16#include <linux/mailbox/mtk-cmdq-mailbox.h>
17#include <linux/soc/mediatek/mtk-cmdq.h>
18#include <linux/timer.h>
19#include <linux/workqueue.h>
20#include <linux/module.h>
21#include <linux/of_device.h>
22#include <linux/atomic.h>
23#include <linux/sched/clock.h>
24#if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT)
25#include "cmdq-util.h"
26#endif
27
28/* ddp main/sub, mdp path 0/1/2/3, general(misc) */
29#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
30
31#define CMDQ_CURR_IRQ_STATUS 0x10
32#define CMDQ_CURR_LOADED_THR 0x18
33#define CMDQ_THR_SLOT_CYCLES 0x30
34#define CMDQ_THR_EXEC_CYCLES 0x34
35#define CMDQ_THR_TIMEOUT_TIMER 0x38
36#define GCE_GCTL_VALUE 0x48
37#define CMDQ_SYNC_TOKEN_ID 0x60
38#define CMDQ_SYNC_TOKEN_VAL 0x64
39#define CMDQ_SYNC_TOKEN_UPD 0x68
40#define CMDQ_PREFETCH_GSIZE 0xC0
41#define CMDQ_TPR_MASK 0xD0
42#define CMDQ_TPR_TIMEOUT_EN 0xDC
43
44#define CMDQ_THR_BASE 0x100
45#define CMDQ_THR_SIZE 0x80
46#define CMDQ_THR_WARM_RESET 0x00
47#define CMDQ_THR_ENABLE_TASK 0x04
48#define CMDQ_THR_SUSPEND_TASK 0x08
49#define CMDQ_THR_CURR_STATUS 0x0c
50#define CMDQ_THR_IRQ_STATUS 0x10
51#define CMDQ_THR_IRQ_ENABLE 0x14
52#define CMDQ_THR_CURR_ADDR 0x20
53#define CMDQ_THR_END_ADDR 0x24
54#define CMDQ_THR_CNT 0x28
55#define CMDQ_THR_WAIT_TOKEN 0x30
56#define CMDQ_THR_CFG 0x40
57#define CMDQ_THR_PREFETCH 0x44
58#define CMDQ_THR_INST_CYCLES 0x50
59#define CMDQ_THR_INST_THRESX 0x54
60#define CMDQ_THR_SPR 0x60
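/*
 * Per-thread registers sit in a 0x80-byte window starting at CMDQ_THR_BASE.
 * Illustrative sketch, mirroring the probe code and cmdq_thread_set_spr()
 * below, of how thread i and its SPRs are addressed:
 *
 *	void __iomem *thr_base = cmdq->base + CMDQ_THR_BASE + CMDQ_THR_SIZE * i;
 *	writel(val, thr_base + CMDQ_THR_SPR + spr_id * 4);
 */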
61
62#define CMDQ_THR_ENABLED 0x1
63#define CMDQ_THR_DISABLED 0x0
64#define CMDQ_THR_SUSPEND 0x1
65#define CMDQ_THR_RESUME 0x0
66#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
67#define CMDQ_THR_DO_WARM_RESET BIT(0)
68#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
69#define CMDQ_INST_CYCLE_TIMEOUT 0x0
70#define CMDQ_THR_IRQ_DONE 0x1
71#define CMDQ_THR_IRQ_ERROR 0x12
72#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
73#define CMDQ_THR_IS_WAITING BIT(31)
74#define CMDQ_THR_PRIORITY 0x7
75#define CMDQ_TPR_EN BIT(31)
76
77
78#define CMDQ_JUMP_BY_OFFSET 0x10000000
79#define CMDQ_JUMP_BY_PA 0x10000001
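/*
 * A GCE jump is a single 64-bit instruction: the high word selects the jump
 * type and the low word carries the (shifted) target address. A minimal
 * sketch of how cmdq_task_connect_buffer() below chains one packet to the
 * next:
 *
 *	u64 jump = (u64)CMDQ_JUMP_BY_PA << 32 |
 *		   CMDQ_REG_SHIFT_ADDR(next_task->pa_base);
 */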
80
81#define CMDQ_MIN_AGE_VALUE (5) /* currently disable age */
82
83#define CMDQ_DRIVER_NAME "mtk_cmdq_mbox"
84
  85/* pc and end address shift bit for gce; configured in probe */
86int gce_shift_bit;
87EXPORT_SYMBOL(gce_shift_bit);
88
89/* CMDQ log flag */
90int mtk_cmdq_log;
91EXPORT_SYMBOL(mtk_cmdq_log);
92
93int mtk_cmdq_msg;
94EXPORT_SYMBOL(mtk_cmdq_msg);
95
96int mtk_cmdq_err = 1;
97EXPORT_SYMBOL(mtk_cmdq_err);
98module_param(mtk_cmdq_log, int, 0644);
99
100int cmdq_trace;
101EXPORT_SYMBOL(cmdq_trace);
102module_param(cmdq_trace, int, 0644);
103
104struct cmdq_task {
105 struct cmdq *cmdq;
106 struct list_head list_entry;
107 dma_addr_t pa_base;
108 struct cmdq_thread *thread;
109 struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
110 u64 exec_time;
111};
112
113struct cmdq_buf_dump {
114 struct cmdq *cmdq;
115 struct work_struct dump_work;
 116	bool timeout; /* false: error, true: timeout */
117 void *cmd_buf;
118 size_t cmd_buf_size;
119 u32 pa_offset; /* pa_curr - pa_base */
120};
121
122struct cmdq {
123 struct mbox_controller mbox;
124 void __iomem *base;
125 phys_addr_t base_pa;
126 u8 hwid;
127 u32 irq;
128 u32 thread_nr;
129 u32 irq_mask;
130 struct workqueue_struct *buf_dump_wq;
131 struct cmdq_thread thread[CMDQ_THR_MAX_COUNT];
132 u32 prefetch;
133 struct clk *clock;
134 struct clk *clock_timer;
135 bool suspended;
136 atomic_t usage;
137 struct workqueue_struct *timeout_wq;
138 struct wakeup_source wake_lock;
139 bool wake_locked;
140 spinlock_t lock;
141 u32 token_cnt;
142 u16 *tokens;
143};
144
145struct gce_plat {
146 u32 thread_nr;
147 u8 shift;
148};
149
150#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
151#include "../misc/mediatek/mmp/mmprofile.h"
152#include "../misc/mediatek/mmp/mmprofile_function.h"
153
154struct cmdq_mmp_event {
155 mmp_event cmdq;
156 mmp_event cmdq_irq;
157 mmp_event loop_irq;
158 mmp_event thread_en;
159 mmp_event thread_suspend;
160 mmp_event submit;
161 mmp_event wait;
162 mmp_event warning;
163};
164
165struct cmdq_mmp_event cmdq_mmp;
166
167#define MMP_THD(t, c) ((t)->idx | ((c)->hwid << 5))
168#endif
169
170static void cmdq_init(struct cmdq *cmdq)
171{
172 int i;
173
174 cmdq_trace_ex_begin("%s", __func__);
175
176 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
177 for (i = 0; i <= CMDQ_EVENT_MAX; i++)
178 writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
179
 180	/* some events need a default value of 1 */
181 for (i = 0; i < cmdq->token_cnt; i++)
182 writel(cmdq->tokens[i] | BIT(16),
183 cmdq->base + CMDQ_SYNC_TOKEN_UPD);
184
185 cmdq_trace_ex_end();
186}
187
188static inline void cmdq_mmp_init(void)
189{
190#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
191 mmprofile_enable(1);
192 if (cmdq_mmp.cmdq) {
193 mmprofile_start(1);
194 return;
195 }
196
197 cmdq_mmp.cmdq = mmprofile_register_event(MMP_ROOT_EVENT, "CMDQ");
198 cmdq_mmp.cmdq_irq = mmprofile_register_event(cmdq_mmp.cmdq, "cmdq_irq");
199 cmdq_mmp.loop_irq = mmprofile_register_event(cmdq_mmp.cmdq, "loop_irq");
200 cmdq_mmp.thread_en =
201 mmprofile_register_event(cmdq_mmp.cmdq, "thread_en");
202 cmdq_mmp.thread_suspend =
203 mmprofile_register_event(cmdq_mmp.cmdq, "thread_suspend");
204 cmdq_mmp.submit = mmprofile_register_event(cmdq_mmp.cmdq, "submit");
205 cmdq_mmp.wait = mmprofile_register_event(cmdq_mmp.cmdq, "wait");
206 cmdq_mmp.warning = mmprofile_register_event(cmdq_mmp.cmdq, "warning");
207 mmprofile_start(1);
208#endif
209}
210
211static void cmdq_lock_wake_lock(struct cmdq *cmdq, bool lock)
212{
213 cmdq_trace_ex_begin("%s", __func__);
214
215 if (lock) {
216 if (!cmdq->wake_locked) {
217 __pm_stay_awake(&cmdq->wake_lock);
218 cmdq->wake_locked = true;
219 } else {
220 /* should not reach here */
221 cmdq_err("try lock twice cmdq:%lx",
222 (unsigned long)cmdq);
223 dump_stack();
224 }
225 } else {
226 if (cmdq->wake_locked) {
227 __pm_relax(&cmdq->wake_lock);
228 cmdq->wake_locked = false;
229 } else {
230 /* should not reach here */
231 cmdq_err("try unlock twice cmdq:%lx",
232 (unsigned long)cmdq);
233 dump_stack();
234 }
235
236 }
237
238 cmdq_trace_ex_end();
239}
240
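/*
 * GCE clock handling is reference counted through cmdq->usage: the first
 * cmdq_clk_enable() programs prefetch/TPR, takes the wake lock and runs
 * cmdq_init(); the last cmdq_clk_disable() clears them and releases the
 * wake lock. Callers pair the two, e.g. (sketch, as done in cmdq_task_exec()
 * and cmdq_thread_irq_handler()):
 *
 *	WARN_ON(cmdq_clk_enable(cmdq) < 0);
 *	...
 *	cmdq_clk_disable(cmdq);
 */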
241static s32 cmdq_clk_enable(struct cmdq *cmdq)
242{
243 s32 usage, err, err_timer;
244 unsigned long flags;
245
246 cmdq_trace_ex_begin("%s", __func__);
247
248 spin_lock_irqsave(&cmdq->lock, flags);
249
250 usage = atomic_inc_return(&cmdq->usage);
251
252 err = clk_enable(cmdq->clock);
253 if (usage <= 0 || err < 0)
254 cmdq_err("ref count error after inc:%d err:%d suspend:%s",
255 usage, err, cmdq->suspended ? "true" : "false");
256 else if (usage == 1) {
257 cmdq_log("cmdq begin mbox");
258 if (cmdq->prefetch)
259 writel(cmdq->prefetch,
260 cmdq->base + CMDQ_PREFETCH_GSIZE);
261 writel(CMDQ_TPR_EN, cmdq->base + CMDQ_TPR_MASK);
 262		/* make sure PM does not suspend */
263 cmdq_lock_wake_lock(cmdq, true);
264 cmdq_init(cmdq);
265 }
266
267 err_timer = clk_enable(cmdq->clock_timer);
268 if (err_timer < 0)
269 cmdq_err("timer clk fail:%d", err_timer);
270
271 spin_unlock_irqrestore(&cmdq->lock, flags);
272
273 cmdq_trace_ex_end();
274
275 return err;
276}
277
278static void cmdq_clk_disable(struct cmdq *cmdq)
279{
280 s32 usage;
281 unsigned long flags;
282
283 cmdq_trace_ex_begin("%s", __func__);
284
285 spin_lock_irqsave(&cmdq->lock, flags);
286
287 usage = atomic_dec_return(&cmdq->usage);
288
289 if (usage < 0) {
 290		/* print the error but still try to close */
291 cmdq_err("ref count error after dec:%d suspend:%s",
292 usage, cmdq->suspended ? "true" : "false");
293 } else if (usage == 0) {
294 cmdq_log("cmdq shutdown mbox");
295 /* clear tpr mask */
296 writel(0, cmdq->base + CMDQ_TPR_MASK);
297 writel(0x7, cmdq->base + GCE_GCTL_VALUE);
298
299 /* now allow pm suspend */
300 cmdq_lock_wake_lock(cmdq, false);
301 }
302
303 clk_disable(cmdq->clock_timer);
304 clk_disable(cmdq->clock);
305
306 spin_unlock_irqrestore(&cmdq->lock, flags);
307
308 cmdq_trace_ex_end();
309}
310
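/*
 * Note: the thread PC/END registers hold addresses in the GCE register
 * format; the CMDQ_REG_SHIFT_ADDR()/CMDQ_REG_REVERT_ADDR() helpers (from
 * mtk-cmdq-mailbox.h) translate to and from CPU dma addresses according to
 * the platform-dependent gce_shift_bit configured in probe.
 */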
311dma_addr_t cmdq_thread_get_pc(struct cmdq_thread *thread)
312{
313 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
314
315 if (atomic_read(&cmdq->usage) <= 0)
316 return 0;
317
318 return CMDQ_REG_REVERT_ADDR(readl(thread->base + CMDQ_THR_CURR_ADDR));
319}
320
321dma_addr_t cmdq_thread_get_end(struct cmdq_thread *thread)
322{
323 dma_addr_t end = readl(thread->base + CMDQ_THR_END_ADDR);
324
325 return CMDQ_REG_REVERT_ADDR(end);
326}
327
328static void cmdq_thread_set_pc(struct cmdq_thread *thread, dma_addr_t pc)
329{
330 writel(CMDQ_REG_SHIFT_ADDR(pc), thread->base + CMDQ_THR_CURR_ADDR);
331}
332
333static void cmdq_thread_set_end(struct cmdq_thread *thread, dma_addr_t end)
334{
335 writel(CMDQ_REG_SHIFT_ADDR(end), thread->base + CMDQ_THR_END_ADDR);
336}
337
338void cmdq_thread_set_spr(struct mbox_chan *chan, u8 id, u32 val)
339{
340 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
341
342 writel(val, thread->base + CMDQ_THR_SPR + id * 4);
343}
344
345static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
346{
347 u32 status;
348
349#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
350 mmprofile_log_ex(cmdq_mmp.thread_suspend, MMPROFILE_FLAG_PULSE,
351 MMP_THD(thread, cmdq), CMDQ_THR_SUSPEND);
352#endif
353 writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
354
 355	/* If the thread is already disabled, treat the suspend as successful. */
356 if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
357 return 0;
358
359 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
360 status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 100)) {
361 cmdq_err("suspend GCE thread 0x%x failed",
362 (u32)(thread->base - cmdq->base));
363 return -EFAULT;
364 }
365
366 return 0;
367}
368
369static void cmdq_thread_resume(struct cmdq_thread *thread)
370{
371#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
372 struct cmdq *cmdq = container_of(
373 thread->chan->mbox, typeof(*cmdq), mbox);
374#endif
375
376 writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
377#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
378 mmprofile_log_ex(cmdq_mmp.thread_suspend, MMPROFILE_FLAG_PULSE,
379 MMP_THD(thread, cmdq), CMDQ_THR_RESUME);
380#endif
381}
382
383static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
384{
385 u32 warm_reset;
386
387 writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
388 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
389 warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
390 0, 10)) {
391 cmdq_err("reset GCE thread %u failed", thread->idx);
392 return -EFAULT;
393 }
394 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
395 return 0;
396}
397
398static void cmdq_thread_err_reset(struct cmdq *cmdq, struct cmdq_thread *thread,
399 dma_addr_t pc, u32 thd_pri)
400{
401 u32 i, spr[4], cookie;
402 dma_addr_t end;
403
404 for (i = 0; i < 4; i++)
405 spr[i] = readl(thread->base + CMDQ_THR_SPR + i * 4);
406 end = cmdq_thread_get_end(thread);
407 cookie = readl(thread->base + CMDQ_THR_CNT);
408
409 cmdq_msg(
410 "reset backup pc:%pa end:%pa cookie:0x%08x spr:0x%x 0x%x 0x%x 0x%x",
411 &pc, &end, cookie, spr[0], spr[1], spr[2], spr[3]);
412 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
413
414 for (i = 0; i < 4; i++)
415 writel(spr[i], thread->base + CMDQ_THR_SPR + i * 4);
416 writel(CMDQ_INST_CYCLE_TIMEOUT, thread->base + CMDQ_THR_INST_CYCLES);
417 cmdq_thread_set_end(thread, end);
418 cmdq_thread_set_pc(thread, pc);
419 writel(cookie, thread->base + CMDQ_THR_CNT);
420 writel(thd_pri, thread->base + CMDQ_THR_CFG);
421 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
422 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
423#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
424 mmprofile_log_ex(cmdq_mmp.thread_en, MMPROFILE_FLAG_PULSE,
425 MMP_THD(thread, cmdq), CMDQ_THR_ENABLED);
426#endif
427}
428
429static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
430{
431#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
432 mmprofile_log_ex(cmdq_mmp.thread_en, MMPROFILE_FLAG_PULSE,
433 MMP_THD(thread, cmdq), CMDQ_THR_DISABLED);
434#endif
435 cmdq_thread_reset(cmdq, thread);
436 writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
437}
438
439/* notify GCE to re-fetch commands by setting GCE thread PC */
440static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
441{
442 cmdq_thread_set_pc(thread, cmdq_thread_get_pc(thread));
443}
444
445static void cmdq_task_connect_buffer(struct cmdq_task *task,
446 struct cmdq_task *next_task)
447{
448 u64 *task_base;
449 struct cmdq_pkt_buffer *buf;
450 u64 inst;
451
452 /* let previous task jump to this task */
453 buf = list_last_entry(&task->pkt->buf, typeof(*buf), list_entry);
454 task_base = (u64 *)(buf->va_base + CMDQ_CMD_BUFFER_SIZE -
455 task->pkt->avail_buf_size - CMDQ_INST_SIZE);
456 inst = *task_base;
457 *task_base = (u64)CMDQ_JUMP_BY_PA << 32 |
458 CMDQ_REG_SHIFT_ADDR(next_task->pa_base);
459 cmdq_log("change last inst 0x%016llx to 0x%016llx connect 0x%p -> 0x%p",
460 inst, *task_base, task->pkt, next_task->pkt);
461}
462
463static void *cmdq_task_current_va(unsigned long pa, struct cmdq_pkt *pkt)
464{
465 struct cmdq_pkt_buffer *buf;
466 u32 end;
467
468 list_for_each_entry(buf, &pkt->buf, list_entry) {
469 if (list_is_last(&buf->list_entry, &pkt->buf))
470 end = buf->pa_base + CMDQ_CMD_BUFFER_SIZE -
471 pkt->avail_buf_size;
472 else
473 end = buf->pa_base + CMDQ_CMD_BUFFER_SIZE;
474 if (pa >= buf->pa_base && pa < end)
475 return buf->va_base + (pa - buf->pa_base);
476 }
477
478 return NULL;
479}
480
481static bool cmdq_task_is_current_run(unsigned long pa, struct cmdq_pkt *pkt)
482{
483 if (cmdq_task_current_va(pa, pkt))
484 return true;
485 return false;
486}
487
488
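/*
 * Insert a new task into a running thread's busy list, ordered by packet
 * priority: walk the list from the tail, skip tasks with lower priority than
 * the new one (aging each task passed over by CMDQ_MIN_AGE_VALUE), and stop
 * at the task the GCE is currently executing or at one with equal or higher
 * priority. The previous task's trailing jump is then re-pointed at the new
 * task, and the new task jumps on to any task it displaced.
 */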
489static void cmdq_task_insert_into_thread(dma_addr_t curr_pa,
490 struct cmdq_task *task, struct list_head **insert_pos)
491{
492 struct cmdq_thread *thread = task->thread;
493 struct cmdq_task *prev_task = NULL, *next_task = NULL, *cursor_task;
494
495 list_for_each_entry_reverse(cursor_task, &thread->task_busy_list,
496 list_entry) {
497 prev_task = cursor_task;
498 if (next_task)
499 next_task->pkt->priority += CMDQ_MIN_AGE_VALUE;
500 /* stop if we found current running task */
501 if (cmdq_task_is_current_run(curr_pa, prev_task->pkt))
502 break;
503 /* stop if new task priority lower than this one */
504 if (prev_task->pkt->priority >= task->pkt->priority)
505 break;
506 next_task = prev_task;
507 }
508
509 *insert_pos = &prev_task->list_entry;
510 cmdq_task_connect_buffer(prev_task, task);
511 if (next_task && next_task != prev_task) {
512 cmdq_msg("reorder pkt:0x%p(%u) next pkt:0x%p(%u) pc:%pa",
513 task->pkt, task->pkt->priority,
514 next_task->pkt, next_task->pkt->priority, &curr_pa);
515 cmdq_task_connect_buffer(task, next_task);
516 }
517
518 cmdq_thread_invalidate_fetched_data(thread);
519}
520
521static void cmdq_task_callback(struct cmdq_pkt *pkt, s32 err)
522{
523 struct cmdq_cb_data cmdq_cb_data;
524
525 if (pkt->cb.cb) {
526 cmdq_cb_data.err = err;
527 cmdq_cb_data.data = pkt->cb.data;
528 pkt->cb.cb(cmdq_cb_data);
529 }
530}
531
532static void cmdq_task_err_callback(struct cmdq_pkt *pkt, s32 err)
533{
534 struct cmdq_cb_data cmdq_cb_data;
535
536 if (pkt->err_cb.cb) {
537 cmdq_cb_data.err = err;
538 cmdq_cb_data.data = pkt->err_cb.data;
539 pkt->err_cb.cb(cmdq_cb_data);
540 }
541}
542
543static dma_addr_t cmdq_task_get_end_pa(struct cmdq_pkt *pkt)
544{
545 struct cmdq_pkt_buffer *buf;
546
 547	/* return the end PA of the last written command in this packet */
548 buf = list_last_entry(&pkt->buf, typeof(*buf), list_entry);
549 return buf->pa_base + CMDQ_CMD_BUFFER_SIZE - pkt->avail_buf_size;
550}
551
552static void *cmdq_task_get_end_va(struct cmdq_pkt *pkt)
553{
554 struct cmdq_pkt_buffer *buf;
555
 556	/* return the end VA of the last written command in this packet */
557 buf = list_last_entry(&pkt->buf, typeof(*buf), list_entry);
558 return buf->va_base + CMDQ_CMD_BUFFER_SIZE - pkt->avail_buf_size;
559}
560
561static void cmdq_task_exec(struct cmdq_pkt *pkt, struct cmdq_thread *thread)
562{
563 struct cmdq *cmdq;
564 struct cmdq_task *task, *last_task;
565 dma_addr_t curr_pa, end_pa, dma_handle;
566 struct list_head *insert_pos;
567 struct cmdq_pkt_buffer *buf;
568
569 cmdq = dev_get_drvdata(thread->chan->mbox->dev);
570
571 /* Client should not flush new tasks if suspended. */
572 WARN_ON(cmdq->suspended);
573
574 buf = list_first_entry_or_null(&pkt->buf, typeof(*buf),
575 list_entry);
576 if (!buf) {
577 cmdq_err("no command to execute");
578 return;
579 }
580 dma_handle = buf->pa_base;
581
582 task = kzalloc(sizeof(*task), GFP_ATOMIC);
583 if (!task)
584 return;
585
586#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
587 mmprofile_log_ex(cmdq_mmp.submit, MMPROFILE_FLAG_PULSE,
588 MMP_THD(thread, cmdq), (unsigned long)pkt);
589#endif
590
591 task->cmdq = cmdq;
592 INIT_LIST_HEAD(&task->list_entry);
593 task->pa_base = dma_handle;
594 task->thread = thread;
595 task->pkt = pkt;
596 task->exec_time = sched_clock();
597
598 if (list_empty(&thread->task_busy_list)) {
599 WARN_ON(cmdq_clk_enable(cmdq) < 0);
600 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
601
602 cmdq_log("task %pa size:%zu thread->base=0x%p thread:%u",
603 &task->pa_base, pkt->cmd_buf_size, thread->base,
604 thread->idx);
605
606 writel(CMDQ_INST_CYCLE_TIMEOUT,
607 thread->base + CMDQ_THR_INST_CYCLES);
608 writel(thread->priority & CMDQ_THR_PRIORITY,
609 thread->base + CMDQ_THR_CFG);
610 cmdq_thread_set_end(thread, cmdq_task_get_end_pa(pkt));
611 cmdq_thread_set_pc(thread, task->pa_base);
612 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
613 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
614#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
615 mmprofile_log_ex(cmdq_mmp.thread_en, MMPROFILE_FLAG_PULSE,
616 MMP_THD(thread, cmdq), CMDQ_THR_ENABLED);
617#endif
618
619 cmdq_log("set pc:0x%08x end:0x%08x pkt:0x%p",
620 (u32)task->pa_base,
621 (u32)cmdq_task_get_end_pa(pkt),
622 pkt);
623
624 if (thread->timeout_ms != CMDQ_NO_TIMEOUT) {
625 mod_timer(&thread->timeout, jiffies +
626 msecs_to_jiffies(thread->timeout_ms));
627 thread->timer_mod = sched_clock();
628 }
629 list_move_tail(&task->list_entry, &thread->task_busy_list);
630 } else {
 631		/* no WARN_ON here to avoid slowing down the CPU */
632 cmdq_thread_suspend(cmdq, thread);
633 curr_pa = cmdq_thread_get_pc(thread);
634 end_pa = cmdq_thread_get_end(thread);
635
636 cmdq_log("curr task %pa~%pa thread->base:0x%p thread:%u",
637 &curr_pa, &end_pa, thread->base, thread->idx);
638
639 /* check boundary */
640 if (curr_pa == end_pa - CMDQ_INST_SIZE || curr_pa == end_pa) {
641 /* set to this task directly */
642 cmdq_thread_set_pc(thread, task->pa_base);
643 last_task = list_last_entry(&thread->task_busy_list,
644 typeof(*task), list_entry);
645 insert_pos = &last_task->list_entry;
646 cmdq_log("set pc:%pa pkt:0x%p",
647 &task->pa_base, task->pkt);
648 } else {
649 cmdq_task_insert_into_thread(curr_pa, task,
650 &insert_pos);
 651			smp_mb(); /* make the jump update visible before resuming the thread */
652 }
653 list_add(&task->list_entry, insert_pos);
654 last_task = list_last_entry(&thread->task_busy_list,
655 typeof(*task), list_entry);
656 cmdq_thread_set_end(thread,
657 cmdq_task_get_end_pa(last_task->pkt));
658 cmdq_log("set end:0x%08x pkt:0x%p",
659 (u32)cmdq_task_get_end_pa(last_task->pkt),
660 last_task->pkt);
661
662 if (thread->dirty) {
663 cmdq_err("new task during error on thread:%u",
664 thread->idx);
665 } else {
666 /* safe to go */
667 cmdq_thread_resume(thread);
668 }
669 }
670
671#if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT)
672 pkt->rec_trigger = sched_clock();
673#endif
674}
675
676static void cmdq_task_exec_done(struct cmdq_task *task, s32 err)
677{
678#if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT)
679 task->pkt->rec_irq = sched_clock();
680#endif
681 cmdq_task_callback(task->pkt, err);
682 cmdq_log("pkt:0x%p done err:%d", task->pkt, err);
683 list_del_init(&task->list_entry);
684}
685
686static void cmdq_buf_dump_schedule(struct cmdq_task *task, bool timeout,
687 u32 pa_curr)
688{
689 struct cmdq_pkt_buffer *buf;
690 u64 *inst = NULL;
691
692 list_for_each_entry(buf, &task->pkt->buf, list_entry) {
693 if (!(pa_curr >= buf->pa_base &&
694 pa_curr < buf->pa_base + CMDQ_CMD_BUFFER_SIZE)) {
695 continue;
696 }
697 inst = (u64 *)(buf->va_base + (pa_curr - buf->pa_base));
698 break;
699 }
700
701 cmdq_err("task:0x%p timeout:%s pkt:0x%p size:%zu pc:0x%08x inst:0x%016llx",
702 task, timeout ? "true" : "false", task->pkt,
703 task->pkt->cmd_buf_size, pa_curr,
704 inst ? *inst : -1);
705}
706
707static void cmdq_task_handle_error(struct cmdq_task *task)
708{
709 struct cmdq_thread *thread = task->thread;
710 struct cmdq_task *next_task;
711
712 cmdq_err("task 0x%p pkt 0x%p error", task, task->pkt);
713 WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0);
714 next_task = list_first_entry_or_null(&thread->task_busy_list,
715 struct cmdq_task, list_entry);
716 if (next_task)
717 cmdq_thread_set_pc(thread, next_task->pa_base);
718 cmdq_thread_resume(thread);
719}
720
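/*
 * Per-thread IRQ handling: acknowledge CMDQ_THR_IRQ_STATUS, translate the
 * error/done bits into an error code, then walk task_busy_list and complete
 * every task up to the instruction the thread PC points at (erroring out the
 * current task when the error bit is set). Finished tasks are moved onto
 * "removes" so the caller can free them outside the channel spinlock; when
 * the list drains, the thread and the GCE clock are switched off.
 */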
721static void cmdq_thread_irq_handler(struct cmdq *cmdq,
722 struct cmdq_thread *thread, struct list_head *removes)
723{
724 struct cmdq_task *task, *tmp, *curr_task = NULL;
725 u32 irq_flag;
726 dma_addr_t curr_pa, task_end_pa;
727 s32 err = 0;
728
729 if (atomic_read(&cmdq->usage) <= 0) {
730 cmdq_log("irq handling during gce off gce:%lx thread:%u",
731 (unsigned long)cmdq->base_pa, thread->idx);
732 return;
733 }
734
735 irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
736 writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
737
738 cmdq_log("irq flag:%#x gce:%lx idx:%u",
739 irq_flag, (unsigned long)cmdq->base_pa, thread->idx);
740
741 /*
 742	 * When the ISR calls this function, another CPU core could run
 743	 * "release task" right before we acquire the spin lock and thus
 744	 * reset / disable this GCE thread, so we need to check the enable
 745	 * bit of this GCE thread.
746 */
747 if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
748 return;
749
750 if (irq_flag & CMDQ_THR_IRQ_ERROR)
751 err = -EINVAL;
752 else if (irq_flag & CMDQ_THR_IRQ_DONE)
753 err = 0;
754 else
755 return;
756
757 if (list_empty(&thread->task_busy_list))
758 cmdq_err("empty! may we hang later?");
759
760 curr_pa = cmdq_thread_get_pc(thread);
761 task_end_pa = cmdq_thread_get_end(thread);
762
763 if (err < 0)
764 cmdq_err("pc:%pa end:%pa err:%d gce base:%lx",
765 &curr_pa, &task_end_pa, err,
766 (unsigned long)cmdq->base_pa);
767
768 cmdq_log("task status %pa~%pa err:%d",
769 &curr_pa, &task_end_pa, err);
770
771 task = list_first_entry_or_null(&thread->task_busy_list,
772 struct cmdq_task, list_entry);
773 if (task && task->pkt->loop) {
 774		cmdq_log("task loop %p", task->pkt);
775 cmdq_task_callback(task->pkt, err);
776
777#if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT)
778 task->pkt->rec_irq = sched_clock();
779#endif
780
781#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
782 mmprofile_log_ex(cmdq_mmp.loop_irq, MMPROFILE_FLAG_PULSE,
783 MMP_THD(thread, cmdq), (unsigned long)task->pkt);
784#endif
785
786 return;
787 }
788
789 if (thread->dirty) {
790 cmdq_log("task in error dump thread:%u pkt:0x%p",
791 thread->idx, task ? task->pkt : NULL);
792 return;
793 }
794
795#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
796 mmprofile_log_ex(cmdq_mmp.cmdq_irq, MMPROFILE_FLAG_PULSE,
797 MMP_THD(thread, cmdq), task ? (unsigned long)task->pkt : 0);
798#endif
799
800 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
801 list_entry) {
802 task_end_pa = cmdq_task_get_end_pa(task->pkt);
803 if (cmdq_task_is_current_run(curr_pa, task->pkt))
804 curr_task = task;
805
806 if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
807 if (curr_task && (curr_pa != task_end_pa)) {
808 cmdq_log(
809 "remove task that not ending pkt:0x%p %pa to %pa",
810 curr_task->pkt, &curr_pa, &task_end_pa);
811 }
812 cmdq_task_exec_done(task, 0);
813 list_add_tail(&task->list_entry, removes);
814 } else if (err) {
815 cmdq_err("pkt:0x%p thread:%u err:%d",
816 curr_task->pkt, thread->idx, err);
817 cmdq_buf_dump_schedule(task, false, curr_pa);
818 cmdq_task_exec_done(task, err);
819 cmdq_task_handle_error(curr_task);
820 list_add_tail(&task->list_entry, removes);
821 }
822
823 if (curr_task)
824 break;
825 }
826
827 task = list_first_entry_or_null(&thread->task_busy_list,
828 struct cmdq_task, list_entry);
829 if (!task) {
830 cmdq_thread_disable(cmdq, thread);
831 cmdq_clk_disable(cmdq);
832
833 cmdq_log("empty task thread:%u", thread->idx);
834 } else {
835 mod_timer(&thread->timeout, jiffies +
836 msecs_to_jiffies(thread->timeout_ms));
837 thread->timer_mod = sched_clock();
838 cmdq_log("mod_timer pkt:0x%p timeout:%u thread:%u",
839 task->pkt, thread->timeout_ms, thread->idx);
840 }
841}
842
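/*
 * Top-level ISR: CMDQ_CURR_IRQ_STATUS reports one bit per thread and a bit
 * value of 0 means "pending" here, which is why the loop below uses
 * for_each_clear_bit() and an all-ones (masked) status is treated as nothing
 * to handle. Threads not occupied by this driver are assumed to belong to the
 * secure world and are left alone.
 */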
843static irqreturn_t cmdq_irq_handler(int irq, void *dev)
844{
845 struct cmdq *cmdq = dev;
846 unsigned long irq_status, flags = 0L;
847 int bit;
848 bool secure_irq = false;
849 struct cmdq_task *task, *tmp;
850 struct list_head removes;
851
852 if (atomic_read(&cmdq->usage) <= 0) {
853 cmdq_msg("%s cmdq:%#lx suspend:%s",
854 __func__, (unsigned long)cmdq->base_pa,
855 cmdq->suspended ? "true" : "false");
856 return IRQ_HANDLED;
857 }
858
859 irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
860 cmdq_log("gce:%lx irq: %#x, %#x",
861 (unsigned long)cmdq->base_pa, (u32)irq_status,
862 (u32)(irq_status ^ cmdq->irq_mask));
863 if (!(irq_status ^ cmdq->irq_mask)) {
864 cmdq_msg("not handle for empty status:0x%x",
865 (u32)irq_status);
866 return IRQ_NONE;
867 }
868
869 INIT_LIST_HEAD(&removes);
870 for_each_clear_bit(bit, &irq_status, fls(cmdq->irq_mask)) {
871 struct cmdq_thread *thread = &cmdq->thread[bit];
872
873 cmdq_log("bit=%d, thread->base=%p", bit, thread->base);
874 if (!thread->occupied) {
875 secure_irq = true;
876 continue;
877 }
878
879 spin_lock_irqsave(&thread->chan->lock, flags);
880 cmdq_thread_irq_handler(cmdq, thread, &removes);
881 spin_unlock_irqrestore(&thread->chan->lock, flags);
882 }
883
884 list_for_each_entry_safe(task, tmp, &removes, list_entry) {
885 list_del(&task->list_entry);
886 kfree(task);
887 }
888
889 return secure_irq ? IRQ_NONE : IRQ_HANDLED;
890}
891
 892static bool cmdq_thread_timeout_exceeded(struct cmdq_thread *thread)
893{
894 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
895 u64 duration;
896
 897	/* If the elapsed time since the timer was armed is smaller than the
 898	 * timeout value, this is a stale timeout left from the previous round. Skip it.
 899	 */
900 duration = div_s64(sched_clock() - thread->timer_mod, 1000000);
901 if (duration < thread->timeout_ms) {
902 mod_timer(&thread->timeout, jiffies +
903 msecs_to_jiffies(thread->timeout_ms - duration));
904 thread->timer_mod = sched_clock();
905 cmdq_msg(
 906			"thread:%u usage:%d mod time:%llu dur:%llu timeout not exceeded",
907 thread->idx, atomic_read(&cmdq->usage),
908 thread->timer_mod, duration);
909 return false;
910 }
911
912 return true;
913}
914
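/*
 * Timeout handling is split in two: the timer callback further below only
 * queues this work, and the work then suspends the thread, replays any IRQ
 * that raced with the timeout, re-checks the elapsed time, and finally errors
 * out the task the PC points at with -ETIMEDOUT before resetting the thread
 * onto the next task (or shutting it down if the busy list is empty).
 */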
915static void cmdq_thread_handle_timeout_work(struct work_struct *work_item)
916{
917 struct cmdq_thread *thread = container_of(work_item,
918 struct cmdq_thread, timeout_work);
919 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
920 struct cmdq_task *task, *tmp, *timeout_task = NULL;
921 unsigned long flags;
922 bool first_task = true;
923 u32 pa_curr;
924 struct list_head removes;
925
926 INIT_LIST_HEAD(&removes);
927
928 spin_lock_irqsave(&thread->chan->lock, flags);
929 if (list_empty(&thread->task_busy_list)) {
930 spin_unlock_irqrestore(&thread->chan->lock, flags);
931 return;
932 }
933
 934	/* Check before suspending the thread to avoid hurting performance. */
 935	if (!cmdq_thread_timeout_exceeded(thread)) {
936 spin_unlock_irqrestore(&thread->chan->lock, flags);
937 return;
938 }
939
940 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
941
942 /*
 943	 * Although the IRQ is disabled, the GCE continues to execute.
 944	 * An IRQ may have been pending before the GCE thread was suspended,
 945	 * so check this condition again.
946 */
947 cmdq_thread_irq_handler(cmdq, thread, &removes);
948
949 if (list_empty(&thread->task_busy_list)) {
950 cmdq_err("thread:%u empty after irq handle in timeout",
951 thread->idx);
952 goto unlock_free_done;
953 }
954
955 /* After IRQ, first task may change. */
 956	if (!cmdq_thread_timeout_exceeded(thread)) {
957 cmdq_thread_resume(thread);
958 goto unlock_free_done;
959 }
960
961 cmdq_err("timeout for thread:0x%p idx:%u usage:%d",
962 thread->base, thread->idx, atomic_read(&cmdq->usage));
963
964 pa_curr = cmdq_thread_get_pc(thread);
965
966 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
967 list_entry) {
968 bool curr_task = cmdq_task_is_current_run(pa_curr, task->pkt);
969
970 if (first_task) {
971 cmdq_buf_dump_schedule(task, true, pa_curr);
972 first_task = false;
973 }
974
975 if (curr_task) {
976 timeout_task = task;
977 break;
978 }
979
980 cmdq_msg("ending not curr in timeout pkt:0x%p curr_pa:0x%08x",
981 task->pkt, pa_curr);
982 cmdq_task_exec_done(task, 0);
983 kfree(task);
984 }
985
986#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
987 mmprofile_log_ex(cmdq_mmp.warning, MMPROFILE_FLAG_PULSE,
988 MMP_THD(thread, cmdq),
989 timeout_task ? (unsigned long)timeout_task : 0);
990#endif
991
992 if (timeout_task) {
993 thread->dirty = true;
994 spin_unlock_irqrestore(&thread->chan->lock, flags);
995
996 cmdq_task_err_callback(timeout_task->pkt, -ETIMEDOUT);
997
998 spin_lock_irqsave(&thread->chan->lock, flags);
999 thread->dirty = false;
1000
1001 task = list_first_entry_or_null(&thread->task_busy_list,
1002 struct cmdq_task, list_entry);
1003 if (timeout_task == task) {
1004 cmdq_task_exec_done(task, -ETIMEDOUT);
1005 kfree(task);
1006 } else {
1007 cmdq_err("task list changed");
1008 }
1009 }
1010
1011 task = list_first_entry_or_null(&thread->task_busy_list,
1012 struct cmdq_task, list_entry);
1013 if (task) {
1014 mod_timer(&thread->timeout, jiffies +
1015 msecs_to_jiffies(thread->timeout_ms));
1016 thread->timer_mod = sched_clock();
1017 cmdq_thread_err_reset(cmdq, thread,
1018 task->pa_base, thread->priority);
1019 cmdq_thread_resume(thread);
1020 } else {
1021 cmdq_thread_resume(thread);
1022 cmdq_thread_disable(cmdq, thread);
1023 cmdq_clk_disable(cmdq);
1024 }
1025
1026unlock_free_done:
1027 spin_unlock_irqrestore(&thread->chan->lock, flags);
1028
1029 list_for_each_entry_safe(task, tmp, &removes, list_entry) {
1030 list_del(&task->list_entry);
1031 kfree(task);
1032 }
1033}

1034static void cmdq_thread_handle_timeout(struct timer_list *t)
1035{
1036 struct cmdq_thread *thread = from_timer(thread, t, timeout);
1037 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
1038 unsigned long flags;
1039 bool empty;
1040
1041 spin_lock_irqsave(&thread->chan->lock, flags);
1042 empty = list_empty(&thread->task_busy_list);
1043 spin_unlock_irqrestore(&thread->chan->lock, flags);
1044 if (empty)
1045 return;
1046
1047 if (!work_pending(&thread->timeout_work)) {
1048 cmdq_log("queue cmdq timeout thread:%u", thread->idx);
1049 queue_work(cmdq->timeout_wq, &thread->timeout_work);
1050 } else {
1051 cmdq_msg("ignore cmdq timeout thread:%u", thread->idx);
1052 }
1053}
1054
1055void cmdq_dump_core(struct mbox_chan *chan)
1056{
1057 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
1058 u32 irq, loaded, cycle, thd_timer, tpr_mask, tpr_en;
1059
1060 irq = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS);
1061 loaded = readl(cmdq->base + CMDQ_CURR_LOADED_THR);
1062 cycle = readl(cmdq->base + CMDQ_THR_EXEC_CYCLES);
1063 thd_timer = readl(cmdq->base + CMDQ_THR_TIMEOUT_TIMER);
1064 tpr_mask = readl(cmdq->base + CMDQ_TPR_MASK);
1065 tpr_en = readl(cmdq->base + CMDQ_TPR_TIMEOUT_EN);
1066
1067 cmdq_util_msg(
1068 "irq:%#x loaded:%#x cycle:%#x thd timer:%#x mask:%#x en:%#x",
1069 irq, loaded, cycle, thd_timer, tpr_mask, tpr_en);
1070#if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT)
1071 cmdq_util_dump_dbg_reg(chan);
1072#endif
1073}
1074EXPORT_SYMBOL(cmdq_dump_core);
1075
1076void cmdq_thread_dump(struct mbox_chan *chan, struct cmdq_pkt *cl_pkt,
1077 u64 **inst_out, dma_addr_t *pc_out)
1078{
1079 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
1080 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
1081 unsigned long flags;
1082 struct cmdq_task *task;
1083 struct cmdq_pkt_buffer *buf;
1084
1085 struct cmdq_pkt *pkt = NULL;
1086 u32 warn_rst, en, suspend, status, irq, irq_en, curr_pa, end_pa, cnt,
1087 wait_token, cfg, prefetch, pri = 0, spr[4];
1088 size_t size = 0;
1089 u64 *end_va, *curr_va = NULL, inst = 0, last_inst[2] = {0};
1090 void *va_base = NULL;
1091 dma_addr_t pa_base;
1092 bool empty = true;
1093
1094 /* lock channel and get info */
1095 spin_lock_irqsave(&chan->lock, flags);
1096
1097 if (atomic_read(&cmdq->usage) <= 0) {
1098 cmdq_err("%s gce off cmdq:%p thread:%u",
1099 __func__, cmdq, thread->idx);
1100 dump_stack();
1101 spin_unlock_irqrestore(&chan->lock, flags);
1102 return;
1103 }
1104
1105 warn_rst = readl(thread->base + CMDQ_THR_WARM_RESET);
1106 en = readl(thread->base + CMDQ_THR_ENABLE_TASK);
1107 suspend = readl(thread->base + CMDQ_THR_SUSPEND_TASK);
1108 status = readl(thread->base + CMDQ_THR_CURR_STATUS);
1109 irq = readl(thread->base + CMDQ_THR_IRQ_STATUS);
1110 irq_en = readl(thread->base + CMDQ_THR_IRQ_ENABLE);
1111 curr_pa = cmdq_thread_get_pc(thread);
1112 end_pa = cmdq_thread_get_end(thread);
1113 cnt = readl(thread->base + CMDQ_THR_CNT);
1114 wait_token = readl(thread->base + CMDQ_THR_WAIT_TOKEN);
1115 cfg = readl(thread->base + CMDQ_THR_CFG);
1116 prefetch = readl(thread->base + CMDQ_THR_PREFETCH);
1117 spr[0] = readl(thread->base + CMDQ_THR_SPR);
1118 spr[1] = readl(thread->base + CMDQ_THR_SPR + 4);
1119 spr[2] = readl(thread->base + CMDQ_THR_SPR + 8);
1120 spr[3] = readl(thread->base + CMDQ_THR_SPR + 12);
1121
1122 list_for_each_entry(task, &thread->task_busy_list, list_entry) {
1123 empty = false;
1124
1125 if (curr_pa == cmdq_task_get_end_pa(task->pkt))
1126 curr_va = (u64 *)cmdq_task_get_end_va(task->pkt);
1127 else
1128 curr_va = (u64 *)cmdq_task_current_va(curr_pa,
1129 task->pkt);
1130 if (!curr_va)
1131 continue;
1132 inst = *curr_va;
1133 pkt = task->pkt;
1134 size = pkt->cmd_buf_size;
1135 pri = pkt->priority;
1136
1137 buf = list_first_entry(&pkt->buf, typeof(*buf), list_entry);
1138 va_base = buf->va_base;
1139 pa_base = buf->pa_base;
1140
1141 buf = list_last_entry(&pkt->buf, typeof(*buf), list_entry);
1142 end_va = (u64 *)(buf->va_base + CMDQ_CMD_BUFFER_SIZE -
1143 pkt->avail_buf_size - CMDQ_INST_SIZE * 2);
1144 last_inst[0] = *end_va;
1145 last_inst[1] = *++end_va;
1146 break;
1147 }
1148 spin_unlock_irqrestore(&chan->lock, flags);
1149
1150 cmdq_util_msg(
1151 "thd:%u pc:%#010x(%p) inst:%#018llx end:%#010x cnt:%#x token:%#010x",
1152 thread->idx, curr_pa, curr_va, inst, end_pa, cnt, wait_token);
1153 cmdq_util_msg(
1154 "rst:%#x en:%#x suspend:%#x status:%#x irq:%x en:%#x cfg:%#x",
1155 warn_rst, en, suspend, status, irq, irq_en, cfg);
1156 cmdq_util_msg("spr:%#x %#x %#x %#x",
1157 spr[0], spr[1], spr[2], spr[3]);
1158 if (pkt) {
1159 cmdq_util_msg(
1160 "cur pkt:0x%p size:%zu va:0x%p pa:%pa priority:%u",
1161 pkt, size, va_base, &pa_base, pri);
1162 cmdq_util_msg("last inst %#018llx %#018llx",
1163 last_inst[0], last_inst[1]);
1164
1165 if (cl_pkt && cl_pkt != pkt) {
1166 buf = list_first_entry(&cl_pkt->buf, typeof(*buf),
1167 list_entry);
1168 cmdq_util_msg(
1169 "expect pkt:0x%p size:%zu va:0x%p pa:%pa priority:%u",
1170 cl_pkt, cl_pkt->cmd_buf_size, buf->va_base,
1171 &buf->pa_base, cl_pkt->priority);
1172
1173 curr_va = NULL;
1174 curr_pa = 0;
1175 }
1176 } else {
1177 /* empty or not found case is critical */
1178 cmdq_util_msg("pkt not available (%s)",
1179 empty ? "thread empty" : "pc not match");
1180 }
1181
1182	/* if the pc matches end and the irq flag is set, dump the irq status */
1183 if (curr_pa == end_pa && irq)
1184#ifdef CONFIG_MTK_GIC_V3_EXT
1185 mt_irq_dump_status(cmdq->irq);
1186#else
1187 cmdq_util_msg("gic dump not support irq id:%u\n",
1188 cmdq->irq);
1189#endif
1190
1191 if (inst_out)
1192 *inst_out = curr_va;
1193 if (pc_out)
1194 *pc_out = curr_pa;
1195}
1196EXPORT_SYMBOL(cmdq_thread_dump);
1197
1198void cmdq_thread_dump_all(void *mbox_cmdq)
1199{
1200 struct cmdq *cmdq = mbox_cmdq;
1201 u32 i;
1202 u32 en, curr_pa, end_pa;
1203 s32 usage = atomic_read(&cmdq->usage);
1204
1205 cmdq_util_msg("cmdq:%#x usage:%d", (u32)cmdq->base_pa, usage);
1206 if (usage <= 0)
1207 return;
1208
1209 for (i = 0; i < cmdq->thread_nr; i++) {
1210 struct cmdq_thread *thread = &cmdq->thread[i];
1211
1212 if (!thread->occupied || list_empty(&thread->task_busy_list))
1213 continue;
1214
1215 en = readl(thread->base + CMDQ_THR_ENABLE_TASK);
1216 if (!en)
1217 continue;
1218
1219 curr_pa = cmdq_thread_get_pc(thread);
1220 end_pa = cmdq_thread_get_end(thread);
1221
1222 cmdq_util_msg("thd idx:%u pc:%#x end:%#x",
1223 thread->idx, curr_pa, end_pa);
1224 }
1225
1226}
1227EXPORT_SYMBOL(cmdq_thread_dump_all);
1228
1229void cmdq_thread_dump_all_seq(void *mbox_cmdq, struct seq_file *seq)
1230{
1231 struct cmdq *cmdq = mbox_cmdq;
1232 u32 i;
1233 u32 en, curr_pa, end_pa;
1234 s32 usage = atomic_read(&cmdq->usage);
1235
1236 seq_printf(seq, "[cmdq] cmdq:%#x usage:%d\n",
1237 (u32)cmdq->base_pa, usage);
1238 if (usage <= 0)
1239 return;
1240
1241 for (i = 0; i < cmdq->thread_nr; i++) {
1242 struct cmdq_thread *thread = &cmdq->thread[i];
1243
1244 if (!thread->occupied || list_empty(&thread->task_busy_list))
1245 continue;
1246
1247 en = readl(thread->base + CMDQ_THR_ENABLE_TASK);
1248 if (!en)
1249 continue;
1250
1251 curr_pa = cmdq_thread_get_pc(thread);
1252 end_pa = cmdq_thread_get_end(thread);
1253
1254 seq_printf(seq, "[cmdq] thd idx:%u pc:%#x end:%#x\n",
1255 thread->idx, curr_pa, end_pa);
1256 }
1257
1258}
1259EXPORT_SYMBOL(cmdq_thread_dump_all_seq);
1260
1261void cmdq_mbox_thread_remove_task(struct mbox_chan *chan,
1262 struct cmdq_pkt *pkt)
1263{
1264 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
1265 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
1266 struct cmdq_task *task, *tmp;
1267 unsigned long flags;
1268 u32 pa_curr;
1269 bool curr_task = false;
1270 bool last_task = false;
1271 struct list_head removes;
1272
1273 INIT_LIST_HEAD(&removes);
1274
1275 spin_lock_irqsave(&thread->chan->lock, flags);
1276 if (list_empty(&thread->task_busy_list)) {
1277 spin_unlock_irqrestore(&thread->chan->lock, flags);
1278 return;
1279 }
1280
1281 cmdq_msg("remove task from thread idx:%u usage:%d pkt:0x%p",
1282 thread->idx, atomic_read(&cmdq->usage), pkt);
1283
1284 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
1285
1286 /*
1287	 * Although the IRQ is disabled, the GCE continues to execute.
1288	 * An IRQ may have been pending before the GCE thread was suspended,
1289	 * so check this condition again.
1290 */
1291 cmdq_thread_irq_handler(cmdq, thread, &removes);
1292
1293 if (list_empty(&thread->task_busy_list)) {
1294 cmdq_err("thread:%u empty after irq handle in timeout",
1295 thread->idx);
1296 cmdq_thread_resume(thread);
1297 spin_unlock_irqrestore(&thread->chan->lock, flags);
1298 return;
1299 }
1300
1301 pa_curr = cmdq_thread_get_pc(thread);
1302
1303 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
1304 list_entry) {
1305 if (task->pkt != pkt)
1306 continue;
1307
1308 curr_task = cmdq_task_is_current_run(pa_curr, task->pkt);
1309 if (list_is_last(&task->list_entry, &thread->task_busy_list))
1310 last_task = true;
1311
1312 if (task == list_first_entry(&thread->task_busy_list,
1313 typeof(*task), list_entry) &&
1314 thread->dirty) {
1315 /* task during error handling, skip */
1316 spin_unlock_irqrestore(&thread->chan->lock, flags);
1317 return;
1318 }
1319
1320 cmdq_task_exec_done(task, curr_task ? -ECONNABORTED : 0);
1321 kfree(task);
1322 break;
1323 }
1324
1325 if (list_empty(&thread->task_busy_list)) {
1326 cmdq_thread_resume(thread);
1327 cmdq_thread_disable(cmdq, thread);
1328 cmdq_clk_disable(cmdq);
1329 spin_unlock_irqrestore(&thread->chan->lock, flags);
1330 return;
1331 }
1332
1333 if (curr_task) {
1334 task = list_first_entry(&thread->task_busy_list,
1335 typeof(*task), list_entry);
1336
1337 cmdq_thread_set_pc(thread, task->pa_base);
1338 mod_timer(&thread->timeout, jiffies +
1339 msecs_to_jiffies(thread->timeout_ms));
1340 thread->timer_mod = sched_clock();
1341 }
1342
1343 if (last_task) {
1344		/* reset the end address again if the last task was removed */
1345 task = list_last_entry(&thread->task_busy_list,
1346 typeof(*task), list_entry);
1347 cmdq_thread_set_end(thread, cmdq_task_get_end_pa(task->pkt));
1348 }
1349
1350 cmdq_thread_resume(thread);
1351
1352 spin_unlock_irqrestore(&thread->chan->lock, flags);
1353
1354 list_for_each_entry_safe(task, tmp, &removes, list_entry) {
1355 list_del(&task->list_entry);
1356 kfree(task);
1357 }
1358}
1359EXPORT_SYMBOL(cmdq_mbox_thread_remove_task);
1360
1361static void cmdq_mbox_thread_stop(struct cmdq_thread *thread)
1362{
1363 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq, mbox);
1364 struct cmdq_task *task, *tmp;
1365 unsigned long flags;
1366 struct list_head removes;
1367
1368 INIT_LIST_HEAD(&removes);
1369
1370 spin_lock_irqsave(&thread->chan->lock, flags);
1371 if (list_empty(&thread->task_busy_list)) {
1372 cmdq_err("stop empty thread:%u", thread->idx);
1373 spin_unlock_irqrestore(&thread->chan->lock, flags);
1374 return;
1375 }
1376
1377 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
1378
1379 /*
1380	 * Although the IRQ is disabled, the GCE continues to execute.
1381	 * An IRQ may have been pending before the GCE thread was suspended,
1382	 * so check this condition again.
1383 */
1384 cmdq_thread_irq_handler(cmdq, thread, &removes);
1385 if (list_empty(&thread->task_busy_list)) {
1386 cmdq_err("thread:%u empty after irq handle in disable thread",
1387 thread->idx);
1388 cmdq_thread_resume(thread);
1389 spin_unlock_irqrestore(&thread->chan->lock, flags);
1390 return;
1391 }
1392
1393 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
1394 list_entry) {
1395 cmdq_task_exec_done(task, -ECONNABORTED);
1396 kfree(task);
1397 }
1398
1399 cmdq_thread_disable(cmdq, thread);
1400 cmdq_clk_disable(cmdq);
1401 spin_unlock_irqrestore(&thread->chan->lock, flags);
1402
1403 list_for_each_entry_safe(task, tmp, &removes, list_entry) {
1404 list_del(&task->list_entry);
1405 kfree(task);
1406 }
1407}
1408
1409void cmdq_mbox_channel_stop(struct mbox_chan *chan)
1410{
1411 cmdq_mbox_thread_stop(chan->con_priv);
1412}
1413EXPORT_SYMBOL(cmdq_mbox_channel_stop);
1414
1415static int cmdq_suspend(struct device *dev)
1416{
1417 struct cmdq *cmdq = dev_get_drvdata(dev);
1418 struct cmdq_thread *thread;
1419 int i;
1420 bool task_running = false;
1421
1422 cmdq->suspended = true;
1423
1424 for (i = 0; i < cmdq->thread_nr; i++) {
1425 thread = &cmdq->thread[i];
1426 if (!list_empty(&thread->task_busy_list)) {
1427 cmdq_mbox_thread_stop(thread);
1428 task_running = true;
1429 cmdq_err("thread %d running", i);
1430 }
1431 }
1432
1433 if (task_running) {
1434		dev_warn(dev, "running task(s) exist during suspend\n");
1435 schedule();
1436 }
1437
1438 clk_unprepare(cmdq->clock_timer);
1439 clk_unprepare(cmdq->clock);
1440 return 0;
1441}
1442
1443static int cmdq_resume(struct device *dev)
1444{
1445 struct cmdq *cmdq = dev_get_drvdata(dev);
1446
1447 WARN_ON(clk_prepare(cmdq->clock) < 0);
1448 WARN_ON(clk_prepare(cmdq->clock_timer) < 0);
1449 cmdq->suspended = false;
1450 return 0;
1451}
1452
1453static int cmdq_remove(struct platform_device *pdev)
1454{
1455 struct cmdq *cmdq = platform_get_drvdata(pdev);
1456
1457 destroy_workqueue(cmdq->buf_dump_wq);
1458 mbox_controller_unregister(&cmdq->mbox);
1459 clk_unprepare(cmdq->clock_timer);
1460 clk_unprepare(cmdq->clock);
1461 return 0;
1462}
1463
1464static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
1465{
1466 cmdq_trace_begin("%s", __func__);
1467 cmdq_task_exec(data, chan->con_priv);
1468 cmdq_trace_end();
1469 return 0;
1470}
1471
1472static int cmdq_mbox_startup(struct mbox_chan *chan)
1473{
1474 struct cmdq_thread *thread = chan->con_priv;
1475
1476 thread->occupied = true;
1477 return 0;
1478}
1479
1480static void cmdq_mbox_shutdown(struct mbox_chan *chan)
1481{
1482 struct cmdq_thread *thread = chan->con_priv;
1483
1484 cmdq_mbox_thread_stop(chan->con_priv);
1485 thread->occupied = false;
1486}
1487
1488static bool cmdq_mbox_last_tx_done(struct mbox_chan *chan)
1489{
1490 return true;
1491}
1492
1493static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
1494 .send_data = cmdq_mbox_send_data,
1495 .startup = cmdq_mbox_startup,
1496 .shutdown = cmdq_mbox_shutdown,
1497 .last_tx_done = cmdq_mbox_last_tx_done,
1498};
1499
1500u32 cmdq_thread_timeout_backup(struct cmdq_thread *thread, const u32 ms)
1501{
1502 u32 backup = thread->timeout_ms;
1503
1504 thread->timeout_ms = ms;
1505 return backup;
1506}
1507EXPORT_SYMBOL(cmdq_thread_timeout_backup);
1508
1509void cmdq_thread_timeout_restore(struct cmdq_thread *thread, const u32 ms)
1510{
1511 thread->timeout_ms = ms;
1512}
1513EXPORT_SYMBOL(cmdq_thread_timeout_restore);
1514
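/*
 * Channel lookup from a client's mbox phandle: the cells are
 * <thread index, timeout in ms (0 selects CMDQ_TIMEOUT_DEFAULT), priority>.
 * A hypothetical client node (illustrative only, numbers are not taken from
 * any binding in this file) would therefore look roughly like:
 *
 *	mboxes = <&gce 0 3000 1>;	// thread 0, 3 s timeout, priority 1
 */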
1515static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
1516 const struct of_phandle_args *sp)
1517{
1518 int ind = sp->args[0];
1519 struct cmdq_thread *thread;
1520
1521 if (ind >= mbox->num_chans)
1522 return ERR_PTR(-EINVAL);
1523
1524 thread = mbox->chans[ind].con_priv;
1525 thread->timeout_ms = sp->args[1] != 0 ?
1526 sp->args[1] : CMDQ_TIMEOUT_DEFAULT;
1527 thread->priority = sp->args[2];
1528 thread->chan = &mbox->chans[ind];
1529
1530 return &mbox->chans[ind];
1531}
1532
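/*
 * Each "prefetch_size" entry from the device tree is encoded as a 4-bit field
 * of (size / 32 - 1) and packed into cmdq->prefetch, which cmdq_clk_enable()
 * later writes to CMDQ_PREFETCH_GSIZE. Sketch of the encoding, assuming two
 * hypothetical entries of 64 and 128:
 *
 *	prefetch = (64 / 32 - 1) << 0 | (128 / 32 - 1) << 4;	// 0x31
 */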
1533static s32 cmdq_config_prefetch(struct device_node *np, struct cmdq *cmdq)
1534{
1535 u32 i, prefetch_cnt = 0, prefetchs[4] = {0};
1536 s32 err;
1537
1538 cmdq->prefetch = 0;
1539 of_property_read_u32(np, "max_prefetch_cnt", &prefetch_cnt);
1540 if (!prefetch_cnt)
1541 return 0;
1542
1543 if (prefetch_cnt > ARRAY_SIZE(prefetchs)) {
1544		cmdq_err("prefetch count larger than expected:%u",
1545 prefetch_cnt);
1546 prefetch_cnt = ARRAY_SIZE(prefetchs);
1547 }
1548
1549 err = of_property_read_u32_array(np, "prefetch_size",
1550 prefetchs, prefetch_cnt);
1551 if (err != 0) {
1552		/* log the error and return without applying the hw setting */
1553 cmdq_err("read prefetch count:%u size error:%d",
1554 prefetch_cnt, err);
1555 return -EINVAL;
1556 }
1557
1558 if (!prefetch_cnt)
1559 return 0;
1560
1561 for (i = 0; i < prefetch_cnt; i++)
1562 cmdq->prefetch |= (prefetchs[i] / 32 - 1) << (i * 4);
1563
1564 cmdq_msg("prefetch size configure:0x%x", cmdq->prefetch);
1565 return 0;
1566}
1567
1568static void cmdq_config_dma_mask(struct device *dev)
1569{
1570 u32 dma_mask_bit = 0;
1571 s32 ret;
1572
1573 ret = of_property_read_u32(dev->of_node, "dma_mask_bit",
1574 &dma_mask_bit);
1575	/* if not assigned from dts, default to 32-bit for legacy chips */
1576 if (ret != 0 || !dma_mask_bit)
1577 dma_mask_bit = 32;
1578 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(dma_mask_bit));
1579 cmdq_msg("mbox set dma mask bit:%u result:%d\n",
1580 dma_mask_bit, ret);
1581}
1582
1583static void cmdq_config_default_token(struct device *dev, struct cmdq *cmdq)
1584{
1585 int count, ret;
1586
1587 count = of_property_count_u16_elems(dev->of_node, "default_tokens");
1588 if (count <= 0) {
1589 cmdq_err("no default tokens:%d", count);
1590 return;
1591 }
1592
1593 cmdq->token_cnt = count;
1594 cmdq->tokens = devm_kcalloc(dev, count, sizeof(*cmdq->tokens),
1595 GFP_KERNEL);
1596 ret = of_property_read_u16_array(dev->of_node,
1597 "default_tokens", cmdq->tokens, count);
1598 if (ret < 0) {
1599 cmdq_err("of_property_read_u16_array fail err:%d", ret);
1600 cmdq->token_cnt = 0;
1601 }
1602}
1603
1604static int cmdq_probe(struct platform_device *pdev)
1605{
1606 struct device *dev = &pdev->dev;
1607 struct resource *res;
1608 struct cmdq *cmdq;
1609 int err, i;
1610 struct gce_plat *plat_data;
1611
1612 cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
1613 if (!cmdq)
1614 return -ENOMEM;
1615
1616 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1617 cmdq->base = devm_ioremap_resource(dev, res);
1618 cmdq->base_pa = res->start;
1619 if (IS_ERR(cmdq->base)) {
1620 cmdq_err("failed to ioremap gce");
1621 return PTR_ERR(cmdq->base);
1622 }
1623
1624 cmdq->irq = platform_get_irq(pdev, 0);
1625 if (!cmdq->irq) {
1626 cmdq_err("failed to get irq");
1627 return -EINVAL;
1628 }
1629 err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
1630 "mtk_cmdq", cmdq);
1631 if (err < 0) {
1632 cmdq_err("failed to register ISR (%d)", err);
1633 return err;
1634 }
1635
1636 plat_data = (struct gce_plat *)of_device_get_match_data(dev);
1637 if (!plat_data) {
1638 dev_err(dev, "failed to get match data\n");
1639 return -EINVAL;
1640 }
1641
1642 cmdq->thread_nr = plat_data->thread_nr;
1643 gce_shift_bit = plat_data->shift;
1644 cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
1645
1646 dev_notice(dev, "cmdq thread:%u shift:%u base:0x%lx pa:0x%lx\n",
1647 plat_data->thread_nr, plat_data->shift,
1648 (unsigned long)cmdq->base,
1649 (unsigned long)cmdq->base_pa);
1650
1651 cmdq_msg("cmdq thread:%u shift:%u base:0x%lx pa:0x%lx\n",
1652 plat_data->thread_nr, plat_data->shift,
1653 (unsigned long)cmdq->base,
1654 (unsigned long)cmdq->base_pa);
1655
1656 cmdq_config_prefetch(dev->of_node, cmdq);
1657 cmdq_config_dma_mask(dev);
1658 cmdq_config_default_token(dev, cmdq);
1659
1660 cmdq->clock = devm_clk_get(dev, "gce");
1661 if (IS_ERR(cmdq->clock)) {
1662 cmdq_err("failed to get gce clk");
1663 cmdq->clock = NULL;
1664 }
1665
1666 cmdq->clock_timer = devm_clk_get(dev, "gce-timer");
1667 if (IS_ERR(cmdq->clock_timer)) {
1668 cmdq_err("failed to get gce timer clk");
1669 cmdq->clock_timer = NULL;
1670 }
1671
1672 cmdq->mbox.dev = dev;
1673 cmdq->mbox.chans = devm_kcalloc(dev, plat_data->thread_nr,
1674 sizeof(*cmdq->mbox.chans), GFP_KERNEL);
1675 if (!cmdq->mbox.chans)
1676 return -ENOMEM;
1677
1678 cmdq->mbox.num_chans = plat_data->thread_nr;
1679 cmdq->mbox.ops = &cmdq_mbox_chan_ops;
1680 cmdq->mbox.of_xlate = cmdq_xlate;
1681
1682 /* make use of TXDONE_BY_ACK */
1683 cmdq->mbox.txdone_irq = false;
1684 cmdq->mbox.txdone_poll = false;
1685
1686 for (i = 0; i < cmdq->thread_nr; i++) {
1687 cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
1688 CMDQ_THR_SIZE * i;
1689 cmdq->thread[i].gce_pa = cmdq->base_pa;
1690 INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
1691 timer_setup(&cmdq->thread[i].timeout,
1692 cmdq_thread_handle_timeout, 0);
1693 cmdq->thread[i].idx = i;
1694 cmdq->mbox.chans[i].con_priv = &cmdq->thread[i];
1695 INIT_WORK(&cmdq->thread[i].timeout_work,
1696 cmdq_thread_handle_timeout_work);
1697 }
1698
1699 err = mbox_controller_register(&cmdq->mbox);
1700 if (err < 0) {
1701 cmdq_err("failed to register mailbox:%d", err);
1702 return err;
1703 }
1704 dev_notice(dev, "register mailbox successfully\n");
1705
1706 cmdq->buf_dump_wq = alloc_ordered_workqueue(
1707 "%s", WQ_MEM_RECLAIM | WQ_HIGHPRI,
1708 "cmdq_buf_dump");
1709
1710 cmdq->timeout_wq = create_singlethread_workqueue(
1711 "cmdq_timeout_handler");
1712
1713 platform_set_drvdata(pdev, cmdq);
1714 WARN_ON(clk_prepare(cmdq->clock) < 0);
1715 WARN_ON(clk_prepare(cmdq->clock_timer) < 0);
1716
1717 wakeup_source_add(&cmdq->wake_lock);
1718
1719 spin_lock_init(&cmdq->lock);
1720 clk_enable(cmdq->clock);
1721 cmdq_init(cmdq);
1722 clk_disable(cmdq->clock);
1723
1724 cmdq_mmp_init();
1725
1726#if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT)
1727 cmdq->hwid = cmdq_util_track_ctrl(cmdq, cmdq->base_pa, false);
1728#endif
1729 return 0;
1730}
1731
1732static const struct dev_pm_ops cmdq_pm_ops = {
1733 .suspend = cmdq_suspend,
1734 .resume = cmdq_resume,
1735};
1736
1737static const struct gce_plat gce_plat_v2 = {.thread_nr = 16};
1738static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3};
1739
1740static const struct of_device_id cmdq_of_ids[] = {
1741 {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
1742 {.compatible = "mediatek,mt6761-gce", .data = (void *)&gce_plat_v2},
1743 {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
1744 {.compatible = "mediatek,mailbox-gce", .data = (void *)&gce_plat_v4},
1745 {}
1746};
1747
1748static struct platform_driver cmdq_drv = {
1749 .probe = cmdq_probe,
1750 .remove = cmdq_remove,
1751 .driver = {
1752 .name = CMDQ_DRIVER_NAME,
1753 .pm = &cmdq_pm_ops,
1754 .of_match_table = cmdq_of_ids,
1755 }
1756};
1757
1758static __init int cmdq_drv_init(void)
1759{
1760	int err = 0;
1761
1762 cmdq_msg("%s enter", __func__);
1763
1764 err = platform_driver_register(&cmdq_drv);
1765 if (err) {
1766 cmdq_err("platform driver register failed:%d", err);
1767 return err;
1768 }
1769
1770 return 0;
1771}
1772
1773void cmdq_mbox_enable(void *chan)
1774{
1775 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
1776 typeof(*cmdq), mbox);
1777
1778 WARN_ON(clk_prepare(cmdq->clock) < 0);
1779 cmdq_clk_enable(cmdq);
1780}
1781
1782void cmdq_mbox_disable(void *chan)
1783{
1784 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
1785 typeof(*cmdq), mbox);
1786
1787 cmdq_clk_disable(cmdq);
1788 clk_unprepare(cmdq->clock);
1789}
1790
1791s32 cmdq_mbox_get_usage(void *chan)
1792{
1793 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
1794 typeof(*cmdq), mbox);
1795
1796 return atomic_read(&cmdq->usage);
1797}
1798
1799void *cmdq_mbox_get_base(void *chan)
1800{
1801 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
1802 typeof(*cmdq), mbox);
1803
1804 return (void *)cmdq->base;
1805}
1806
1807phys_addr_t cmdq_mbox_get_base_pa(void *chan)
1808{
1809 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
1810 typeof(*cmdq), mbox);
1811
1812 return cmdq->base_pa;
1813}
1814
1815struct device *cmdq_mbox_get_dev(void *chan)
1816{
1817 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
1818 typeof(*cmdq), mbox);
1819
1820 return cmdq->mbox.dev;
1821}
1822
1823s32 cmdq_mbox_thread_reset(void *chan)
1824{
1825 struct cmdq_thread *thread = ((struct mbox_chan *)chan)->con_priv;
1826 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq,
1827 mbox);
1828
1829 return cmdq_thread_reset(cmdq, thread);
1830}
1831EXPORT_SYMBOL(cmdq_mbox_thread_reset);
1832
1833s32 cmdq_mbox_thread_suspend(void *chan)
1834{
1835 struct cmdq_thread *thread = ((struct mbox_chan *)chan)->con_priv;
1836 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq,
1837 mbox);
1838
1839 return cmdq_thread_suspend(cmdq, thread);
1840}
1841EXPORT_SYMBOL(cmdq_mbox_thread_suspend);
1842
1843void cmdq_mbox_thread_disable(void *chan)
1844{
1845 struct cmdq_thread *thread = ((struct mbox_chan *)chan)->con_priv;
1846 struct cmdq *cmdq = container_of(thread->chan->mbox, struct cmdq,
1847 mbox);
1848
1849 cmdq_thread_disable(cmdq, thread);
1850}
1851EXPORT_SYMBOL(cmdq_mbox_thread_disable);
1852
1853u32 cmdq_mbox_get_thread_timeout(void *chan)
1854{
1855 struct cmdq_thread *thread = ((struct mbox_chan *)chan)->con_priv;
1856
1857 return thread->timeout_ms;
1858}
1859EXPORT_SYMBOL(cmdq_mbox_get_thread_timeout);
1860
1861u32 cmdq_mbox_set_thread_timeout(void *chan, u32 timeout)
1862{
1863 struct cmdq_thread *thread = ((struct mbox_chan *)chan)->con_priv;
1864 unsigned long flags;
1865 u32 timeout_prv;
1866
1867 spin_lock_irqsave(&thread->chan->lock, flags);
1868 timeout_prv = thread->timeout_ms;
1869 thread->timeout_ms = timeout;
1870 spin_unlock_irqrestore(&thread->chan->lock, flags);
1871
1872 return timeout_prv;
1873}
1874EXPORT_SYMBOL(cmdq_mbox_set_thread_timeout);
1875
1876s32 cmdq_mbox_chan_id(void *chan)
1877{
1878 struct cmdq_thread *thread = ((struct mbox_chan *)chan)->con_priv;
1879
1880 if (!thread || !thread->occupied)
1881 return -1;
1882
1883 return thread->idx;
1884}
1885EXPORT_SYMBOL(cmdq_mbox_chan_id);
1886
1887s32 cmdq_task_get_thread_pc(struct mbox_chan *chan, dma_addr_t *pc_out)
1888{
1889 struct cmdq_thread *thread;
1890 dma_addr_t pc = 0;
1891
1892 if (!pc_out || !chan)
1893 return -EINVAL;
1894
1895 thread = chan->con_priv;
1896 pc = cmdq_thread_get_pc(thread);
1897
1898 *pc_out = pc;
1899
1900 return 0;
1901}
1902
1903s32 cmdq_task_get_thread_irq(struct mbox_chan *chan, u32 *irq_out)
1904{
1905 struct cmdq_thread *thread;
1906
1907 if (!irq_out || !chan)
1908 return -EINVAL;
1909
1910 thread = chan->con_priv;
1911 *irq_out = readl(thread->base + CMDQ_THR_IRQ_STATUS);
1912
1913 return 0;
1914}
1915
1916s32 cmdq_task_get_thread_irq_en(struct mbox_chan *chan, u32 *irq_en_out)
1917{
1918 struct cmdq_thread *thread;
1919
1920 if (!irq_en_out || !chan)
1921 return -EINVAL;
1922
1923 thread = chan->con_priv;
1924 *irq_en_out = readl(thread->base + CMDQ_THR_IRQ_ENABLE);
1925
1926 return 0;
1927}
1928
1929s32 cmdq_task_get_thread_end_addr(struct mbox_chan *chan,
1930 dma_addr_t *end_addr_out)
1931{
1932 struct cmdq_thread *thread;
1933
1934 if (!end_addr_out || !chan)
1935 return -EINVAL;
1936
1937 thread = chan->con_priv;
1938 *end_addr_out = cmdq_thread_get_end(thread);
1939
1940 return 0;
1941}
1942
1943s32 cmdq_task_get_task_info_from_thread_unlock(struct mbox_chan *chan,
1944 struct list_head *task_list_out, u32 *task_num_out)
1945{
1946 struct cmdq_thread *thread;
1947 struct cmdq_task *task;
1948 u32 task_num = 0;
1949
1950 if (!chan || !task_list_out)
1951 return -EINVAL;
1952
1953 thread = chan->con_priv;
1954 list_for_each_entry(task, &thread->task_busy_list, list_entry) {
1955 struct cmdq_thread_task_info *task_info;
1956
1957 task_info = kzalloc(sizeof(*task_info), GFP_ATOMIC);
1958 if (!task_info)
1959 continue;
1960
1961 task_info->pa_base = task->pa_base;
1962		/* copy the pkt here so it cannot be released underneath us */
1963 /* copy pkt here to avoid released */
1964 task_info->pkt = kzalloc(sizeof(*task_info->pkt), GFP_ATOMIC);
1965 if (!task_info->pkt) {
1966 kfree(task_info);
1967 continue;
1968 }
1969 memcpy(task_info->pkt, task->pkt, sizeof(*task->pkt));
1970
1971 INIT_LIST_HEAD(&task_info->list_entry);
1972 list_add_tail(&task_info->list_entry, task_list_out);
1973 task_num++;
1974 }
1975
1976 if (task_num_out)
1977 *task_num_out = task_num;
1978
1979 return 0;
1980}
1981
1982s32 cmdq_task_get_pkt_from_thread(struct mbox_chan *chan,
1983 struct cmdq_pkt **pkt_list_out, u32 pkt_list_size, u32 *pkt_count_out)
1984{
1985 struct cmdq_thread *thread;
1986 struct cmdq_task *task;
1987 u32 pkt_num = 0;
1988 u32 tmp_num = 0;
1989 unsigned long flags;
1990
1991 if (!chan || !pkt_list_out || !pkt_count_out) {
1992 if (chan) {
1993 thread = chan->con_priv;
1994 list_for_each_entry(task, &thread->task_busy_list,
1995 list_entry) {
1996 tmp_num++;
1997 }
1998
1999 }
2000
2001 if (pkt_count_out)
2002 *pkt_count_out = pkt_num;
2003
2004 return -EINVAL;
2005 }
2006
2007 thread = chan->con_priv;
2008
2009 spin_lock_irqsave(&thread->chan->lock, flags);
2010
2011 if (list_empty(&thread->task_busy_list)) {
2012 *pkt_count_out = pkt_num;
2013 spin_unlock_irqrestore(&thread->chan->lock, flags);
2014 return 0;
2015 }
2016
2017 list_for_each_entry(task, &thread->task_busy_list, list_entry) {
2018 if (pkt_list_size == pkt_num)
2019 break;
2020 pkt_list_out[pkt_num] = task->pkt;
2021 pkt_num++;
2022 }
2023
2024 spin_unlock_irqrestore(&thread->chan->lock, flags);
2025
2026 *pkt_count_out = pkt_num;
2027
2028 return 0;
2029}
2030
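/*
 * SW event/token helpers: writing ((1L << 16) | id) to CMDQ_SYNC_TOKEN_UPD
 * sets an event, writing the bare id clears it, and a read-back goes through
 * the CMDQ_SYNC_TOKEN_ID / CMDQ_SYNC_TOKEN_VAL register pair. Sketch,
 * condensed from the three helpers below:
 *
 *	writel((1L << 16) | event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);  // set
 *	writel(event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);               // clear
 *	writel(0x3FF & event_id, cmdq->base + CMDQ_SYNC_TOKEN_ID);
 *	val = readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL);                    // query
 */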
2031void cmdq_set_event(void *chan, u16 event_id)
2032{
2033 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
2034 typeof(*cmdq), mbox);
2035
2036 writel((1L << 16) | event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2037}
2038EXPORT_SYMBOL(cmdq_set_event);
2039
2040void cmdq_clear_event(void *chan, u16 event_id)
2041{
2042 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
2043 typeof(*cmdq), mbox);
2044
2045 writel(event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2046}
2047EXPORT_SYMBOL(cmdq_clear_event);
2048
2049u32 cmdq_get_event(void *chan, u16 event_id)
2050{
2051 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
2052 typeof(*cmdq), mbox);
2053
2054 writel(0x3FF & event_id, cmdq->base + CMDQ_SYNC_TOKEN_ID);
2055 return readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL);
2056}
2057EXPORT_SYMBOL(cmdq_get_event);
2058
2059void cmdq_event_verify(void *chan, u16 event_id)
2060{
2061 struct cmdq *cmdq = container_of(((struct mbox_chan *)chan)->mbox,
2062 typeof(*cmdq), mbox);
2063 /* should be CMDQ_SYNC_TOKEN_USER_0 */
2064 const u16 test_token = 649;
2065 u32 i;
2066
2067 cmdq_msg("chan:%lx cmdq:%lx event:%u",
2068 (unsigned long)chan, (unsigned long)cmdq, event_id);
2069
2070 if (event_id > 512)
2071 event_id = 512;
2072
2073 /* check if this event can be set and clear */
2074 writel((1L << 16) | event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2075 writel(event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2076 if (!readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL))
2077 cmdq_msg("event cannot be set:%u", event_id);
2078
2079 writel(event_id, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2080 if (readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL))
2081 cmdq_msg("event cannot be clear:%u", event_id);
2082
2083 /* check if sw token can be set and clear */
2084 writel((1L << 16) | test_token, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2085 writel(test_token, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2086 if (!readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL))
2087 cmdq_msg("event cannot be set:%u", test_token);
2088
2089 writel(test_token, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2090 if (readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL))
2091 cmdq_msg("event cannot be clear:%u", test_token);
2092
2093	/* clear all events first */
2094 for (i = 0; i < event_id + 20; i++)
2095 writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2096
2097	/* now check whether any event failed to clear */
2098 for (i = 0; i < event_id + 20; i++) {
2099 writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPD);
2100 if (readl(cmdq->base + CMDQ_SYNC_TOKEN_VAL))
2101 cmdq_msg("event still on:%u", i);
2102 }
2103
2104 cmdq_msg("end debug event for %u", event_id);
2105}
2106EXPORT_SYMBOL(cmdq_event_verify);
2107
2108unsigned long cmdq_get_tracing_mark(void)
2109{
2110 static unsigned long __read_mostly tracing_mark_write_addr;
2111
2112 if (unlikely(tracing_mark_write_addr == 0))
2113 tracing_mark_write_addr =
2114 kallsyms_lookup_name("tracing_mark_write");
2115
2116 return tracing_mark_write_addr;
2117}
2118
2119#if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT)
2120void cmdq_mmp_wait(struct mbox_chan *chan, void *pkt)
2121{
2122 struct cmdq_thread *thread = chan->con_priv;
2123 struct cmdq *cmdq = container_of(chan->mbox, typeof(*cmdq), mbox);
2124
2125 mmprofile_log_ex(cmdq_mmp.wait, MMPROFILE_FLAG_PULSE,
2126 MMP_THD(thread, cmdq), (unsigned long)pkt);
2127}
2128EXPORT_SYMBOL(cmdq_mmp_wait);
2129#endif
2130
2131arch_initcall(cmdq_drv_init);