blob: 46b48ae3d41504f77560ea95b688036244ea7670 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
2//
3// Copyright (c) 2018 MediaTek Inc.
4
5#include <linux/completion.h>
6#include <linux/errno.h>
7#include <linux/dma-mapping.h>
8#include <linux/module.h>
9#include <linux/mailbox_controller.h>
10#include <linux/soc/mediatek/mtk-cmdq.h>
11
/* extract bits [31:16] of a 32-bit value (the CMDQ "arg_b" half) */
#define CMDQ_GET_ARG_B(arg) (((arg) & GENMASK(31, 16)) >> 16)
/* extract bits [15:0] of a 32-bit value (the CMDQ "arg_c" half) */
#define CMDQ_GET_ARG_C(arg) ((arg) & GENMASK(15, 0))
/* combine the arg_b and arg_c halves back into one 32-bit value */
#define CMDQ_GET_32B_VALUE(arg_b, arg_c) ((u32)((arg_b) << 16) | (arg_c))
/* get the register index prefix from type */
#define CMDQ_REG_IDX_PREFIX(type) ((type) ? "" : "Reg Index ")
/* get operand register index (when ->reg) or its immediate value */
#define CMDQ_OPERAND_GET_IDX_VALUE(operand) ((operand)->reg ? \
					    (operand)->idx : \
					    (operand)->value)
#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)
/* arg_*_type values: operand is an immediate or a GPR index */
#define CMDQ_IMMEDIATE_VALUE	0
#define CMDQ_REG_TYPE		1
28
/*
 * In-memory layout of one 64-bit CMDQ instruction, written field by field
 * by cmdq_pkt_instr_encoder() from arg_c (low bits) up to op (high bits).
 *
 * NOTE(review): arg_a/arg_b/arg_c are signed 16-bit bitfields — presumably
 * chosen to match the hardware encoding; confirm sign handling is intended.
 */
struct cmdq_instruction {
	s16 arg_c:16;
	s16 arg_b:16;
	s16 arg_a:16;
	u8 s_op:5;		/* sub-op: subsys for write/poll, logic/condition op otherwise */
	u8 arg_c_type:1;	/* CMDQ_IMMEDIATE_VALUE or CMDQ_REG_TYPE */
	u8 arg_b_type:1;
	u8 arg_a_type:1;
	u8 op:8;		/* primary opcode (enum cmdq_code) */
};
39
40static void cmdq_pkt_instr_encoder(struct cmdq_pkt *pkt, s16 arg_c, s16 arg_b,
41 s16 arg_a, u8 s_op, u8 arg_c_type,
42 u8 arg_b_type, u8 arg_a_type, u8 op)
43{
44 struct cmdq_instruction *cmdq_inst;
45
46 cmdq_inst = pkt->va_base + pkt->cmd_buf_size;
47 cmdq_inst->op = op;
48 cmdq_inst->arg_a_type = arg_a_type;
49 cmdq_inst->arg_b_type = arg_b_type;
50 cmdq_inst->arg_c_type = arg_c_type;
51 cmdq_inst->s_op = s_op;
52 cmdq_inst->arg_a = arg_a;
53 cmdq_inst->arg_b = arg_b;
54 cmdq_inst->arg_c = arg_c;
55 pkt->cmd_buf_size += CMDQ_INST_SIZE;
56}
57
58static void cmdq_client_timeout(struct timer_list *t)
59{
60 struct cmdq_client *client = from_timer(client, t, timer);
61
62 dev_err(client->client.dev, "cmdq timeout!\n");
63}
64
65struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
66{
67 struct cmdq_client *client;
68
69 client = kzalloc(sizeof(*client), GFP_KERNEL);
70 if (!client)
71 return (struct cmdq_client *)-ENOMEM;
72
73 client->timeout_ms = timeout;
74 if (timeout != CMDQ_NO_TIMEOUT) {
75 spin_lock_init(&client->lock);
76 timer_setup(&client->timer, cmdq_client_timeout, 0);
77 }
78 client->pkt_cnt = 0;
79 client->client.dev = dev;
80 client->client.tx_block = false;
81 client->chan = mbox_request_channel(&client->client, index);
82
83 if (IS_ERR(client->chan)) {
84 long err;
85
86 dev_err(dev, "failed to request channel\n");
87 err = PTR_ERR(client->chan);
88 kfree(client);
89
90 return ERR_PTR(err);
91 }
92
93 return client;
94}
95EXPORT_SYMBOL(cmdq_mbox_create);
96
/*
 * cmdq_mbox_destroy() - tear down a client created by cmdq_mbox_create()
 * @client: the client to destroy
 *
 * Stops the timeout timer (when one was set up), frees the mailbox channel
 * and finally the client itself.
 */
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		/*
		 * NOTE(review): del_timer_sync() runs while holding
		 * client->lock; this is safe only because
		 * cmdq_client_timeout() never takes the lock — re-check if
		 * the handler ever grows locking.
		 */
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
108
109struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
110{
111 struct cmdq_pkt *pkt;
112 struct device *dev;
113 dma_addr_t dma_addr;
114
115 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
116 if (!pkt)
117 return ERR_PTR(-ENOMEM);
118 pkt->va_base = kzalloc(size, GFP_KERNEL);
119 if (!pkt->va_base) {
120 kfree(pkt);
121 return ERR_PTR(-ENOMEM);
122 }
123 pkt->buf_size = size;
124 pkt->cl = (void *)client;
125
126 dev = client->chan->mbox->dev;
127 dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
128 DMA_TO_DEVICE);
129 if (dma_mapping_error(dev, dma_addr)) {
130 dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
131 kfree(pkt->va_base);
132 kfree(pkt);
133 return ERR_PTR(-ENOMEM);
134 }
135
136 pkt->pa_base = dma_addr;
137
138 return pkt;
139}
140EXPORT_SYMBOL(cmdq_pkt_create);
141
142void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
143{
144 struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
145
146 dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
147 DMA_TO_DEVICE);
148 kfree(pkt->va_base);
149 kfree(pkt);
150}
151EXPORT_SYMBOL(cmdq_pkt_destroy);
152
/*
 * Bounds-checked append of one instruction to the packet buffer.
 * Returns 0 on success, -ENOMEM when the buffer is already full.
 */
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, s16 arg_c, s16 arg_b,
				   s16 arg_a, u8 s_op, u8 arg_c_type,
				   u8 arg_b_type, u8 arg_a_type,
				   enum cmdq_code code)
{

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * In the case the allocated buffer size (pkt->buf_size) is
		 * used up, the real required size (pkt->cmd_buf_size) is
		 * still increased, so that the user knows how much memory
		 * should be ultimately allocated after appending all commands
		 * and flushing the command packet. Therefore, the user can
		 * call cmdq_pkt_create() again with the real required buffer
		 * size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small !\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}
	cmdq_pkt_instr_encoder(pkt, arg_c, arg_b, arg_a, s_op, arg_c_type,
			       arg_b_type, arg_a_type, code);

	return 0;
}
178
179int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
180{
181 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value),
182 CMDQ_GET_ARG_B(value), offset, subsys,
183 CMDQ_IMMEDIATE_VALUE,
184 CMDQ_IMMEDIATE_VALUE,
185 CMDQ_IMMEDIATE_VALUE, CMDQ_CODE_WRITE);
186}
187EXPORT_SYMBOL(cmdq_pkt_write);
188
189int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset,
190 u32 value, u32 mask)
191{
192 u32 offset_mask = offset;
193 int err = 0;
194
195 if (mask != 0xffffffff) {
196 err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask),
197 CMDQ_GET_ARG_B(~mask),
198 CMDQ_IMMEDIATE_VALUE,
199 CMDQ_IMMEDIATE_VALUE,
200 CMDQ_IMMEDIATE_VALUE,
201 CMDQ_IMMEDIATE_VALUE,
202 CMDQ_IMMEDIATE_VALUE,
203 CMDQ_CODE_MASK);
204 offset_mask |= CMDQ_WRITE_ENABLE_MASK;
205 }
206 err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
207
208 return err;
209}
210EXPORT_SYMBOL(cmdq_pkt_write_mask);
211
212int cmdq_pkt_load(struct cmdq_pkt *pkt, u16 dst_reg_idx,
213 u16 indirect_src_reg_idx)
214{
215 return cmdq_pkt_append_command(pkt, 0, indirect_src_reg_idx,
216 dst_reg_idx, 0, CMDQ_IMMEDIATE_VALUE,
217 CMDQ_REG_TYPE, CMDQ_REG_TYPE,
218 CMDQ_CODE_READ_S);
219}
220EXPORT_SYMBOL(cmdq_pkt_load);
221
222int cmdq_pkt_store_reg(struct cmdq_pkt *pkt, u16 indirect_dst_reg_idx,
223 u16 src_reg_idx, u32 mask)
224{
225 int err = 0;
226 enum cmdq_code op = CMDQ_CODE_WRITE_S;
227
228 if (mask != 0xffffffff) {
229 err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask),
230 CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0,
231 0, CMDQ_CODE_MASK);
232 if (err != 0)
233 return err;
234
235 op = CMDQ_CODE_WRITE_S_W_MASK;
236 }
237
238 return cmdq_pkt_append_command(pkt, 0, src_reg_idx,
239 indirect_dst_reg_idx, 0,
240 CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE,
241 CMDQ_REG_TYPE, op);
242}
243EXPORT_SYMBOL(cmdq_pkt_store_reg);
244
245int cmdq_pkt_store_value(struct cmdq_pkt *pkt, u16 indirect_dst_reg_idx,
246 u32 value, u32 mask)
247{
248 int err = 0;
249 enum cmdq_code op = CMDQ_CODE_WRITE_S;
250
251 if (mask != 0xffffffff) {
252 err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask),
253 CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0,
254 0, CMDQ_CODE_MASK);
255 if (err != 0)
256 return err;
257
258 op = CMDQ_CODE_WRITE_S_W_MASK;
259 }
260
261 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value),
262 CMDQ_GET_ARG_B(value),
263 indirect_dst_reg_idx, 0,
264 CMDQ_IMMEDIATE_VALUE,
265 CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, op);
266}
267EXPORT_SYMBOL(cmdq_pkt_store_value);
268
269int cmdq_pkt_assign_command(struct cmdq_pkt *pkt, u16 reg_idx, s32 value)
270{
271 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value),
272 CMDQ_GET_ARG_B(value), reg_idx,
273 CMDQ_LOGIC_ASSIGN, CMDQ_IMMEDIATE_VALUE,
274 CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE,
275 CMDQ_CODE_LOGIC);
276}
277EXPORT_SYMBOL(cmdq_pkt_assign_command);
278
279int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, enum CMDQ_LOGIC_ENUM s_op,
280 u16 result_reg_idx,
281 struct cmdq_operand *left_operand,
282 struct cmdq_operand *right_operand)
283{
284 u32 left_idx_value;
285 u32 right_idx_value;
286
287 if (!left_operand || !right_operand)
288 return -EINVAL;
289
290 left_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(left_operand);
291 right_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(right_operand);
292
293 return cmdq_pkt_append_command(pkt, right_idx_value, left_idx_value,
294 result_reg_idx, s_op, right_operand->reg,
295 left_operand->reg, CMDQ_REG_TYPE,
296 CMDQ_CODE_LOGIC);
297}
298EXPORT_SYMBOL(cmdq_pkt_logic_command);
299
300int cmdq_pkt_jump(struct cmdq_pkt *pkt, s32 addr_offset)
301{
302 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(addr_offset),
303 CMDQ_GET_ARG_B(addr_offset), 0, 0, 0, 0,
304 0, CMDQ_CODE_JUMP);
305}
306EXPORT_SYMBOL(cmdq_pkt_jump);
307
308int cmdq_pkt_cond_jump(struct cmdq_pkt *pkt,
309 u16 offset_reg_idx,
310 struct cmdq_operand *left_operand,
311 struct cmdq_operand *right_operand,
312 enum CMDQ_CONDITION_ENUM condition_operator)
313{
314 u32 left_idx_value;
315 u32 right_idx_value;
316
317 if (!left_operand || !right_operand)
318 return -EINVAL;
319
320 left_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(left_operand);
321 right_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(right_operand);
322
323 return cmdq_pkt_append_command(pkt, right_idx_value, left_idx_value,
324 offset_reg_idx, condition_operator,
325 right_operand->reg, left_operand->reg,
326 CMDQ_REG_TYPE,
327 CMDQ_CODE_JUMP_C_RELATIVE);
328}
329EXPORT_SYMBOL(cmdq_pkt_cond_jump);
330
331struct cmdq_operand *cmdq_operand_immediate(struct cmdq_operand *operand,
332 u16 value)
333{
334 if (!operand)
335 return (struct cmdq_operand *)ERR_PTR(-EINVAL);
336
337 operand->reg = false;
338 operand->value = value;
339
340 return operand;
341}
342EXPORT_SYMBOL(cmdq_operand_immediate);
343
344struct cmdq_operand *cmdq_operand_reg(struct cmdq_operand *operand, u16 idx)
345{
346 if (!operand)
347 return (struct cmdq_operand *)ERR_PTR(-EINVAL);
348
349 operand->reg = true;
350 operand->idx = idx;
351
352 return operand;
353}
354EXPORT_SYMBOL(cmdq_operand_reg);
355
356int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
357{
358 if (event >= CMDQ_MAX_EVENT)
359 return -EINVAL;
360
361 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_WFE_OPTION),
362 CMDQ_GET_ARG_B(CMDQ_WFE_OPTION), event,
363 CMDQ_IMMEDIATE_VALUE,
364 CMDQ_IMMEDIATE_VALUE,
365 CMDQ_IMMEDIATE_VALUE,
366 CMDQ_IMMEDIATE_VALUE,
367 CMDQ_CODE_WFE);
368}
369EXPORT_SYMBOL(cmdq_pkt_wfe);
370
371int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
372{
373 if (event >= CMDQ_MAX_EVENT)
374 return -EINVAL;
375
376 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_WFE_UPDATE),
377 CMDQ_GET_ARG_B(CMDQ_WFE_UPDATE), event,
378 CMDQ_IMMEDIATE_VALUE,
379 CMDQ_IMMEDIATE_VALUE,
380 CMDQ_IMMEDIATE_VALUE,
381 CMDQ_IMMEDIATE_VALUE,
382 CMDQ_CODE_WFE);
383}
384EXPORT_SYMBOL(cmdq_pkt_clear_event);
385
386int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
387 u16 offset, u32 value, u32 mask)
388{
389 int err;
390
391 if (mask != 0xffffffff) {
392 err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask),
393 CMDQ_GET_ARG_B(~mask),
394 CMDQ_IMMEDIATE_VALUE,
395 CMDQ_IMMEDIATE_VALUE,
396 CMDQ_IMMEDIATE_VALUE,
397 CMDQ_IMMEDIATE_VALUE,
398 CMDQ_IMMEDIATE_VALUE,
399 CMDQ_CODE_MASK);
400
401 if (err != 0)
402 return err;
403 }
404 offset = offset | 0x1;
405
406 return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value),
407 CMDQ_GET_ARG_B(value),
408 offset, subsys,
409 CMDQ_IMMEDIATE_VALUE,
410 CMDQ_IMMEDIATE_VALUE,
411 CMDQ_IMMEDIATE_VALUE,
412 CMDQ_CODE_POLL);
413}
414EXPORT_SYMBOL(cmdq_pkt_poll);
415
/*
 * Append the packet trailer: an EOC command (with IRQ enabled) followed by
 * a JUMP. Called from cmdq_pkt_flush_async() before submission.
 * Returns 0 on success or the first append error.
 */
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_EOC_IRQ_EN),
				      CMDQ_GET_ARG_B(CMDQ_EOC_IRQ_EN),
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_CODE_EOC);
	if (err < 0)
		return err;
	/* JUMP to end */
	err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_JUMP_PASS),
				      CMDQ_GET_ARG_B(CMDQ_JUMP_PASS),
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_IMMEDIATE_VALUE,
				      CMDQ_CODE_JUMP);

	return err;
}
443
/*
 * Mailbox completion callback set up by cmdq_pkt_flush_async():
 * timer bookkeeping, sync the buffer back to the CPU, then invoke the
 * user's callback with the user's private data.
 */
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		/* last in-flight packet stops the watchdog; otherwise re-arm */
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	/* hand the command buffer back to the CPU before user code runs */
	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		/* forward the user's private data, not the packet pointer */
		data.data = cb->data;
		cb->cb(data);
	}
}
469
/*
 * cmdq_pkt_flush_async() - finalize a packet and submit it to the mailbox
 * @pkt:  the packet to flush
 * @cb:   user callback run from cmdq_pkt_flush_async_cb() on completion
 * @data: private data handed back to @cb
 *
 * Appends the EOC/JUMP trailer, syncs the buffer to the device, arms the
 * timeout timer (when enabled) and sends the packet. Returns 0 on success
 * or a negative error from finalization.
 */
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	/* hand the command buffer to the device before submission */
	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		/* arm the watchdog only for the first in-flight packet */
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
504
/* Completion context shared between cmdq_pkt_flush() and its callback. */
struct cmdq_flush_completion {
	struct completion cmplt;	/* signalled when the packet completes */
	bool err;			/* true when status != CMDQ_CB_NORMAL */
};
509
510static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
511{
512 struct cmdq_flush_completion *cmplt;
513
514 cmplt = (struct cmdq_flush_completion *)data.data;
515 if (data.sta != CMDQ_CB_NORMAL)
516 cmplt->err = true;
517 else
518 cmplt->err = false;
519 complete(&cmplt->cmplt);
520}
521
522int cmdq_pkt_flush(struct cmdq_pkt *pkt)
523{
524 struct cmdq_flush_completion cmplt;
525 int err;
526
527 init_completion(&cmplt.cmplt);
528 err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
529 if (err < 0)
530 return err;
531 wait_for_completion(&cmplt.cmplt);
532
533 return cmplt.err ? -EFAULT : 0;
534}
535EXPORT_SYMBOL(cmdq_pkt_flush);
536
537MODULE_LICENSE("GPL v2");