| xj | b04a402 | 2021-11-25 15:01:52 +0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (c) 2015 MediaTek Inc. |
| 4 | */ |
| 5 | |
| 6 | #include <linux/completion.h> |
| 7 | #include <linux/errno.h> |
| 8 | #include <linux/of_address.h> |
| 9 | #include <linux/soc/mediatek/mtk-cmdq.h> |
| 10 | #include <linux/mailbox_controller.h> |
| 11 | #include <linux/dma-mapping.h> |
| 12 | #include <linux/dmapool.h> |
| 13 | #include <linux/sched/clock.h> |
| 14 | |
| 15 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 16 | #include "cmdq-util.h" |
| 17 | |
| 18 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 19 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 20 | #include "cmdq-sec.h" |
| 21 | #endif |
| 22 | |
| 23 | #endif |
| 24 | |
| 25 | #define CMDQ_ARG_A_WRITE_MASK 0xffff |
| 26 | #define CMDQ_WRITE_ENABLE_MASK BIT(0) |
| 27 | #define CMDQ_EOC_IRQ_EN BIT(0) |
| 28 | #define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \ |
| 29 | << 32 | CMDQ_EOC_IRQ_EN) |
| 30 | #define CMDQ_MBOX_BUF_LIMIT 16 /* default limit count */ |
| 31 | |
| 32 | #define CMDQ_PREDUMP_TIMEOUT_MS 200 |
| 33 | |
| 34 | /* sleep for 312 tick, which around 12us */ |
| 35 | #define CMDQ_POLL_TICK 312 |
| 36 | |
| 37 | #define CMDQ_GET_ADDR_H(addr) (sizeof(addr) > 32 ? (addr >> 32) : 0) |
| 38 | #define CMDQ_GET_ARG_B(arg) (((arg) & GENMASK(31, 16)) >> 16) |
| 39 | #define CMDQ_GET_ARG_C(arg) ((arg) & GENMASK(15, 0)) |
| 40 | #define CMDQ_GET_32B_VALUE(argb, argc) ((u32)((argb) << 16) | (argc)) |
| 41 | #define CMDQ_REG_IDX_PREFIX(type) ((type) ? "Reg Index " : "") |
| 42 | #define CMDQ_GET_REG_BASE(addr) ((addr) & GENMASK(31, 16)) |
| 43 | #define CMDQ_GET_REG_OFFSET(addr) ((addr) & GENMASK(15, 0)) |
| 44 | #define CMDQ_GET_ADDR_HIGH(addr) ((u32)((addr >> 16) & GENMASK(31, 0))) |
| 45 | #define CMDQ_ADDR_LOW_BIT BIT(1) |
| 46 | #define CMDQ_GET_ADDR_LOW(addr) ((u16)(addr & GENMASK(15, 0)) | \ |
| 47 | CMDQ_ADDR_LOW_BIT) |
| 48 | #define CMDQ_IMMEDIATE_VALUE 0 |
| 49 | #define CMDQ_REG_TYPE 1 |
| 50 | #define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \ |
| 51 | CMDQ_WFE_WAIT_VALUE) |
| 52 | #define CMDQ_TPR_MASK 0xD0 |
| 53 | #define CMDQ_TPR_TIMEOUT_EN 0xDC |
| 54 | #define CMDQ_GPR_R0_OFF 0x80 |
| 55 | |
| 56 | #define CMDQ_OPERAND_GET_IDX_VALUE(operand) \ |
| 57 | ((operand)->reg ? (operand)->idx : (operand)->value) |
| 58 | #define CMDQ_OPERAND_TYPE(operand) \ |
| 59 | ((operand)->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE) |
| 60 | |
| 61 | #define CMDQ_DBG_PERFBEGIN CMDQ_CMD_BUFFER_SIZE |
| 62 | #define CMDQ_DBG_PERFEND (CMDQ_DBG_PERFBEGIN + 4) |
| 63 | |
| 64 | struct client_priv { |
| 65 | struct dma_pool *buf_pool; |
| 66 | u32 pool_limit; |
| 67 | atomic_t buf_cnt; |
| 68 | struct workqueue_struct *flushq; |
| 69 | }; |
| 70 | |
| 71 | struct cmdq_instruction { |
| 72 | u16 arg_c:16; |
| 73 | u16 arg_b:16; |
| 74 | u16 arg_a:16; |
| 75 | u8 s_op:5; |
| 76 | u8 arg_c_type:1; |
| 77 | u8 arg_b_type:1; |
| 78 | u8 arg_a_type:1; |
| 79 | u8 op:8; |
| 80 | }; |
| 81 | |
| 82 | struct cmdq_flush_item { |
| 83 | struct work_struct work; |
| 84 | struct cmdq_pkt *pkt; |
| 85 | cmdq_async_flush_cb cb; |
| 86 | void *data; |
| 87 | cmdq_async_flush_cb err_cb; |
| 88 | void *err_data; |
| 89 | s32 err; |
| 90 | bool done; |
| 91 | }; |
| 92 | |
| 93 | static s8 cmdq_subsys_base_to_id(struct cmdq_base *clt_base, u32 base) |
| 94 | { |
| 95 | u8 i; |
| 96 | |
| 97 | if (!clt_base) |
| 98 | return -EINVAL; |
| 99 | |
| 100 | base = base & 0xFFFF0000; |
| 101 | for (i = 0; i < clt_base->count; i++) { |
| 102 | if (clt_base->subsys[i].base == base) |
| 103 | return clt_base->subsys[i].id; |
| 104 | } |
| 105 | |
| 106 | return -EINVAL; |
| 107 | } |
| 108 | |
| 109 | u32 cmdq_subsys_id_to_base(struct cmdq_base *clt_base, int id) |
| 110 | { |
| 111 | u32 i; |
| 112 | |
| 113 | if (!clt_base) |
| 114 | return 0; |
| 115 | |
| 116 | for (i = 0; i < clt_base->count; i++) { |
| 117 | if (clt_base->subsys[i].id == id) |
| 118 | return clt_base->subsys[i].base; |
| 119 | } |
| 120 | |
| 121 | return 0; |
| 122 | } |
| 123 | EXPORT_SYMBOL(cmdq_subsys_id_to_base); |
| 124 | |
| 125 | int cmdq_pkt_realloc_cmd_buffer(struct cmdq_pkt *pkt, size_t size) |
| 126 | { |
| 127 | while (pkt->buf_size < size) |
| 128 | cmdq_pkt_add_cmd_buffer(pkt); |
| 129 | return 0; |
| 130 | } |
| 131 | EXPORT_SYMBOL(cmdq_pkt_realloc_cmd_buffer); |
| 132 | |
| 133 | struct cmdq_base *cmdq_register_device(struct device *dev) |
| 134 | { |
| 135 | struct cmdq_base *clt_base; |
| 136 | struct of_phandle_args spec; |
| 137 | u32 vals[2] = {0}, idx; |
| 138 | s32 ret; |
| 139 | |
| 140 | clt_base = devm_kzalloc(dev, sizeof(*clt_base), GFP_KERNEL); |
| 141 | if (!clt_base) |
| 142 | return NULL; |
| 143 | |
| 144 | /* parse subsys */ |
| 145 | for (idx = 0; idx < ARRAY_SIZE(clt_base->subsys); idx++) { |
| 146 | if (of_parse_phandle_with_args(dev->of_node, "gce-subsys", |
| 147 | "#gce-subsys-cells", idx, &spec)) |
| 148 | break; |
| 149 | clt_base->subsys[idx].base = spec.args[0]; |
| 150 | clt_base->subsys[idx].id = spec.args[1]; |
| 151 | } |
| 152 | clt_base->count = idx; |
| 153 | |
| 154 | /* parse CPR range */ |
| 155 | ret = of_property_read_u32_array(dev->of_node, "gce-cpr-range", |
| 156 | vals, 2); |
| 157 | if (!ret) { |
| 158 | clt_base->cpr_base = vals[0] + CMDQ_CPR_STRAT_ID; |
| 159 | clt_base->cpr_cnt = vals[1]; |
| 160 | cmdq_msg("support cpr:%d count:%d", vals[0], vals[1]); |
| 161 | } |
| 162 | |
| 163 | return clt_base; |
| 164 | } |
| 165 | EXPORT_SYMBOL(cmdq_register_device); |
| 166 | |
| 167 | struct cmdq_client *cmdq_mbox_create(struct device *dev, int index) |
| 168 | { |
| 169 | struct cmdq_client *client; |
| 170 | struct client_priv *priv; |
| 171 | |
| 172 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
| 173 | if (!client) |
| 174 | return ERR_PTR(-ENOMEM); |
| 175 | |
| 176 | client->client.dev = dev; |
| 177 | client->client.tx_block = false; |
| 178 | client->chan = mbox_request_channel(&client->client, index); |
| 179 | if (IS_ERR(client->chan)) { |
| 180 | cmdq_err("channel request fail:%ld, idx:%d", |
| 181 | PTR_ERR(client->chan), index); |
| 182 | dump_stack(); |
| 183 | kfree(client); |
| 184 | return NULL; |
| 185 | } |
| 186 | |
| 187 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
| 188 | if (!priv) { |
| 189 | cmdq_mbox_destroy(client); |
| 190 | return ERR_PTR(-ENOMEM); |
| 191 | } |
| 192 | |
| 193 | priv->pool_limit = CMDQ_MBOX_BUF_LIMIT; |
| 194 | priv->flushq = create_singlethread_workqueue("cmdq_flushq"); |
| 195 | client->cl_priv = (void *)priv; |
| 196 | |
| 197 | mutex_init(&client->chan_mutex); |
| 198 | |
| 199 | return client; |
| 200 | } |
| 201 | EXPORT_SYMBOL(cmdq_mbox_create); |
| 202 | |
| 203 | void cmdq_mbox_stop(struct cmdq_client *cl) |
| 204 | { |
| 205 | cmdq_mbox_channel_stop(cl->chan); |
| 206 | } |
| 207 | EXPORT_SYMBOL(cmdq_mbox_stop); |
| 208 | |
| 209 | void cmdq_mbox_pool_set_limit(struct cmdq_client *cl, u32 limit) |
| 210 | { |
| 211 | struct client_priv *priv = (struct client_priv *)cl->cl_priv; |
| 212 | |
| 213 | priv->pool_limit = limit; |
| 214 | } |
| 215 | EXPORT_SYMBOL(cmdq_mbox_pool_set_limit); |
| 216 | |
| 217 | void cmdq_mbox_pool_create(struct cmdq_client *cl) |
| 218 | { |
| 219 | struct client_priv *priv = (struct client_priv *)cl->cl_priv; |
| 220 | |
| 221 | if (unlikely(priv->buf_pool)) { |
| 222 | cmdq_msg("buffer pool already created"); |
| 223 | return; |
| 224 | } |
| 225 | |
| 226 | priv->buf_pool = dma_pool_create("cmdq", cl->chan->mbox->dev, |
| 227 | CMDQ_BUF_ALLOC_SIZE, 0, 0); |
| 228 | } |
| 229 | EXPORT_SYMBOL(cmdq_mbox_pool_create); |
| 230 | |
| 231 | void cmdq_mbox_pool_clear(struct cmdq_client *cl) |
| 232 | { |
| 233 | struct client_priv *priv = (struct client_priv *)cl->cl_priv; |
| 234 | |
| 235 | /* check pool still in use */ |
| 236 | if (unlikely((atomic_read(&priv->buf_cnt)))) { |
| 237 | cmdq_msg("buffers still in use:%d", |
| 238 | atomic_read(&priv->buf_cnt)); |
| 239 | return; |
| 240 | } |
| 241 | |
| 242 | dma_pool_destroy(priv->buf_pool); |
| 243 | priv->buf_pool = NULL; |
| 244 | } |
| 245 | EXPORT_SYMBOL(cmdq_mbox_pool_clear); |
| 246 | |
| 247 | static void *cmdq_mbox_pool_alloc_impl(struct dma_pool *pool, |
| 248 | dma_addr_t *pa_out, atomic_t *cnt, u32 limit) |
| 249 | { |
| 250 | void *va; |
| 251 | dma_addr_t pa; |
| 252 | |
| 253 | if (atomic_inc_return(cnt) > limit) { |
| 254 | /* not use pool, decrease to value before call */ |
| 255 | atomic_dec(cnt); |
| 256 | return NULL; |
| 257 | } |
| 258 | |
| 259 | va = dma_pool_alloc(pool, GFP_KERNEL, &pa); |
| 260 | if (!va) { |
| 261 | atomic_dec(cnt); |
| 262 | cmdq_err( |
| 263 | "alloc buffer from pool fail va:0x%p pa:%pa pool:0x%p count:%d", |
| 264 | va, &pa, pool, |
| 265 | (s32)atomic_read(cnt)); |
| 266 | return NULL; |
| 267 | } |
| 268 | |
| 269 | *pa_out = pa; |
| 270 | |
| 271 | return va; |
| 272 | } |
| 273 | |
| 274 | static void cmdq_mbox_pool_free_impl(struct dma_pool *pool, void *va, |
| 275 | dma_addr_t pa, atomic_t *cnt) |
| 276 | { |
| 277 | if (unlikely(atomic_read(cnt) <= 0 || !pool)) { |
| 278 | cmdq_err("free pool cnt:%d pool:0x%p", |
| 279 | (s32)atomic_read(cnt), pool); |
| 280 | return; |
| 281 | } |
| 282 | |
| 283 | dma_pool_free(pool, va, pa); |
| 284 | atomic_dec(cnt); |
| 285 | } |
| 286 | |
| 287 | static void *cmdq_mbox_pool_alloc(struct cmdq_client *cl, dma_addr_t *pa_out) |
| 288 | { |
| 289 | struct client_priv *priv = (struct client_priv *)cl->cl_priv; |
| 290 | |
| 291 | if (unlikely(!priv->buf_pool)) { |
| 292 | cmdq_mbox_pool_create(cl); |
| 293 | if (unlikely(!priv->buf_pool)) { |
| 294 | cmdq_err("fail to create dma pool dev:0x%p", |
| 295 | cl->chan->mbox->dev); |
| 296 | return NULL; |
| 297 | } |
| 298 | } |
| 299 | |
| 300 | return cmdq_mbox_pool_alloc_impl(priv->buf_pool, |
| 301 | pa_out, &priv->buf_cnt, priv->pool_limit); |
| 302 | } |
| 303 | |
| 304 | static void cmdq_mbox_pool_free(struct cmdq_client *cl, void *va, dma_addr_t pa) |
| 305 | { |
| 306 | struct client_priv *priv = (struct client_priv *)cl->cl_priv; |
| 307 | |
| 308 | cmdq_mbox_pool_free_impl(priv->buf_pool, va, pa, &priv->buf_cnt); |
| 309 | } |
| 310 | |
| 311 | void *cmdq_mbox_buf_alloc(struct device *dev, dma_addr_t *pa_out) |
| 312 | { |
| 313 | void *va; |
| 314 | dma_addr_t pa = 0; |
| 315 | |
| 316 | va = dma_alloc_coherent(dev, CMDQ_BUF_ALLOC_SIZE, &pa, GFP_KERNEL); |
| 317 | if (!va) { |
| 318 | cmdq_err("alloc dma buffer fail dev:0x%p", dev); |
| 319 | dump_stack(); |
| 320 | return NULL; |
| 321 | } |
| 322 | |
| 323 | *pa_out = pa; |
| 324 | return va; |
| 325 | } |
| 326 | |
| 327 | void cmdq_mbox_buf_free(struct device *dev, void *va, dma_addr_t pa) |
| 328 | { |
| 329 | dma_free_coherent(dev, CMDQ_BUF_ALLOC_SIZE, va, pa); |
| 330 | } |
| 331 | |
| 332 | /* parse event from dts |
| 333 | * |
| 334 | * Example |
| 335 | * |
| 336 | * dts: |
| 337 | * gce-event-names = "disp_rdma0_sof", |
| 338 | * "disp_rdma1_sof", |
| 339 | * "mdp_rdma0_sof"; |
| 340 | * gce-events = <&gce_mbox CMDQ_EVENT_DISP_RDMA0_SOF>, |
| 341 | * <&gce_mbox CMDQ_EVENT_DISP_RDMA1_SOF>, |
| 342 | * <&gce_mbox CMDQ_EVENT_MDP_RDMA0_SOF>; |
| 343 | * |
| 344 | * call: |
| 345 | * s32 rdma0_sof_event_id = cmdq_dev_get_event(dev, "disp_rdma0_sof"); |
| 346 | */ |
| 347 | s32 cmdq_dev_get_event(struct device *dev, const char *name) |
| 348 | { |
| 349 | s32 index = 0; |
| 350 | struct of_phandle_args spec = {0}; |
| 351 | s32 result; |
| 352 | |
| 353 | if (!dev) { |
| 354 | cmdq_err("no device node"); |
| 355 | return -EINVAL; |
| 356 | } |
| 357 | |
| 358 | index = of_property_match_string(dev->of_node, "gce-event-names", name); |
| 359 | if (index < 0) { |
| 360 | cmdq_err("no gce-event-names property or no such event:%s", |
| 361 | name); |
| 362 | return index; |
| 363 | } |
| 364 | |
| 365 | if (of_parse_phandle_with_args(dev->of_node, "gce-events", |
| 366 | "#gce-event-cells", index, &spec)) { |
| 367 | cmdq_err("can't parse gce-events property"); |
| 368 | return -ENODEV; |
| 369 | } |
| 370 | |
| 371 | result = spec.args[0]; |
| 372 | of_node_put(spec.np); |
| 373 | |
| 374 | return result; |
| 375 | } |
| 376 | |
| 377 | struct cmdq_pkt_buffer *cmdq_pkt_alloc_buf(struct cmdq_pkt *pkt) |
| 378 | { |
| 379 | struct cmdq_client *cl = (struct cmdq_client *)pkt->cl; |
| 380 | struct cmdq_pkt_buffer *buf; |
| 381 | |
| 382 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); |
| 383 | if (!buf) |
| 384 | return ERR_PTR(-ENOMEM); |
| 385 | |
| 386 | /* try dma pool if available */ |
| 387 | if (pkt->cur_pool.pool) |
| 388 | buf->va_base = cmdq_mbox_pool_alloc_impl(pkt->cur_pool.pool, |
| 389 | &buf->pa_base, pkt->cur_pool.cnt, *pkt->cur_pool.limit); |
| 390 | else if (cl) { |
| 391 | struct client_priv *priv = (struct client_priv *)cl->cl_priv; |
| 392 | |
| 393 | buf->va_base = cmdq_mbox_pool_alloc(cl, &buf->pa_base); |
| 394 | if (buf->va_base) { |
| 395 | pkt->cur_pool.pool = priv->buf_pool; |
| 396 | pkt->cur_pool.cnt = &priv->buf_cnt; |
| 397 | pkt->cur_pool.limit = &priv->pool_limit; |
| 398 | } |
| 399 | } |
| 400 | |
| 401 | if (buf->va_base) |
| 402 | buf->use_pool = true; |
| 403 | else /* allocate directly */ |
| 404 | buf->va_base = cmdq_mbox_buf_alloc(pkt->dev, |
| 405 | &buf->pa_base); |
| 406 | |
| 407 | if (!buf->va_base) { |
| 408 | cmdq_err("allocate cmd buffer failed"); |
| 409 | kfree(buf); |
| 410 | return ERR_PTR(-ENOMEM); |
| 411 | } |
| 412 | |
| 413 | list_add_tail(&buf->list_entry, &pkt->buf); |
| 414 | pkt->avail_buf_size += CMDQ_CMD_BUFFER_SIZE; |
| 415 | pkt->buf_size += CMDQ_CMD_BUFFER_SIZE; |
| 416 | |
| 417 | return buf; |
| 418 | } |
| 419 | |
| 420 | void cmdq_pkt_free_buf(struct cmdq_pkt *pkt) |
| 421 | { |
| 422 | struct cmdq_client *cl = (struct cmdq_client *)pkt->cl; |
| 423 | struct cmdq_pkt_buffer *buf, *tmp; |
| 424 | |
| 425 | list_for_each_entry_safe(buf, tmp, &pkt->buf, list_entry) { |
| 426 | list_del(&buf->list_entry); |
| 427 | if (buf->use_pool) { |
| 428 | if (pkt->cur_pool.pool) |
| 429 | cmdq_mbox_pool_free_impl(pkt->cur_pool.pool, |
| 430 | buf->va_base, buf->pa_base, |
| 431 | pkt->cur_pool.cnt); |
| 432 | else { |
| 433 | cmdq_err("free pool:%s dev:%#lx pa:%pa cl:%p", |
| 434 | buf->use_pool ? "true" : "false", |
| 435 | (unsigned long)pkt->dev, |
| 436 | &buf->pa_base, |
| 437 | cl); |
| 438 | cmdq_mbox_pool_free(cl, buf->va_base, |
| 439 | buf->pa_base); |
| 440 | } |
| 441 | } else |
| 442 | cmdq_mbox_buf_free(pkt->dev, buf->va_base, |
| 443 | buf->pa_base); |
| 444 | kfree(buf); |
| 445 | } |
| 446 | } |
| 447 | |
| 448 | s32 cmdq_pkt_add_cmd_buffer(struct cmdq_pkt *pkt) |
| 449 | { |
| 450 | s32 status = 0; |
| 451 | struct cmdq_pkt_buffer *buf, *prev; |
| 452 | u64 *prev_va; |
| 453 | |
| 454 | if (list_empty(&pkt->buf)) |
| 455 | prev = NULL; |
| 456 | else |
| 457 | prev = list_last_entry(&pkt->buf, typeof(*prev), list_entry); |
| 458 | |
| 459 | buf = cmdq_pkt_alloc_buf(pkt); |
| 460 | if (unlikely(IS_ERR(buf))) { |
| 461 | status = PTR_ERR(buf); |
| 462 | cmdq_err("alloc singe buffer fail status:%d pkt:0x%p", |
| 463 | status, pkt); |
| 464 | return status; |
| 465 | } |
| 466 | |
| 467 | /* if no previous buffer, success return */ |
| 468 | if (!prev) |
| 469 | return 0; |
| 470 | |
| 471 | /* copy last instruction to head of new buffer and |
| 472 | * use jump to replace |
| 473 | */ |
| 474 | prev_va = (u64 *)(prev->va_base + CMDQ_CMD_BUFFER_SIZE - |
| 475 | CMDQ_INST_SIZE); |
| 476 | *((u64 *)buf->va_base) = *prev_va; |
| 477 | |
| 478 | /* insert jump to jump start of new buffer. |
| 479 | * jump to absolute addr |
| 480 | */ |
| 481 | *prev_va = ((u64)(CMDQ_CODE_JUMP << 24 | 1) << 32) | |
| 482 | (CMDQ_REG_SHIFT_ADDR(buf->pa_base) & 0xFFFFFFFF); |
| 483 | |
| 484 | /* decrease available size since insert 1 jump */ |
| 485 | pkt->avail_buf_size -= CMDQ_INST_SIZE; |
| 486 | /* +1 for jump instruction */ |
| 487 | pkt->cmd_buf_size += CMDQ_INST_SIZE; |
| 488 | |
| 489 | return 0; |
| 490 | } |
| 491 | EXPORT_SYMBOL(cmdq_pkt_add_cmd_buffer); |
| 492 | |
| 493 | void cmdq_mbox_destroy(struct cmdq_client *client) |
| 494 | { |
| 495 | mbox_free_channel(client->chan); |
| 496 | kfree(client->cl_priv); |
| 497 | kfree(client); |
| 498 | } |
| 499 | EXPORT_SYMBOL(cmdq_mbox_destroy); |
| 500 | |
| 501 | struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client) |
| 502 | { |
| 503 | struct cmdq_pkt *pkt; |
| 504 | |
| 505 | pkt = kzalloc(sizeof(*pkt), GFP_KERNEL); |
| 506 | if (!pkt) |
| 507 | return ERR_PTR(-ENOMEM); |
| 508 | INIT_LIST_HEAD(&pkt->buf); |
| 509 | init_completion(&pkt->cmplt); |
| 510 | pkt->cl = (void *)client; |
| 511 | if (client) |
| 512 | pkt->dev = client->chan->mbox->dev; |
| 513 | |
| 514 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 515 | if (client && cmdq_util_is_feature_en(CMDQ_LOG_FEAT_PERF)) |
| 516 | cmdq_pkt_perf_begin(pkt); |
| 517 | #endif |
| 518 | |
| 519 | return pkt; |
| 520 | } |
| 521 | EXPORT_SYMBOL(cmdq_pkt_create); |
| 522 | |
| 523 | void cmdq_pkt_destroy(struct cmdq_pkt *pkt) |
| 524 | { |
| 525 | cmdq_pkt_free_buf(pkt); |
| 526 | kfree(pkt->flush_item); |
| 527 | kfree(pkt); |
| 528 | } |
| 529 | EXPORT_SYMBOL(cmdq_pkt_destroy); |
| 530 | |
| 531 | u64 *cmdq_pkt_get_va_by_offset(struct cmdq_pkt *pkt, size_t offset) |
| 532 | { |
| 533 | size_t offset_remaind = offset; |
| 534 | struct cmdq_pkt_buffer *buf; |
| 535 | |
| 536 | list_for_each_entry(buf, &pkt->buf, list_entry) { |
| 537 | if (offset_remaind >= CMDQ_CMD_BUFFER_SIZE) { |
| 538 | offset_remaind -= CMDQ_CMD_BUFFER_SIZE; |
| 539 | continue; |
| 540 | } |
| 541 | return (u64 *)(buf->va_base + offset_remaind); |
| 542 | } |
| 543 | |
| 544 | return NULL; |
| 545 | } |
| 546 | EXPORT_SYMBOL(cmdq_pkt_get_va_by_offset); |
| 547 | |
| 548 | dma_addr_t cmdq_pkt_get_pa_by_offset(struct cmdq_pkt *pkt, u32 offset) |
| 549 | { |
| 550 | u32 offset_remaind = offset; |
| 551 | struct cmdq_pkt_buffer *buf; |
| 552 | |
| 553 | list_for_each_entry(buf, &pkt->buf, list_entry) { |
| 554 | if (offset_remaind >= CMDQ_CMD_BUFFER_SIZE) { |
| 555 | offset_remaind -= CMDQ_CMD_BUFFER_SIZE; |
| 556 | continue; |
| 557 | } |
| 558 | |
| 559 | return buf->pa_base + offset_remaind; |
| 560 | } |
| 561 | |
| 562 | return 0; |
| 563 | } |
| 564 | EXPORT_SYMBOL(cmdq_pkt_get_pa_by_offset); |
| 565 | |
| 566 | static dma_addr_t cmdq_pkt_get_curr_buf_pa(struct cmdq_pkt *pkt) |
| 567 | { |
| 568 | struct cmdq_pkt_buffer *buf; |
| 569 | |
| 570 | buf = list_last_entry(&pkt->buf, typeof(*buf), list_entry); |
| 571 | |
| 572 | return buf->pa_base + CMDQ_CMD_BUFFER_SIZE - pkt->avail_buf_size; |
| 573 | } |
| 574 | |
| 575 | static bool cmdq_pkt_is_finalized(struct cmdq_pkt *pkt) |
| 576 | { |
| 577 | u64 *expect_eoc; |
| 578 | |
| 579 | if (pkt->cmd_buf_size < CMDQ_INST_SIZE * 2) |
| 580 | return false; |
| 581 | |
| 582 | expect_eoc = cmdq_pkt_get_va_by_offset(pkt, |
| 583 | pkt->cmd_buf_size - CMDQ_INST_SIZE * 2); |
| 584 | if (expect_eoc && *expect_eoc == CMDQ_EOC_CMD) |
| 585 | return true; |
| 586 | |
| 587 | return false; |
| 588 | } |
| 589 | |
| 590 | static void cmdq_pkt_instr_encoder(void *buf, u16 arg_c, u16 arg_b, |
| 591 | u16 arg_a, u8 s_op, u8 arg_c_type, u8 arg_b_type, u8 arg_a_type, u8 op) |
| 592 | { |
| 593 | struct cmdq_instruction *cmdq_inst; |
| 594 | |
| 595 | cmdq_inst = buf; |
| 596 | cmdq_inst->op = op; |
| 597 | cmdq_inst->arg_a_type = arg_a_type; |
| 598 | cmdq_inst->arg_b_type = arg_b_type; |
| 599 | cmdq_inst->arg_c_type = arg_c_type; |
| 600 | cmdq_inst->s_op = s_op; |
| 601 | cmdq_inst->arg_a = arg_a; |
| 602 | cmdq_inst->arg_b = arg_b; |
| 603 | cmdq_inst->arg_c = arg_c; |
| 604 | } |
| 605 | |
| 606 | s32 cmdq_pkt_append_command(struct cmdq_pkt *pkt, u16 arg_c, u16 arg_b, |
| 607 | u16 arg_a, u8 s_op, u8 arg_c_type, u8 arg_b_type, u8 arg_a_type, |
| 608 | enum cmdq_code code) |
| 609 | { |
| 610 | struct cmdq_pkt_buffer *buf; |
| 611 | void *va; |
| 612 | |
| 613 | if (!pkt) |
| 614 | return -EINVAL; |
| 615 | |
| 616 | if (unlikely(!pkt->avail_buf_size)) { |
| 617 | if (cmdq_pkt_add_cmd_buffer(pkt) < 0) |
| 618 | return -ENOMEM; |
| 619 | } |
| 620 | |
| 621 | buf = list_last_entry(&pkt->buf, typeof(*buf), list_entry); |
| 622 | va = buf->va_base + CMDQ_CMD_BUFFER_SIZE - pkt->avail_buf_size; |
| 623 | |
| 624 | cmdq_pkt_instr_encoder(va, arg_c, arg_b, arg_a, s_op, arg_c_type, |
| 625 | arg_b_type, arg_a_type, code); |
| 626 | pkt->cmd_buf_size += CMDQ_INST_SIZE; |
| 627 | pkt->avail_buf_size -= CMDQ_INST_SIZE; |
| 628 | |
| 629 | return 0; |
| 630 | } |
| 631 | |
| 632 | s32 cmdq_pkt_read(struct cmdq_pkt *pkt, struct cmdq_base *clt_base, |
| 633 | dma_addr_t src_addr, u16 dst_reg_idx) |
| 634 | { |
| 635 | s8 subsys; |
| 636 | |
| 637 | if (!(CMDQ_GET_ADDR_H(src_addr))) { |
| 638 | subsys = cmdq_subsys_base_to_id(clt_base, src_addr); |
| 639 | if (subsys >= 0) |
| 640 | return cmdq_pkt_read_reg(pkt, |
| 641 | clt_base->subsys[subsys].id, |
| 642 | CMDQ_GET_REG_OFFSET(src_addr), dst_reg_idx); |
| 643 | } |
| 644 | |
| 645 | return cmdq_pkt_read_addr(pkt, src_addr, dst_reg_idx); |
| 646 | } |
| 647 | EXPORT_SYMBOL(cmdq_pkt_read); |
| 648 | |
| 649 | s32 cmdq_pkt_read_reg(struct cmdq_pkt *pkt, u8 subsys, u16 offset, |
| 650 | u16 dst_reg_idx) |
| 651 | { |
| 652 | return cmdq_pkt_append_command(pkt, 0, offset, dst_reg_idx, subsys, |
| 653 | CMDQ_IMMEDIATE_VALUE, CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, |
| 654 | CMDQ_CODE_READ_S); |
| 655 | } |
| 656 | EXPORT_SYMBOL(cmdq_pkt_read_reg); |
| 657 | |
| 658 | s32 cmdq_pkt_read_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u16 dst_reg_idx) |
| 659 | { |
| 660 | s32 err; |
| 661 | const u16 src_reg_idx = CMDQ_SPR_FOR_TEMP; |
| 662 | |
| 663 | err = cmdq_pkt_assign_command(pkt, src_reg_idx, |
| 664 | CMDQ_GET_ADDR_HIGH(addr)); |
| 665 | if (err != 0) |
| 666 | return err; |
| 667 | |
| 668 | return cmdq_pkt_append_command(pkt, 0, CMDQ_GET_ADDR_LOW(addr), |
| 669 | dst_reg_idx, src_reg_idx, |
| 670 | CMDQ_IMMEDIATE_VALUE, CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, |
| 671 | CMDQ_CODE_READ_S); |
| 672 | } |
| 673 | EXPORT_SYMBOL(cmdq_pkt_read_addr); |
| 674 | |
| 675 | s32 cmdq_pkt_write_reg(struct cmdq_pkt *pkt, u8 subsys, |
| 676 | u16 offset, u16 src_reg_idx, u32 mask) |
| 677 | { |
| 678 | int err = 0; |
| 679 | enum cmdq_code op = CMDQ_CODE_WRITE_S; |
| 680 | |
| 681 | if (mask != 0xffffffff) { |
| 682 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask), |
| 683 | CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0, 0, CMDQ_CODE_MASK); |
| 684 | if (err != 0) |
| 685 | return err; |
| 686 | |
| 687 | op = CMDQ_CODE_WRITE_S_W_MASK; |
| 688 | } |
| 689 | |
| 690 | return cmdq_pkt_append_command(pkt, 0, src_reg_idx, offset, subsys, |
| 691 | CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, CMDQ_IMMEDIATE_VALUE, op); |
| 692 | } |
| 693 | EXPORT_SYMBOL(cmdq_pkt_write_reg); |
| 694 | |
| 695 | s32 cmdq_pkt_write_value(struct cmdq_pkt *pkt, u8 subsys, |
| 696 | u16 offset, u32 value, u32 mask) |
| 697 | { |
| 698 | int err = 0; |
| 699 | enum cmdq_code op = CMDQ_CODE_WRITE_S; |
| 700 | |
| 701 | if (mask != 0xffffffff) { |
| 702 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask), |
| 703 | CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0, 0, CMDQ_CODE_MASK); |
| 704 | if (err != 0) |
| 705 | return err; |
| 706 | |
| 707 | op = CMDQ_CODE_WRITE_S_W_MASK; |
| 708 | } |
| 709 | |
| 710 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value), |
| 711 | CMDQ_GET_ARG_B(value), offset, subsys, |
| 712 | CMDQ_IMMEDIATE_VALUE, CMDQ_IMMEDIATE_VALUE, |
| 713 | CMDQ_IMMEDIATE_VALUE, op); |
| 714 | } |
| 715 | EXPORT_SYMBOL(cmdq_pkt_write_value); |
| 716 | |
| 717 | s32 cmdq_pkt_write_reg_addr(struct cmdq_pkt *pkt, dma_addr_t addr, |
| 718 | u16 src_reg_idx, u32 mask) |
| 719 | { |
| 720 | s32 err; |
| 721 | const u16 dst_reg_idx = CMDQ_SPR_FOR_TEMP; |
| 722 | |
| 723 | err = cmdq_pkt_assign_command(pkt, dst_reg_idx, |
| 724 | CMDQ_GET_ADDR_HIGH(addr)); |
| 725 | if (err != 0) |
| 726 | return err; |
| 727 | |
| 728 | return cmdq_pkt_store_value_reg(pkt, dst_reg_idx, |
| 729 | CMDQ_GET_ADDR_LOW(addr), src_reg_idx, mask); |
| 730 | } |
| 731 | EXPORT_SYMBOL(cmdq_pkt_write_reg_addr); |
| 732 | |
| 733 | s32 cmdq_pkt_write_value_addr(struct cmdq_pkt *pkt, dma_addr_t addr, |
| 734 | u32 value, u32 mask) |
| 735 | { |
| 736 | s32 err; |
| 737 | const u16 dst_reg_idx = CMDQ_SPR_FOR_TEMP; |
| 738 | |
| 739 | /* assign bit 47:16 to spr temp */ |
| 740 | err = cmdq_pkt_assign_command(pkt, dst_reg_idx, |
| 741 | CMDQ_GET_ADDR_HIGH(addr)); |
| 742 | if (err != 0) |
| 743 | return err; |
| 744 | |
| 745 | return cmdq_pkt_store_value(pkt, dst_reg_idx, CMDQ_GET_ADDR_LOW(addr), |
| 746 | value, mask); |
| 747 | } |
| 748 | EXPORT_SYMBOL(cmdq_pkt_write_value_addr); |
| 749 | |
| 750 | s32 cmdq_pkt_store_value(struct cmdq_pkt *pkt, u16 indirect_dst_reg_idx, |
| 751 | u16 dst_addr_low, u32 value, u32 mask) |
| 752 | { |
| 753 | int err = 0; |
| 754 | enum cmdq_code op = CMDQ_CODE_WRITE_S; |
| 755 | |
| 756 | if (mask != 0xffffffff) { |
| 757 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask), |
| 758 | CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0, 0, CMDQ_CODE_MASK); |
| 759 | if (err != 0) |
| 760 | return err; |
| 761 | |
| 762 | op = CMDQ_CODE_WRITE_S_W_MASK; |
| 763 | } |
| 764 | |
| 765 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value), |
| 766 | CMDQ_GET_ARG_B(value), dst_addr_low, |
| 767 | indirect_dst_reg_idx, CMDQ_IMMEDIATE_VALUE, |
| 768 | CMDQ_IMMEDIATE_VALUE, CMDQ_IMMEDIATE_VALUE, op); |
| 769 | } |
| 770 | EXPORT_SYMBOL(cmdq_pkt_store_value); |
| 771 | |
| 772 | s32 cmdq_pkt_store_value_reg(struct cmdq_pkt *pkt, u16 indirect_dst_reg_idx, |
| 773 | u16 dst_addr_low, u16 indirect_src_reg_idx, u32 mask) |
| 774 | { |
| 775 | int err = 0; |
| 776 | enum cmdq_code op = CMDQ_CODE_WRITE_S; |
| 777 | |
| 778 | if (mask != 0xffffffff) { |
| 779 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask), |
| 780 | CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0, 0, CMDQ_CODE_MASK); |
| 781 | if (err != 0) |
| 782 | return err; |
| 783 | |
| 784 | op = CMDQ_CODE_WRITE_S_W_MASK; |
| 785 | } |
| 786 | |
| 787 | if (dst_addr_low) { |
| 788 | return cmdq_pkt_append_command(pkt, 0, indirect_src_reg_idx, |
| 789 | dst_addr_low, indirect_dst_reg_idx, |
| 790 | CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, |
| 791 | CMDQ_IMMEDIATE_VALUE, op); |
| 792 | } |
| 793 | |
| 794 | return cmdq_pkt_append_command(pkt, 0, |
| 795 | indirect_src_reg_idx, indirect_dst_reg_idx, 0, |
| 796 | CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, CMDQ_REG_TYPE, op); |
| 797 | } |
| 798 | EXPORT_SYMBOL(cmdq_pkt_store_value_reg); |
| 799 | |
| 800 | s32 cmdq_pkt_write_indriect(struct cmdq_pkt *pkt, struct cmdq_base *clt_base, |
| 801 | dma_addr_t addr, u16 src_reg_idx, u32 mask) |
| 802 | { |
| 803 | const u32 base = CMDQ_GET_ADDR_H(addr) ? 0 : addr & 0xFFFF0000; |
| 804 | s32 subsys; |
| 805 | |
| 806 | subsys = cmdq_subsys_base_to_id(clt_base, base); |
| 807 | if (subsys >= 0) { |
| 808 | return cmdq_pkt_write_reg(pkt, subsys, |
| 809 | base, src_reg_idx, mask); |
| 810 | } |
| 811 | |
| 812 | return cmdq_pkt_write_reg_addr(pkt, addr, src_reg_idx, mask); |
| 813 | } |
| 814 | EXPORT_SYMBOL(cmdq_pkt_write_indriect); |
| 815 | |
| 816 | s32 cmdq_pkt_write(struct cmdq_pkt *pkt, struct cmdq_base *clt_base, |
| 817 | dma_addr_t addr, u32 value, u32 mask) |
| 818 | { |
| 819 | const u32 base = CMDQ_GET_ADDR_H(addr) ? 0 : addr & 0xFFFF0000; |
| 820 | s32 subsys; |
| 821 | |
| 822 | subsys = cmdq_subsys_base_to_id(clt_base, base); |
| 823 | if (subsys >= 0) { |
| 824 | return cmdq_pkt_write_value(pkt, subsys, |
| 825 | CMDQ_GET_REG_OFFSET(addr), value, mask); |
| 826 | } |
| 827 | |
| 828 | return cmdq_pkt_write_value_addr(pkt, addr, value, mask); |
| 829 | } |
| 830 | EXPORT_SYMBOL(cmdq_pkt_write); |
| 831 | |
| 832 | s32 cmdq_pkt_mem_move(struct cmdq_pkt *pkt, struct cmdq_base *clt_base, |
| 833 | dma_addr_t src_addr, dma_addr_t dst_addr, u16 swap_reg_idx) |
| 834 | { |
| 835 | s32 err; |
| 836 | |
| 837 | err = cmdq_pkt_read(pkt, clt_base, src_addr, swap_reg_idx); |
| 838 | if (err != 0) |
| 839 | return err; |
| 840 | |
| 841 | return cmdq_pkt_write_indriect(pkt, clt_base, dst_addr, |
| 842 | swap_reg_idx, ~0); |
| 843 | } |
| 844 | EXPORT_SYMBOL(cmdq_pkt_mem_move); |
| 845 | |
| 846 | s32 cmdq_pkt_assign_command(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) |
| 847 | { |
| 848 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value), |
| 849 | CMDQ_GET_ARG_B(value), reg_idx, |
| 850 | CMDQ_LOGIC_ASSIGN, CMDQ_IMMEDIATE_VALUE, |
| 851 | CMDQ_IMMEDIATE_VALUE, CMDQ_REG_TYPE, |
| 852 | CMDQ_CODE_LOGIC); |
| 853 | } |
| 854 | EXPORT_SYMBOL(cmdq_pkt_assign_command); |
| 855 | |
| 856 | s32 cmdq_pkt_logic_command(struct cmdq_pkt *pkt, enum CMDQ_LOGIC_ENUM s_op, |
| 857 | u16 result_reg_idx, |
| 858 | struct cmdq_operand *left_operand, |
| 859 | struct cmdq_operand *right_operand) |
| 860 | { |
| 861 | u32 left_idx_value; |
| 862 | u32 right_idx_value; |
| 863 | |
| 864 | if (!left_operand || !right_operand) |
| 865 | return -EINVAL; |
| 866 | |
| 867 | left_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(left_operand); |
| 868 | right_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(right_operand); |
| 869 | |
| 870 | return cmdq_pkt_append_command(pkt, right_idx_value, left_idx_value, |
| 871 | result_reg_idx, s_op, CMDQ_OPERAND_TYPE(right_operand), |
| 872 | CMDQ_OPERAND_TYPE(left_operand), CMDQ_REG_TYPE, |
| 873 | CMDQ_CODE_LOGIC); |
| 874 | } |
| 875 | EXPORT_SYMBOL(cmdq_pkt_logic_command); |
| 876 | |
| 877 | s32 cmdq_pkt_jump(struct cmdq_pkt *pkt, s32 offset) |
| 878 | { |
| 879 | s64 off = CMDQ_REG_SHIFT_ADDR((s64)offset); |
| 880 | |
| 881 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(off), |
| 882 | CMDQ_GET_ARG_B(off), 0, 0, 0, 0, 0, CMDQ_CODE_JUMP); |
| 883 | } |
| 884 | EXPORT_SYMBOL(cmdq_pkt_jump); |
| 885 | |
| 886 | s32 cmdq_pkt_jump_addr(struct cmdq_pkt *pkt, u32 addr) |
| 887 | { |
| 888 | dma_addr_t to_addr = CMDQ_REG_SHIFT_ADDR(addr); |
| 889 | |
| 890 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(to_addr), |
| 891 | CMDQ_GET_ARG_B(to_addr), 1, 0, 0, 0, 0, CMDQ_CODE_JUMP); |
| 892 | } |
| 893 | EXPORT_SYMBOL(cmdq_pkt_jump_addr); |
| 894 | |
| 895 | s32 cmdq_pkt_cond_jump(struct cmdq_pkt *pkt, |
| 896 | u16 offset_reg_idx, |
| 897 | struct cmdq_operand *left_operand, |
| 898 | struct cmdq_operand *right_operand, |
| 899 | enum CMDQ_CONDITION_ENUM condition_operator) |
| 900 | { |
| 901 | u32 left_idx_value; |
| 902 | u32 right_idx_value; |
| 903 | |
| 904 | if (!left_operand || !right_operand) |
| 905 | return -EINVAL; |
| 906 | |
| 907 | left_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(left_operand); |
| 908 | right_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(right_operand); |
| 909 | |
| 910 | return cmdq_pkt_append_command(pkt, right_idx_value, left_idx_value, |
| 911 | offset_reg_idx, condition_operator, |
| 912 | CMDQ_OPERAND_TYPE(right_operand), |
| 913 | CMDQ_OPERAND_TYPE(left_operand), |
| 914 | CMDQ_REG_TYPE, CMDQ_CODE_JUMP_C_RELATIVE); |
| 915 | } |
| 916 | EXPORT_SYMBOL(cmdq_pkt_cond_jump); |
| 917 | |
| 918 | s32 cmdq_pkt_cond_jump_abs(struct cmdq_pkt *pkt, |
| 919 | u16 addr_reg_idx, |
| 920 | struct cmdq_operand *left_operand, |
| 921 | struct cmdq_operand *right_operand, |
| 922 | enum CMDQ_CONDITION_ENUM condition_operator) |
| 923 | { |
| 924 | u16 left_idx_value; |
| 925 | u16 right_idx_value; |
| 926 | |
| 927 | if (!left_operand || !right_operand) |
| 928 | return -EINVAL; |
| 929 | |
| 930 | left_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(left_operand); |
| 931 | right_idx_value = CMDQ_OPERAND_GET_IDX_VALUE(right_operand); |
| 932 | |
| 933 | return cmdq_pkt_append_command(pkt, right_idx_value, left_idx_value, |
| 934 | addr_reg_idx, condition_operator, |
| 935 | CMDQ_OPERAND_TYPE(right_operand), |
| 936 | CMDQ_OPERAND_TYPE(left_operand), |
| 937 | CMDQ_REG_TYPE, CMDQ_CODE_JUMP_C_ABSOLUTE); |
| 938 | } |
| 939 | EXPORT_SYMBOL(cmdq_pkt_cond_jump_abs); |
| 940 | |
| 941 | s32 cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, u32 value, u32 addr, u32 mask, |
| 942 | u8 reg_gpr) |
| 943 | { |
| 944 | s32 err; |
| 945 | |
| 946 | if (mask != 0xffffffff) { |
| 947 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask), |
| 948 | CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0, 0, CMDQ_CODE_MASK); |
| 949 | if (err != 0) |
| 950 | return err; |
| 951 | |
| 952 | addr = addr | 0x1; |
| 953 | } |
| 954 | |
| 955 | /* Move extra handle APB address to GPR */ |
| 956 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(addr), |
| 957 | CMDQ_GET_ARG_B(addr), 0, reg_gpr, |
| 958 | 0, 0, 1, CMDQ_CODE_MOVE); |
| 959 | if (err != 0) |
| 960 | cmdq_err("%s fail append command move addr to reg err:%d", |
| 961 | __func__, err); |
| 962 | |
| 963 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value), |
| 964 | CMDQ_GET_ARG_B(value), 0, reg_gpr, |
| 965 | 0, 0, 1, CMDQ_CODE_POLL); |
| 966 | if (err != 0) |
| 967 | cmdq_err("%s fail append command poll err:%d", |
| 968 | __func__, err); |
| 969 | |
| 970 | return err; |
| 971 | } |
| 972 | EXPORT_SYMBOL(cmdq_pkt_poll_addr); |
| 973 | |
| 974 | s32 cmdq_pkt_poll_reg(struct cmdq_pkt *pkt, u32 value, u8 subsys, |
| 975 | u16 offset, u32 mask) |
| 976 | { |
| 977 | s32 err; |
| 978 | |
| 979 | if (mask != 0xffffffff) { |
| 980 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(~mask), |
| 981 | CMDQ_GET_ARG_B(~mask), 0, 0, 0, 0, 0, CMDQ_CODE_MASK); |
| 982 | if (err != 0) |
| 983 | return err; |
| 984 | |
| 985 | offset = offset | 0x1; |
| 986 | } |
| 987 | |
| 988 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(value), |
| 989 | CMDQ_GET_ARG_B(value), offset, subsys, 0, 0, 0, CMDQ_CODE_POLL); |
| 990 | } |
| 991 | EXPORT_SYMBOL(cmdq_pkt_poll_reg); |
| 992 | |
| 993 | s32 cmdq_pkt_poll(struct cmdq_pkt *pkt, struct cmdq_base *clt_base, |
| 994 | u32 value, u32 addr, u32 mask, u8 reg_gpr) |
| 995 | { |
| 996 | const u32 base = addr & 0xFFFF0000; |
| 997 | s8 subsys; |
| 998 | |
| 999 | subsys = cmdq_subsys_base_to_id(clt_base, base); |
| 1000 | if (subsys >= 0) |
| 1001 | return cmdq_pkt_poll_reg(pkt, value, subsys, |
| 1002 | CMDQ_GET_REG_OFFSET(addr), mask); |
| 1003 | |
| 1004 | return cmdq_pkt_poll_addr(pkt, value, addr, mask, reg_gpr); |
| 1005 | } |
| 1006 | EXPORT_SYMBOL(cmdq_pkt_poll); |
| 1007 | |
| 1008 | int cmdq_pkt_timer_en(struct cmdq_pkt *pkt) |
| 1009 | { |
| 1010 | struct cmdq_client *cl = pkt->cl; |
| 1011 | const u32 en = 0x8000000; |
| 1012 | phys_addr_t gce_pa; |
| 1013 | |
| 1014 | if (!cl) |
| 1015 | return -EINVAL; |
| 1016 | |
| 1017 | gce_pa = cmdq_mbox_get_base_pa(cl->chan); |
| 1018 | |
| 1019 | return cmdq_pkt_write(pkt, NULL, gce_pa + CMDQ_TPR_MASK, en, en); |
| 1020 | } |
| 1021 | EXPORT_SYMBOL(cmdq_pkt_timer_en); |
| 1022 | |
| 1023 | s32 cmdq_pkt_sleep(struct cmdq_pkt *pkt, u32 tick, u16 reg_gpr) |
| 1024 | { |
| 1025 | const u32 tpr_en = 1 << reg_gpr; |
| 1026 | const u16 event = (u16)CMDQ_EVENT_GPR_TIMER + reg_gpr; |
| 1027 | struct cmdq_client *cl = (struct cmdq_client *)pkt->cl; |
| 1028 | struct cmdq_operand lop, rop; |
| 1029 | const u32 timeout_en = cmdq_mbox_get_base_pa(cl->chan) + |
| 1030 | CMDQ_TPR_TIMEOUT_EN; |
| 1031 | |
| 1032 | /* set target gpr value to max to avoid event trigger |
| 1033 | * before new value write to gpr |
| 1034 | */ |
| 1035 | lop.reg = true; |
| 1036 | lop.idx = CMDQ_TPR_ID; |
| 1037 | rop.reg = false; |
| 1038 | rop.value = 1; |
| 1039 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_SUBTRACT, |
| 1040 | CMDQ_GPR_CNT_ID + reg_gpr, &lop, &rop); |
| 1041 | |
| 1042 | lop.reg = true; |
| 1043 | lop.idx = CMDQ_CPR_TPR_MASK; |
| 1044 | rop.reg = false; |
| 1045 | rop.value = tpr_en; |
| 1046 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_OR, CMDQ_CPR_TPR_MASK, |
| 1047 | &lop, &rop); |
| 1048 | cmdq_pkt_write_indriect(pkt, NULL, timeout_en, CMDQ_CPR_TPR_MASK, ~0); |
| 1049 | cmdq_pkt_read(pkt, NULL, timeout_en, CMDQ_SPR_FOR_TEMP); |
| 1050 | cmdq_pkt_clear_event(pkt, event); |
| 1051 | |
| 1052 | if (tick < U16_MAX) { |
| 1053 | lop.reg = true; |
| 1054 | lop.idx = CMDQ_TPR_ID; |
| 1055 | rop.reg = false; |
| 1056 | rop.value = tick; |
| 1057 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_ADD, |
| 1058 | CMDQ_GPR_CNT_ID + reg_gpr, &lop, &rop); |
| 1059 | } else { |
| 1060 | cmdq_pkt_assign_command(pkt, CMDQ_SPR_FOR_TEMP, tick); |
| 1061 | lop.reg = true; |
| 1062 | lop.idx = CMDQ_TPR_ID; |
| 1063 | rop.reg = true; |
| 1064 | rop.value = CMDQ_SPR_FOR_TEMP; |
| 1065 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_ADD, |
| 1066 | CMDQ_GPR_CNT_ID + reg_gpr, &lop, &rop); |
| 1067 | } |
| 1068 | cmdq_pkt_wfe(pkt, event); |
| 1069 | |
| 1070 | lop.reg = true; |
| 1071 | lop.idx = CMDQ_CPR_TPR_MASK; |
| 1072 | rop.reg = false; |
| 1073 | rop.value = ~tpr_en; |
| 1074 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_AND, CMDQ_CPR_TPR_MASK, |
| 1075 | &lop, &rop); |
| 1076 | |
| 1077 | return 0; |
| 1078 | } |
| 1079 | EXPORT_SYMBOL(cmdq_pkt_sleep); |
| 1080 | |
| 1081 | s32 cmdq_pkt_poll_timeout(struct cmdq_pkt *pkt, u32 value, u8 subsys, |
| 1082 | phys_addr_t addr, u32 mask, u16 count, u16 reg_gpr) |
| 1083 | { |
| 1084 | const u16 reg_tmp = CMDQ_SPR_FOR_TEMP; |
| 1085 | const u16 reg_val = CMDQ_THR_SPR_IDX1; |
| 1086 | const u16 reg_poll = CMDQ_THR_SPR_IDX2; |
| 1087 | const u16 reg_counter = CMDQ_THR_SPR_IDX3; |
| 1088 | u32 begin_mark, end_addr_mark, cnt_end_addr_mark = 0, shift_pa; |
| 1089 | dma_addr_t cmd_pa; |
| 1090 | struct cmdq_operand lop, rop; |
| 1091 | struct cmdq_instruction *inst; |
| 1092 | bool absolute = true; |
| 1093 | |
| 1094 | if (pkt->avail_buf_size > PAGE_SIZE) |
| 1095 | absolute = false; |
| 1096 | |
| 1097 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1098 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 1099 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 1100 | if (pkt->sec_data) |
| 1101 | absolute = false; |
| 1102 | #endif |
| 1103 | #endif |
| 1104 | |
| 1105 | /* assign compare value as compare target later */ |
| 1106 | cmdq_pkt_assign_command(pkt, reg_val, value); |
| 1107 | |
| 1108 | /* init loop counter as 0, counter can be count poll limit or debug */ |
| 1109 | cmdq_pkt_assign_command(pkt, reg_counter, 0); |
| 1110 | |
| 1111 | /* mark begin offset of this operation */ |
| 1112 | begin_mark = pkt->cmd_buf_size; |
| 1113 | |
| 1114 | /* read target address */ |
| 1115 | if (subsys != SUBSYS_NO_SUPPORT) |
| 1116 | cmdq_pkt_read_reg(pkt, subsys, CMDQ_GET_REG_OFFSET(addr), |
| 1117 | reg_poll); |
| 1118 | else |
| 1119 | cmdq_pkt_read_addr(pkt, addr, reg_poll); |
| 1120 | |
| 1121 | /* mask it */ |
| 1122 | if (mask != ~0) { |
| 1123 | lop.reg = true; |
| 1124 | lop.idx = reg_poll; |
| 1125 | rop.reg = true; |
| 1126 | rop.idx = reg_tmp; |
| 1127 | |
| 1128 | cmdq_pkt_assign_command(pkt, reg_tmp, mask); |
| 1129 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_AND, reg_poll, |
| 1130 | &lop, &rop); |
| 1131 | } |
| 1132 | |
| 1133 | /* assign temp spr as empty, shoudl fill in end addr later */ |
| 1134 | end_addr_mark = pkt->cmd_buf_size; |
| 1135 | cmdq_pkt_assign_command(pkt, reg_tmp, 0); |
| 1136 | |
| 1137 | /* compare and jump to end if equal |
| 1138 | * note that end address will fill in later into last instruction |
| 1139 | */ |
| 1140 | lop.reg = true; |
| 1141 | lop.idx = reg_poll; |
| 1142 | rop.reg = true; |
| 1143 | rop.idx = reg_val; |
| 1144 | if (absolute) |
| 1145 | cmdq_pkt_cond_jump_abs(pkt, reg_tmp, &lop, &rop, CMDQ_EQUAL); |
| 1146 | else |
| 1147 | cmdq_pkt_cond_jump(pkt, reg_tmp, &lop, &rop, CMDQ_EQUAL); |
| 1148 | |
| 1149 | /* check if timeup and inc counter */ |
| 1150 | if (count != U16_MAX) { |
| 1151 | if (!absolute) { |
| 1152 | cnt_end_addr_mark = pkt->cmd_buf_size; |
| 1153 | cmdq_pkt_assign_command(pkt, reg_tmp, 0); |
| 1154 | } |
| 1155 | lop.reg = true; |
| 1156 | lop.idx = reg_counter; |
| 1157 | rop.reg = false; |
| 1158 | rop.value = count; |
| 1159 | if (absolute) |
| 1160 | cmdq_pkt_cond_jump_abs(pkt, reg_tmp, &lop, &rop, |
| 1161 | CMDQ_GREATER_THAN_AND_EQUAL); |
| 1162 | else |
| 1163 | cmdq_pkt_cond_jump(pkt, reg_tmp, &lop, &rop, |
| 1164 | CMDQ_GREATER_THAN_AND_EQUAL); |
| 1165 | } |
| 1166 | |
| 1167 | /* always inc counter */ |
| 1168 | lop.reg = true; |
| 1169 | lop.idx = reg_counter; |
| 1170 | rop.reg = false; |
| 1171 | rop.value = 1; |
| 1172 | cmdq_pkt_logic_command(pkt, CMDQ_LOGIC_ADD, reg_counter, &lop, |
| 1173 | &rop); |
| 1174 | |
| 1175 | cmdq_pkt_sleep(pkt, CMDQ_POLL_TICK, reg_gpr); |
| 1176 | |
| 1177 | /* loop to begin */ |
| 1178 | if (absolute) { |
| 1179 | cmd_pa = cmdq_pkt_get_pa_by_offset(pkt, begin_mark); |
| 1180 | cmdq_pkt_jump_addr(pkt, cmd_pa); |
| 1181 | } else { |
| 1182 | /* jump relative back to begin mark */ |
| 1183 | cmdq_pkt_jump(pkt, -(s32)(pkt->cmd_buf_size - begin_mark)); |
| 1184 | } |
| 1185 | |
| 1186 | /* read current buffer pa as end mark and fill preview assign */ |
| 1187 | cmd_pa = cmdq_pkt_get_curr_buf_pa(pkt); |
| 1188 | inst = (struct cmdq_instruction *)cmdq_pkt_get_va_by_offset( |
| 1189 | pkt, end_addr_mark); |
| 1190 | /* instruction may hit boundary case, |
| 1191 | * check if op code is jump and get next instruction if necessary |
| 1192 | */ |
| 1193 | if (inst->op == CMDQ_CODE_JUMP) |
| 1194 | inst = (struct cmdq_instruction *)cmdq_pkt_get_va_by_offset( |
| 1195 | pkt, end_addr_mark + CMDQ_INST_SIZE); |
| 1196 | if (absolute) |
| 1197 | shift_pa = CMDQ_REG_SHIFT_ADDR(cmd_pa); |
| 1198 | else |
| 1199 | shift_pa = CMDQ_REG_SHIFT_ADDR( |
| 1200 | pkt->cmd_buf_size - end_addr_mark - CMDQ_INST_SIZE); |
| 1201 | inst->arg_b = CMDQ_GET_ARG_B(shift_pa); |
| 1202 | inst->arg_c = CMDQ_GET_ARG_C(shift_pa); |
| 1203 | |
| 1204 | /* relative case the counter have different offset */ |
| 1205 | if (cnt_end_addr_mark) { |
| 1206 | inst = (struct cmdq_instruction *)cmdq_pkt_get_va_by_offset( |
| 1207 | pkt, cnt_end_addr_mark); |
| 1208 | if (inst->op == CMDQ_CODE_JUMP) |
| 1209 | inst = (struct cmdq_instruction *) |
| 1210 | cmdq_pkt_get_va_by_offset( |
| 1211 | pkt, end_addr_mark + CMDQ_INST_SIZE); |
| 1212 | shift_pa = CMDQ_REG_SHIFT_ADDR( |
| 1213 | pkt->cmd_buf_size - cnt_end_addr_mark - CMDQ_INST_SIZE); |
| 1214 | inst->arg_b = CMDQ_GET_ARG_B(shift_pa); |
| 1215 | inst->arg_c = CMDQ_GET_ARG_C(shift_pa); |
| 1216 | } |
| 1217 | |
| 1218 | return 0; |
| 1219 | } |
| 1220 | EXPORT_SYMBOL(cmdq_pkt_poll_timeout); |
| 1221 | |
| 1222 | void cmdq_pkt_perf_begin(struct cmdq_pkt *pkt) |
| 1223 | { |
| 1224 | dma_addr_t pa; |
| 1225 | struct cmdq_pkt_buffer *buf; |
| 1226 | |
| 1227 | if (!pkt->buf_size) |
| 1228 | if (cmdq_pkt_add_cmd_buffer(pkt) < 0) |
| 1229 | return; |
| 1230 | |
| 1231 | pa = cmdq_pkt_get_pa_by_offset(pkt, 0) + CMDQ_DBG_PERFBEGIN; |
| 1232 | cmdq_pkt_write_indriect(pkt, NULL, pa, CMDQ_TPR_ID, ~0); |
| 1233 | |
| 1234 | buf = list_first_entry(&pkt->buf, typeof(*buf), list_entry); |
| 1235 | *(u32 *)(buf->va_base + CMDQ_DBG_PERFBEGIN) = 0xdeaddead; |
| 1236 | } |
| 1237 | EXPORT_SYMBOL(cmdq_pkt_perf_begin); |
| 1238 | |
| 1239 | void cmdq_pkt_perf_end(struct cmdq_pkt *pkt) |
| 1240 | { |
| 1241 | dma_addr_t pa; |
| 1242 | struct cmdq_pkt_buffer *buf; |
| 1243 | |
| 1244 | if (!pkt->buf_size) |
| 1245 | if (cmdq_pkt_add_cmd_buffer(pkt) < 0) |
| 1246 | return; |
| 1247 | |
| 1248 | pa = cmdq_pkt_get_pa_by_offset(pkt, 0) + CMDQ_DBG_PERFEND; |
| 1249 | cmdq_pkt_write_indriect(pkt, NULL, pa, CMDQ_TPR_ID, ~0); |
| 1250 | |
| 1251 | buf = list_first_entry(&pkt->buf, typeof(*buf), list_entry); |
| 1252 | *(u32 *)(buf->va_base + CMDQ_DBG_PERFEND) = 0xdeaddead; |
| 1253 | } |
| 1254 | EXPORT_SYMBOL(cmdq_pkt_perf_end); |
| 1255 | |
| 1256 | u32 *cmdq_pkt_get_perf_ret(struct cmdq_pkt *pkt) |
| 1257 | { |
| 1258 | struct cmdq_pkt_buffer *buf; |
| 1259 | |
| 1260 | if (!pkt->cmd_buf_size) |
| 1261 | return NULL; |
| 1262 | |
| 1263 | buf = list_first_entry(&pkt->buf, typeof(*buf), |
| 1264 | list_entry); |
| 1265 | |
| 1266 | return (u32 *)(buf->va_base + CMDQ_DBG_PERFBEGIN); |
| 1267 | } |
| 1268 | EXPORT_SYMBOL(cmdq_pkt_get_perf_ret); |
| 1269 | |
| 1270 | int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) |
| 1271 | { |
| 1272 | u32 arg_b; |
| 1273 | |
| 1274 | if (event >= CMDQ_EVENT_MAX) |
| 1275 | return -EINVAL; |
| 1276 | |
| 1277 | /* |
| 1278 | * WFE arg_b |
| 1279 | * bit 0-11: wait value |
| 1280 | * bit 15: 1 - wait, 0 - no wait |
| 1281 | * bit 16-27: update value |
| 1282 | * bit 31: 1 - update, 0 - no update |
| 1283 | */ |
| 1284 | arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; |
| 1285 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(arg_b), |
| 1286 | CMDQ_GET_ARG_B(arg_b), event, |
| 1287 | 0, 0, 0, 0, CMDQ_CODE_WFE); |
| 1288 | } |
| 1289 | EXPORT_SYMBOL(cmdq_pkt_wfe); |
| 1290 | |
| 1291 | int cmdq_pkt_wait_no_clear(struct cmdq_pkt *pkt, u16 event) |
| 1292 | { |
| 1293 | u32 arg_b; |
| 1294 | |
| 1295 | if (event >= CMDQ_EVENT_MAX) |
| 1296 | return -EINVAL; |
| 1297 | |
| 1298 | /* |
| 1299 | * WFE arg_b |
| 1300 | * bit 0-11: wait value |
| 1301 | * bit 15: 1 - wait, 0 - no wait |
| 1302 | * bit 16-27: update value |
| 1303 | * bit 31: 1 - update, 0 - no update |
| 1304 | */ |
| 1305 | arg_b = CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE; |
| 1306 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(arg_b), |
| 1307 | CMDQ_GET_ARG_B(arg_b), event, |
| 1308 | 0, 0, 0, 0, CMDQ_CODE_WFE); |
| 1309 | } |
| 1310 | EXPORT_SYMBOL(cmdq_pkt_wait_no_clear); |
| 1311 | |
| 1312 | int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event) |
| 1313 | { |
| 1314 | u32 arg_b; |
| 1315 | |
| 1316 | if (event >= CMDQ_EVENT_MAX) |
| 1317 | return -EINVAL; |
| 1318 | |
| 1319 | /* |
| 1320 | * WFE arg_b |
| 1321 | * bit 0-11: wait value |
| 1322 | * bit 15: 1 - wait, 0 - no wait |
| 1323 | * bit 16-27: update value |
| 1324 | * bit 31: 1 - update, 0 - no update |
| 1325 | */ |
| 1326 | arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT; |
| 1327 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(arg_b), |
| 1328 | CMDQ_GET_ARG_B(arg_b), event, |
| 1329 | 0, 0, 0, 0, CMDQ_CODE_WFE); |
| 1330 | } |
| 1331 | EXPORT_SYMBOL(cmdq_pkt_acquire_event); |
| 1332 | |
| 1333 | s32 cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) |
| 1334 | { |
| 1335 | if (event >= CMDQ_EVENT_MAX) |
| 1336 | return -EINVAL; |
| 1337 | |
| 1338 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_WFE_UPDATE), |
| 1339 | CMDQ_GET_ARG_B(CMDQ_WFE_UPDATE), event, |
| 1340 | 0, 0, 0, 0, CMDQ_CODE_WFE); |
| 1341 | } |
| 1342 | EXPORT_SYMBOL(cmdq_pkt_clear_event); |
| 1343 | |
| 1344 | s32 cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event) |
| 1345 | { |
| 1346 | u32 arg_b; |
| 1347 | |
| 1348 | if (event >= CMDQ_EVENT_MAX) |
| 1349 | return -EINVAL; |
| 1350 | |
| 1351 | arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE; |
| 1352 | return cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(arg_b), |
| 1353 | CMDQ_GET_ARG_B(arg_b), event, |
| 1354 | 0, 0, 0, 0, CMDQ_CODE_WFE); |
| 1355 | } |
| 1356 | EXPORT_SYMBOL(cmdq_pkt_set_event); |
| 1357 | |
| 1358 | s32 cmdq_pkt_handshake_event(struct cmdq_pkt *pkt, u16 event) |
| 1359 | { |
| 1360 | u16 shake_bit = 1 << (event - CMDQ_EVENT_HANDSHAKE); |
| 1361 | |
| 1362 | return cmdq_pkt_assign_command(pkt, CMDQ_HANDSHAKE_REG, shake_bit); |
| 1363 | } |
| 1364 | EXPORT_SYMBOL(cmdq_pkt_handshake_event); |
| 1365 | |
| 1366 | s32 cmdq_pkt_finalize(struct cmdq_pkt *pkt) |
| 1367 | { |
| 1368 | int err; |
| 1369 | |
| 1370 | if (cmdq_pkt_is_finalized(pkt)) |
| 1371 | return 0; |
| 1372 | |
| 1373 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1374 | if (cmdq_util_is_feature_en(CMDQ_LOG_FEAT_PERF)) |
| 1375 | cmdq_pkt_perf_end(pkt); |
| 1376 | |
| 1377 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 1378 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 1379 | if (pkt->sec_data) { |
| 1380 | err = cmdq_sec_insert_backup_cookie(pkt); |
| 1381 | if (err) |
| 1382 | return err; |
| 1383 | } |
| 1384 | #endif |
| 1385 | #endif /* end of CONFIG_MTK_CMDQ_MBOX_EXT */ |
| 1386 | |
| 1387 | /* insert EOC and generate IRQ for each command iteration */ |
| 1388 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_EOC_IRQ_EN), |
| 1389 | CMDQ_GET_ARG_B(CMDQ_EOC_IRQ_EN), 0, 0, 0, 0, 0, CMDQ_CODE_EOC); |
| 1390 | if (err < 0) |
| 1391 | return err; |
| 1392 | |
| 1393 | /* JUMP to end */ |
| 1394 | err = cmdq_pkt_jump(pkt, CMDQ_JUMP_PASS); |
| 1395 | if (err < 0) |
| 1396 | return err; |
| 1397 | |
| 1398 | cmdq_log("finalize: add EOC and JUMP cmd"); |
| 1399 | |
| 1400 | return 0; |
| 1401 | } |
| 1402 | EXPORT_SYMBOL(cmdq_pkt_finalize); |
| 1403 | |
| 1404 | s32 cmdq_pkt_finalize_loop(struct cmdq_pkt *pkt) |
| 1405 | { |
| 1406 | u32 start_pa; |
| 1407 | s32 err; |
| 1408 | |
| 1409 | if (cmdq_pkt_is_finalized(pkt)) |
| 1410 | return 0; |
| 1411 | |
| 1412 | /* insert EOC and generate IRQ for each command iteration */ |
| 1413 | err = cmdq_pkt_append_command(pkt, CMDQ_GET_ARG_C(CMDQ_EOC_IRQ_EN), |
| 1414 | CMDQ_GET_ARG_B(CMDQ_EOC_IRQ_EN), 0, 0, 0, 0, 0, CMDQ_CODE_EOC); |
| 1415 | if (err < 0) |
| 1416 | return err; |
| 1417 | |
| 1418 | /* JUMP to start of pkt */ |
| 1419 | start_pa = cmdq_pkt_get_pa_by_offset(pkt, 0); |
| 1420 | err = cmdq_pkt_jump_addr(pkt, start_pa); |
| 1421 | if (err < 0) |
| 1422 | return err; |
| 1423 | |
| 1424 | /* mark pkt as loop */ |
| 1425 | pkt->loop = true; |
| 1426 | |
| 1427 | cmdq_log("finalize: add EOC and JUMP begin cmd"); |
| 1428 | |
| 1429 | return 0; |
| 1430 | } |
| 1431 | EXPORT_SYMBOL(cmdq_pkt_finalize_loop); |
| 1432 | |
| 1433 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1434 | static struct cmdq_flush_item *cmdq_prepare_flush_tiem(struct cmdq_pkt *pkt) |
| 1435 | { |
| 1436 | struct cmdq_flush_item *item; |
| 1437 | |
| 1438 | kfree(pkt->flush_item); |
| 1439 | pkt->flush_item = NULL; |
| 1440 | |
| 1441 | item = kzalloc(sizeof(*item), GFP_KERNEL); |
| 1442 | if (!item) |
| 1443 | return ERR_PTR(-ENOMEM); |
| 1444 | |
| 1445 | pkt->flush_item = item; |
| 1446 | |
| 1447 | return item; |
| 1448 | } |
| 1449 | #endif |
| 1450 | |
| 1451 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1452 | static void cmdq_pkt_err_irq_dump(struct cmdq_pkt *pkt) |
| 1453 | { |
| 1454 | struct cmdq_client *client = pkt->cl; |
| 1455 | dma_addr_t pc = 0; |
| 1456 | struct cmdq_instruction *inst = NULL; |
| 1457 | const char *mod = "CMDQ"; |
| 1458 | struct cmdq_pkt_buffer *buf; |
| 1459 | u32 size = pkt->cmd_buf_size, cnt = 0; |
| 1460 | s32 thread_id = cmdq_mbox_chan_id(client->chan); |
| 1461 | static u8 err_num; |
| 1462 | |
| 1463 | cmdq_msg("%s pkt:%p", __func__, pkt); |
| 1464 | |
| 1465 | cmdq_util_dump_lock(); |
| 1466 | cmdq_util_error_enable(); |
| 1467 | |
| 1468 | cmdq_util_err("begin of error irq %u", err_num++); |
| 1469 | |
| 1470 | cmdq_task_get_thread_pc(client->chan, &pc); |
| 1471 | cmdq_util_err("pkt:%lx thread:%d pc:%lx", |
| 1472 | (unsigned long)pkt, thread_id, (unsigned long)pc); |
| 1473 | |
| 1474 | if (pc) { |
| 1475 | list_for_each_entry(buf, &pkt->buf, list_entry) { |
| 1476 | if (pc < buf->pa_base || |
| 1477 | pc > buf->pa_base + CMDQ_CMD_BUFFER_SIZE) { |
| 1478 | size -= CMDQ_CMD_BUFFER_SIZE; |
| 1479 | cmdq_util_msg("buffer %u va:0x%p pa:%pa", |
| 1480 | cnt, buf->va_base, &buf->pa_base); |
| 1481 | cnt++; |
| 1482 | continue; |
| 1483 | } |
| 1484 | inst = (struct cmdq_instruction *)( |
| 1485 | buf->va_base + (pc - buf->pa_base)); |
| 1486 | |
| 1487 | if (size > CMDQ_CMD_BUFFER_SIZE) |
| 1488 | size = CMDQ_CMD_BUFFER_SIZE; |
| 1489 | |
| 1490 | cmdq_util_msg("error irq buffer %u va:0x%p pa:%pa", |
| 1491 | cnt, buf->va_base, &buf->pa_base); |
| 1492 | cmdq_buf_cmd_parse(buf->va_base, CMDQ_NUM_CMD(size), |
| 1493 | buf->pa_base, pc, NULL); |
| 1494 | |
| 1495 | break; |
| 1496 | } |
| 1497 | } |
| 1498 | |
| 1499 | if (inst) { |
| 1500 | /* not sync case, print raw */ |
| 1501 | cmdq_util_aee(mod, |
| 1502 | "%s(%s) inst:%#018llx thread:%d", |
| 1503 | mod, cmdq_util_hw_name(client->chan), |
| 1504 | *(u64 *)inst, thread_id); |
| 1505 | } else { |
| 1506 | /* no inst available */ |
| 1507 | cmdq_util_aee(mod, |
| 1508 | "%s(%s) instruction not available pc:%#llx thread:%d", |
| 1509 | mod, cmdq_util_hw_name(client->chan), pc, thread_id); |
| 1510 | } |
| 1511 | |
| 1512 | cmdq_util_error_disable(); |
| 1513 | cmdq_util_dump_unlock(); |
| 1514 | } |
| 1515 | |
| 1516 | static void cmdq_flush_async_cb(struct cmdq_cb_data data) |
| 1517 | { |
| 1518 | struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data; |
| 1519 | struct cmdq_flush_item *item = pkt->flush_item; |
| 1520 | struct cmdq_cb_data user_data = { |
| 1521 | .data = item->data, .err = data.err }; |
| 1522 | |
| 1523 | cmdq_log("%s pkt:%p", __func__, pkt); |
| 1524 | |
| 1525 | if (data.err == -EINVAL) |
| 1526 | cmdq_pkt_err_irq_dump(pkt); |
| 1527 | |
| 1528 | if (item->cb) |
| 1529 | item->cb(user_data); |
| 1530 | complete(&pkt->cmplt); |
| 1531 | item->done = true; |
| 1532 | } |
| 1533 | #endif |
| 1534 | |
| 1535 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1536 | static void cmdq_print_wait_summary(void *chan, dma_addr_t pc, |
| 1537 | struct cmdq_instruction *inst) |
| 1538 | { |
| 1539 | #define txt_len 128 |
| 1540 | char text[txt_len]; |
| 1541 | char text_gpr[30] = {0}; |
| 1542 | void *base; |
| 1543 | u32 gprid, val; |
| 1544 | |
| 1545 | cmdq_buf_print_wfe(text, txt_len, (u32)(pc & 0xFFFF), (void *)inst); |
| 1546 | |
| 1547 | if (inst->arg_a >= CMDQ_EVENT_GPR_TIMER && |
| 1548 | inst->arg_a <= CMDQ_EVENT_GPR_TIMER + CMDQ_GPR_R15) { |
| 1549 | base = cmdq_mbox_get_base(chan); |
| 1550 | gprid = inst->arg_a - CMDQ_EVENT_GPR_TIMER; |
| 1551 | val = readl(base + CMDQ_GPR_R0_OFF + gprid * 4); |
| 1552 | |
| 1553 | snprintf(text_gpr, ARRAY_SIZE(text_gpr), |
| 1554 | " GPR R%u:%#x", gprid, val); |
| 1555 | } |
| 1556 | |
| 1557 | cmdq_util_msg("curr inst: %s value:%u%s", |
| 1558 | text, cmdq_get_event(chan, inst->arg_a), text_gpr); |
| 1559 | } |
| 1560 | #endif |
| 1561 | |
| 1562 | void cmdq_pkt_err_dump_cb(struct cmdq_cb_data data) |
| 1563 | { |
| 1564 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1565 | |
| 1566 | static u32 err_num; |
| 1567 | struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data; |
| 1568 | struct cmdq_client *client = pkt->cl; |
| 1569 | struct cmdq_flush_item *item = |
| 1570 | (struct cmdq_flush_item *)pkt->flush_item; |
| 1571 | struct cmdq_instruction *inst = NULL; |
| 1572 | dma_addr_t pc = 0; |
| 1573 | phys_addr_t gce_pa = cmdq_mbox_get_base_pa(client->chan); |
| 1574 | const char *mod = NULL; |
| 1575 | s32 thread_id = cmdq_mbox_chan_id(client->chan); |
| 1576 | |
| 1577 | cmdq_util_dump_lock(); |
| 1578 | |
| 1579 | /* assign error during dump cb */ |
| 1580 | item->err = data.err; |
| 1581 | |
| 1582 | if (err_num == 0) |
| 1583 | cmdq_util_error_enable(); |
| 1584 | |
| 1585 | cmdq_util_err("Begin of Error %u", err_num); |
| 1586 | |
| 1587 | cmdq_dump_core(client->chan); |
| 1588 | |
| 1589 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 1590 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 1591 | /* for secure path dump more detail */ |
| 1592 | if (pkt->sec_data) { |
| 1593 | cmdq_util_msg("thd:%d Hidden thread info since it's secure", |
| 1594 | thread_id); |
| 1595 | cmdq_sec_err_dump(pkt, client, (u64 **)&inst, &mod); |
| 1596 | } else { |
| 1597 | cmdq_thread_dump(client->chan, pkt, (u64 **)&inst, &pc); |
| 1598 | } |
| 1599 | |
| 1600 | if (data.err == -ECONNABORTED) { |
| 1601 | cmdq_util_msg("skip since abort"); |
| 1602 | goto done; |
| 1603 | } |
| 1604 | |
| 1605 | #else |
| 1606 | cmdq_thread_dump(client->chan, pkt, (u64 **)&inst, &pc); |
| 1607 | #endif |
| 1608 | |
| 1609 | if (inst && inst->op == CMDQ_CODE_WFE) |
| 1610 | cmdq_print_wait_summary(client->chan, pc, inst); |
| 1611 | else if (inst) |
| 1612 | cmdq_buf_cmd_parse((u64 *)inst, 1, pc, pc, "curr inst:"); |
| 1613 | else |
| 1614 | cmdq_util_msg("curr inst: Not Available"); |
| 1615 | |
| 1616 | if (item->err_cb) { |
| 1617 | struct cmdq_cb_data cb_data = { |
| 1618 | .data = item->err_data, |
| 1619 | .err = data.err |
| 1620 | }; |
| 1621 | |
| 1622 | item->err_cb(cb_data); |
| 1623 | } |
| 1624 | |
| 1625 | cmdq_dump_pkt(pkt, pc, true); |
| 1626 | |
| 1627 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 1628 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 1629 | if (!pkt->sec_data) |
| 1630 | cmdq_util_dump_smi(); |
| 1631 | #else |
| 1632 | cmdq_util_dump_smi(); |
| 1633 | #endif |
| 1634 | |
| 1635 | if (inst && inst->op == CMDQ_CODE_WFE) { |
| 1636 | mod = cmdq_event_module_dispatch(gce_pa, inst->arg_a, |
| 1637 | thread_id); |
| 1638 | cmdq_util_aee(mod, |
| 1639 | "DISPATCH:%s(%s) inst:%#018llx OP:WAIT EVENT:%hu thread:%d", |
| 1640 | mod, cmdq_util_hw_name(client->chan), |
| 1641 | *(u64 *)inst, inst->arg_a, thread_id); |
| 1642 | } else if (inst) { |
| 1643 | if (!mod) |
| 1644 | mod = cmdq_thread_module_dispatch(gce_pa, thread_id); |
| 1645 | |
| 1646 | /* not sync case, print raw */ |
| 1647 | cmdq_util_aee(mod, |
| 1648 | "DISPATCH:%s(%s) inst:%#018llx OP:%#04hhx thread:%d", |
| 1649 | mod, cmdq_util_hw_name(client->chan), |
| 1650 | *(u64 *)inst, inst->op, thread_id); |
| 1651 | } else { |
| 1652 | if (!mod) |
| 1653 | mod = "CMDQ"; |
| 1654 | |
| 1655 | /* no inst available */ |
| 1656 | cmdq_util_aee(mod, |
| 1657 | "DISPATCH:%s(%s) unknown instruction thread:%d", |
| 1658 | mod, cmdq_util_hw_name(client->chan), thread_id); |
| 1659 | } |
| 1660 | |
| 1661 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 1662 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 1663 | done: |
| 1664 | #endif |
| 1665 | cmdq_util_err("End of Error %u", err_num); |
| 1666 | if (err_num == 0) { |
| 1667 | cmdq_util_error_disable(); |
| 1668 | cmdq_util_set_first_err_mod(client->chan, mod); |
| 1669 | } |
| 1670 | err_num++; |
| 1671 | |
| 1672 | cmdq_util_dump_unlock(); |
| 1673 | |
| 1674 | #else |
| 1675 | cmdq_err("cmdq error:%d", data.err); |
| 1676 | #endif |
| 1677 | } |
| 1678 | |
| 1679 | s32 cmdq_pkt_flush_async(struct cmdq_pkt *pkt, |
| 1680 | cmdq_async_flush_cb cb, void *data) |
| 1681 | { |
| 1682 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1683 | struct cmdq_flush_item *item = cmdq_prepare_flush_tiem(pkt); |
| 1684 | #endif |
| 1685 | struct cmdq_client *client = pkt->cl; |
| 1686 | s32 err; |
| 1687 | |
| 1688 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1689 | if (IS_ERR(item)) |
| 1690 | return -ENOMEM; |
| 1691 | #endif |
| 1692 | |
| 1693 | err = cmdq_pkt_finalize(pkt); |
| 1694 | if (err < 0) |
| 1695 | return err; |
| 1696 | |
| 1697 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1698 | item->cb = cb; |
| 1699 | item->data = data; |
| 1700 | pkt->cb.cb = cmdq_flush_async_cb; |
| 1701 | pkt->cb.data = pkt; |
| 1702 | |
| 1703 | item->err_cb = pkt->err_cb.cb; |
| 1704 | item->err_data = pkt->err_cb.data; |
| 1705 | pkt->err_cb.cb = cmdq_pkt_err_dump_cb; |
| 1706 | pkt->err_cb.data = pkt; |
| 1707 | |
| 1708 | pkt->rec_submit = sched_clock(); |
| 1709 | #else |
| 1710 | pkt->cb.cb = cb; |
| 1711 | pkt->cb.data = data; |
| 1712 | #endif |
| 1713 | |
| 1714 | mutex_lock(&client->chan_mutex); |
| 1715 | err = mbox_send_message(client->chan, pkt); |
| 1716 | /* We can send next packet immediately, so just call txdone. */ |
| 1717 | mbox_client_txdone(client->chan, 0); |
| 1718 | mutex_unlock(&client->chan_mutex); |
| 1719 | |
| 1720 | return err; |
| 1721 | } |
| 1722 | EXPORT_SYMBOL(cmdq_pkt_flush_async); |
| 1723 | |
| 1724 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1725 | void cmdq_dump_summary(struct cmdq_client *client, struct cmdq_pkt *pkt) |
| 1726 | { |
| 1727 | struct cmdq_instruction *inst = NULL; |
| 1728 | dma_addr_t pc; |
| 1729 | |
| 1730 | cmdq_dump_core(client->chan); |
| 1731 | cmdq_thread_dump(client->chan, pkt, (u64 **)&inst, &pc); |
| 1732 | if (inst && inst->op == CMDQ_CODE_WFE) |
| 1733 | cmdq_print_wait_summary(client->chan, pc, inst); |
| 1734 | else if (inst) |
| 1735 | cmdq_buf_cmd_parse((u64 *)inst, 1, pc, pc, |
| 1736 | "curr inst:"); |
| 1737 | else |
| 1738 | cmdq_msg("curr inst: Not Available"); |
| 1739 | cmdq_dump_pkt(pkt, pc, false); |
| 1740 | } |
| 1741 | |
| 1742 | static int cmdq_pkt_wait_complete_loop(struct cmdq_pkt *pkt) |
| 1743 | { |
| 1744 | struct cmdq_client *client = pkt->cl; |
| 1745 | struct cmdq_flush_item *item = pkt->flush_item; |
| 1746 | unsigned long ret; |
| 1747 | int cnt = 0; |
| 1748 | u32 timeout_ms = cmdq_mbox_get_thread_timeout((void *)client->chan); |
| 1749 | |
| 1750 | #if IS_ENABLED(CMDQ_MMPROFILE_SUPPORT) |
| 1751 | cmdq_mmp_wait(client->chan, pkt); |
| 1752 | #endif |
| 1753 | |
| 1754 | /* make sure gce won't turn off during dump */ |
| 1755 | cmdq_mbox_enable(client->chan); |
| 1756 | |
| 1757 | do { |
| 1758 | if (timeout_ms == CMDQ_NO_TIMEOUT) { |
| 1759 | wait_for_completion(&pkt->cmplt); |
| 1760 | break; |
| 1761 | } |
| 1762 | |
| 1763 | ret = wait_for_completion_timeout(&pkt->cmplt, |
| 1764 | msecs_to_jiffies(CMDQ_PREDUMP_TIMEOUT_MS)); |
| 1765 | if (ret) |
| 1766 | break; |
| 1767 | |
| 1768 | cmdq_util_dump_lock(); |
| 1769 | cmdq_msg("===== SW timeout Pre-dump %u =====", cnt++); |
| 1770 | cmdq_dump_summary(client, pkt); |
| 1771 | cmdq_util_dump_unlock(); |
| 1772 | } while (1); |
| 1773 | |
| 1774 | cmdq_mbox_disable(client->chan); |
| 1775 | |
| 1776 | return item->err; |
| 1777 | } |
| 1778 | |
| 1779 | int cmdq_pkt_wait_complete(struct cmdq_pkt *pkt) |
| 1780 | { |
| 1781 | struct cmdq_flush_item *item = pkt->flush_item; |
| 1782 | |
| 1783 | if (!item) { |
| 1784 | cmdq_err("pkt need flush from flush async ex:0x%p", pkt); |
| 1785 | return -EINVAL; |
| 1786 | } |
| 1787 | |
| 1788 | pkt->rec_wait = sched_clock(); |
| 1789 | cmdq_trace_begin("%s", __func__); |
| 1790 | |
| 1791 | #if IS_ENABLED(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT) || \ |
| 1792 | IS_ENABLED(CONFIG_MTK_CAM_SECURITY_SUPPORT) |
| 1793 | if (pkt->sec_data) |
| 1794 | cmdq_sec_pkt_wait_complete(pkt); |
| 1795 | else |
| 1796 | cmdq_pkt_wait_complete_loop(pkt); |
| 1797 | #else |
| 1798 | cmdq_pkt_wait_complete_loop(pkt); |
| 1799 | #endif |
| 1800 | |
| 1801 | cmdq_trace_end(); |
| 1802 | cmdq_util_track(pkt); |
| 1803 | |
| 1804 | return item->err; |
| 1805 | } |
| 1806 | EXPORT_SYMBOL(cmdq_pkt_wait_complete); |
| 1807 | #endif |
| 1808 | |
| 1809 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1810 | static void cmdq_pkt_flush_q_wait_work(struct work_struct *w) |
| 1811 | { |
| 1812 | struct cmdq_flush_item *item_q = container_of(w, |
| 1813 | struct cmdq_flush_item, work); |
| 1814 | int ret; |
| 1815 | |
| 1816 | ret = cmdq_pkt_wait_complete(item_q->pkt); |
| 1817 | if (item_q->cb) { |
| 1818 | struct cmdq_cb_data data = {.data = item_q->data, .err = ret}; |
| 1819 | |
| 1820 | item_q->cb(data); |
| 1821 | } |
| 1822 | kfree(item_q); |
| 1823 | } |
| 1824 | #else |
| 1825 | static void cmdq_pkt_flush_q_cb_work(struct work_struct *w) |
| 1826 | { |
| 1827 | struct cmdq_flush_item *item_q = container_of(w, |
| 1828 | struct cmdq_flush_item, work); |
| 1829 | struct cmdq_cb_data data; |
| 1830 | |
| 1831 | data.data = item_q->data; |
| 1832 | data.err = item_q->err; |
| 1833 | item_q->cb(data); |
| 1834 | kfree(item_q); |
| 1835 | } |
| 1836 | #endif |
| 1837 | |
| 1838 | static void cmdq_pkt_flush_q_cb(struct cmdq_cb_data data) |
| 1839 | { |
| 1840 | struct cmdq_flush_item *item_q = (struct cmdq_flush_item *)data.data; |
| 1841 | struct cmdq_client *cl = item_q->pkt->cl; |
| 1842 | struct client_priv *priv = cl->cl_priv; |
| 1843 | |
| 1844 | item_q->err = data.err; |
| 1845 | queue_work(priv->flushq, &item_q->work); |
| 1846 | } |
| 1847 | |
| 1848 | s32 cmdq_pkt_flush_threaded(struct cmdq_pkt *pkt, |
| 1849 | cmdq_async_flush_cb cb, void *data) |
| 1850 | { |
| 1851 | struct cmdq_flush_item *item_q = kzalloc(sizeof(*item_q), GFP_KERNEL); |
| 1852 | s32 err; |
| 1853 | |
| 1854 | if (!item_q) |
| 1855 | return -ENOMEM; |
| 1856 | |
| 1857 | item_q->cb = cb; |
| 1858 | item_q->data = data; |
| 1859 | item_q->pkt = pkt; |
| 1860 | |
| 1861 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1862 | |
| 1863 | INIT_WORK(&item_q->work, cmdq_pkt_flush_q_wait_work); |
| 1864 | err = cmdq_pkt_flush_async(pkt, NULL, NULL); |
| 1865 | if (err >= 0) { |
| 1866 | struct cmdq_cb_data data = {.data = item_q, .err = 0}; |
| 1867 | |
| 1868 | cmdq_pkt_flush_q_cb(data); |
| 1869 | } |
| 1870 | #else |
| 1871 | INIT_WORK(&item_q->work, cmdq_pkt_flush_q_cb_work); |
| 1872 | err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_q_cb, item_q); |
| 1873 | #endif |
| 1874 | return err; |
| 1875 | } |
| 1876 | EXPORT_SYMBOL(cmdq_pkt_flush_threaded); |
| 1877 | |
| 1878 | #if !IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1879 | static void cmdq_pkt_flush_cb(struct cmdq_cb_data data) |
| 1880 | { |
| 1881 | struct cmdq_flush_completion *cmplt = data.data; |
| 1882 | |
| 1883 | cmplt->err = !data.err ? false : true; |
| 1884 | complete(&cmplt->cmplt); |
| 1885 | } |
| 1886 | #endif |
| 1887 | |
| 1888 | int cmdq_pkt_flush(struct cmdq_pkt *pkt) |
| 1889 | { |
| 1890 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 1891 | int err; |
| 1892 | |
| 1893 | err = cmdq_pkt_flush_async(pkt, NULL, NULL); |
| 1894 | if (err < 0) |
| 1895 | return err; |
| 1896 | return cmdq_pkt_wait_complete(pkt); |
| 1897 | #else |
| 1898 | struct cmdq_flush_completion cmplt; |
| 1899 | int err; |
| 1900 | |
| 1901 | cmdq_log("start"); |
| 1902 | |
| 1903 | init_completion(&cmplt.cmplt); |
| 1904 | cmplt.pkt = pkt; |
| 1905 | err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt); |
| 1906 | if (err < 0) |
| 1907 | return err; |
| 1908 | |
| 1909 | wait_for_completion(&cmplt.cmplt); |
| 1910 | |
| 1911 | cmdq_log("done pkt:0x%p err:%d", cmplt.pkt, cmplt.err); |
| 1912 | return cmplt.err ? -EFAULT : 0; |
| 1913 | #endif |
| 1914 | } |
| 1915 | EXPORT_SYMBOL(cmdq_pkt_flush); |
| 1916 | |
| 1917 | static void cmdq_buf_print_read(char *text, u32 txt_sz, |
| 1918 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 1919 | { |
| 1920 | u32 addr; |
| 1921 | |
| 1922 | if (cmdq_inst->arg_b_type == CMDQ_IMMEDIATE_VALUE && |
| 1923 | (cmdq_inst->arg_b & CMDQ_ADDR_LOW_BIT)) { |
| 1924 | /* 48bit format case */ |
| 1925 | addr = cmdq_inst->arg_b & 0xfffc; |
| 1926 | |
| 1927 | snprintf(text, txt_sz, |
| 1928 | "%#06x %#018llx [Read ] Reg Index %#010x = addr(low) %#06x", |
| 1929 | offset, *((u64 *)cmdq_inst), cmdq_inst->arg_a, addr); |
| 1930 | } else { |
| 1931 | addr = ((u32)(cmdq_inst->arg_b | |
| 1932 | (cmdq_inst->s_op << CMDQ_SUBSYS_SHIFT))); |
| 1933 | |
| 1934 | snprintf(text, txt_sz, |
| 1935 | "%#06x %#018llx [Read ] Reg Index %#010x = %s%#010x", |
| 1936 | offset, *((u64 *)cmdq_inst), cmdq_inst->arg_a, |
| 1937 | cmdq_inst->arg_b_type ? "*Reg Index " : "SubSys Reg ", |
| 1938 | addr); |
| 1939 | } |
| 1940 | } |
| 1941 | |
| 1942 | static void cmdq_buf_print_write(char *text, u32 txt_sz, |
| 1943 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 1944 | { |
| 1945 | u32 addr; |
| 1946 | |
| 1947 | if (cmdq_inst->arg_a_type == CMDQ_IMMEDIATE_VALUE && |
| 1948 | (cmdq_inst->arg_a & CMDQ_ADDR_LOW_BIT)) { |
| 1949 | /* 48bit format case */ |
| 1950 | addr = cmdq_inst->arg_a & 0xfffc; |
| 1951 | |
| 1952 | snprintf(text, txt_sz, |
| 1953 | "%#06x %#018llx [Write] addr(low) %#06x = %s%#010x%s", |
| 1954 | offset, *((u64 *)cmdq_inst), |
| 1955 | addr, CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 1956 | cmdq_inst->arg_b_type ? cmdq_inst->arg_b : |
| 1957 | CMDQ_GET_32B_VALUE(cmdq_inst->arg_b, cmdq_inst->arg_c), |
| 1958 | cmdq_inst->op == CMDQ_CODE_WRITE_S_W_MASK ? |
| 1959 | " with mask" : ""); |
| 1960 | } else { |
| 1961 | addr = ((u32)(cmdq_inst->arg_a | |
| 1962 | (cmdq_inst->s_op << CMDQ_SUBSYS_SHIFT))); |
| 1963 | |
| 1964 | snprintf(text, txt_sz, |
| 1965 | "%#06x %#018llx [Write] %s%#010x = %s%#010x%s", |
| 1966 | offset, *((u64 *)cmdq_inst), |
| 1967 | cmdq_inst->arg_a_type ? "*Reg Index " : "SubSys Reg ", |
| 1968 | addr, CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 1969 | cmdq_inst->arg_b_type ? cmdq_inst->arg_b : |
| 1970 | CMDQ_GET_32B_VALUE(cmdq_inst->arg_b, cmdq_inst->arg_c), |
| 1971 | cmdq_inst->op == CMDQ_CODE_WRITE_S_W_MASK ? |
| 1972 | " with mask" : ""); |
| 1973 | } |
| 1974 | } |
| 1975 | |
| 1976 | void cmdq_buf_print_wfe(char *text, u32 txt_sz, |
| 1977 | u32 offset, void *inst) |
| 1978 | { |
| 1979 | struct cmdq_instruction *cmdq_inst = inst; |
| 1980 | u32 cmd = CMDQ_GET_32B_VALUE(cmdq_inst->arg_b, cmdq_inst->arg_c); |
| 1981 | u32 event_op = cmd & 0x80008000; |
| 1982 | u16 update_to = cmdq_inst->arg_b & GENMASK(11, 0); |
| 1983 | u16 wait_to = cmdq_inst->arg_c & GENMASK(11, 0); |
| 1984 | |
| 1985 | switch (event_op) { |
| 1986 | case 0x80000000: |
| 1987 | snprintf(text, txt_sz, |
| 1988 | "%#06x %#018llx [Sync ] %s event %u to %u", |
| 1989 | offset, *((u64 *)cmdq_inst), |
| 1990 | update_to ? "set" : "clear", |
| 1991 | cmdq_inst->arg_a, |
| 1992 | update_to); |
| 1993 | break; |
| 1994 | case 0x8000: |
| 1995 | snprintf(text, txt_sz, |
| 1996 | "%#06x %#018llx [Sync ] wait for event %u become %u", |
| 1997 | offset, *((u64 *)cmdq_inst), |
| 1998 | cmdq_inst->arg_a, |
| 1999 | wait_to); |
| 2000 | break; |
| 2001 | case 0x80008000: |
| 2002 | default: |
| 2003 | snprintf(text, txt_sz, |
| 2004 | "%#06x %#018llx [Sync ] wait for event %u become %u and %s to %u", |
| 2005 | offset, *((u64 *)cmdq_inst), |
| 2006 | cmdq_inst->arg_a, |
| 2007 | wait_to, |
| 2008 | update_to ? "set" : "clear", |
| 2009 | update_to); |
| 2010 | break; |
| 2011 | } |
| 2012 | } |
| 2013 | |
| 2014 | static const char *cmdq_parse_logic_sop(enum CMDQ_LOGIC_ENUM s_op) |
| 2015 | { |
| 2016 | switch (s_op) { |
| 2017 | case CMDQ_LOGIC_ASSIGN: |
| 2018 | return "= "; |
| 2019 | case CMDQ_LOGIC_ADD: |
| 2020 | return "+ "; |
| 2021 | case CMDQ_LOGIC_SUBTRACT: |
| 2022 | return "- "; |
| 2023 | case CMDQ_LOGIC_MULTIPLY: |
| 2024 | return "* "; |
| 2025 | case CMDQ_LOGIC_XOR: |
| 2026 | return "^"; |
| 2027 | case CMDQ_LOGIC_NOT: |
| 2028 | return "= ~"; |
| 2029 | case CMDQ_LOGIC_OR: |
| 2030 | return "| "; |
| 2031 | case CMDQ_LOGIC_AND: |
| 2032 | return "& "; |
| 2033 | case CMDQ_LOGIC_LEFT_SHIFT: |
| 2034 | return "<< "; |
| 2035 | case CMDQ_LOGIC_RIGHT_SHIFT: |
| 2036 | return ">> "; |
| 2037 | default: |
| 2038 | return "<error: unsupported logic sop>"; |
| 2039 | } |
| 2040 | } |
| 2041 | |
| 2042 | static const char *cmdq_parse_jump_c_sop(enum CMDQ_CONDITION_ENUM s_op) |
| 2043 | { |
| 2044 | switch (s_op) { |
| 2045 | case CMDQ_EQUAL: |
| 2046 | return "=="; |
| 2047 | case CMDQ_NOT_EQUAL: |
| 2048 | return "!="; |
| 2049 | case CMDQ_GREATER_THAN_AND_EQUAL: |
| 2050 | return ">="; |
| 2051 | case CMDQ_LESS_THAN_AND_EQUAL: |
| 2052 | return "<="; |
| 2053 | case CMDQ_GREATER_THAN: |
| 2054 | return ">"; |
| 2055 | case CMDQ_LESS_THAN: |
| 2056 | return "<"; |
| 2057 | default: |
| 2058 | return "<error: unsupported jump conditional sop>"; |
| 2059 | } |
| 2060 | } |
| 2061 | |
| 2062 | static void cmdq_buf_print_move(char *text, u32 txt_sz, |
| 2063 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 2064 | { |
| 2065 | u64 val = (u64)cmdq_inst->arg_a | |
| 2066 | CMDQ_GET_32B_VALUE(cmdq_inst->arg_b, cmdq_inst->arg_c); |
| 2067 | |
| 2068 | if (cmdq_inst->arg_a) |
| 2069 | snprintf(text, txt_sz, |
| 2070 | "%#06x %#018llx [Move ] move %#llx to %s%hhu", |
| 2071 | offset, *((u64 *)cmdq_inst), val, |
| 2072 | "Reg Index GPR R", cmdq_inst->s_op); |
| 2073 | else |
| 2074 | snprintf(text, txt_sz, |
| 2075 | "%#06x %#018llx [Move ] mask %#014llx", |
| 2076 | offset, *((u64 *)cmdq_inst), ~val); |
| 2077 | } |
| 2078 | |
| 2079 | static void cmdq_buf_print_logic(char *text, u32 txt_sz, |
| 2080 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 2081 | { |
| 2082 | switch (cmdq_inst->s_op) { |
| 2083 | case CMDQ_LOGIC_ASSIGN: |
| 2084 | snprintf(text, txt_sz, |
| 2085 | "%#06x %#018llx [Logic] Reg Index %#06x %s%s%#010x", |
| 2086 | offset, *((u64 *)cmdq_inst), cmdq_inst->arg_a, |
| 2087 | cmdq_parse_logic_sop(cmdq_inst->s_op), |
| 2088 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 2089 | CMDQ_GET_32B_VALUE(cmdq_inst->arg_b, cmdq_inst->arg_c)); |
| 2090 | break; |
| 2091 | case CMDQ_LOGIC_NOT: |
| 2092 | snprintf(text, txt_sz, |
| 2093 | "%#06x %#018llx [Logic] Reg Index %#06x %s%s%#010x", |
| 2094 | offset, *((u64 *)cmdq_inst), cmdq_inst->arg_a, |
| 2095 | cmdq_parse_logic_sop(cmdq_inst->s_op), |
| 2096 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 2097 | cmdq_inst->arg_b); |
| 2098 | break; |
| 2099 | default: |
| 2100 | snprintf(text, txt_sz, |
| 2101 | "%#06x %#018llx [Logic] %s%#010x = %s%#010x %s%s%#010x", |
| 2102 | offset, *((u64 *)cmdq_inst), |
| 2103 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_a_type), |
| 2104 | cmdq_inst->arg_a, |
| 2105 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 2106 | cmdq_inst->arg_b, cmdq_parse_logic_sop(cmdq_inst->s_op), |
| 2107 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_c_type), |
| 2108 | cmdq_inst->arg_c); |
| 2109 | break; |
| 2110 | } |
| 2111 | } |
| 2112 | |
| 2113 | static void cmdq_buf_print_write_jump_c(char *text, u32 txt_sz, |
| 2114 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 2115 | { |
| 2116 | snprintf(text, txt_sz, |
| 2117 | "%#06x %#018llx [Jumpc] %s if (%s%#010x %s %s%#010x) jump %s%#010x", |
| 2118 | offset, *((u64 *)cmdq_inst), |
| 2119 | cmdq_inst->op == CMDQ_CODE_JUMP_C_ABSOLUTE ? |
| 2120 | "absolute" : "relative", |
| 2121 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 2122 | cmdq_inst->arg_b, cmdq_parse_jump_c_sop(cmdq_inst->s_op), |
| 2123 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_c_type), cmdq_inst->arg_c, |
| 2124 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_a_type), cmdq_inst->arg_a); |
| 2125 | } |
| 2126 | |
| 2127 | static void cmdq_buf_print_poll(char *text, u32 txt_sz, |
| 2128 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 2129 | { |
| 2130 | u32 addr = ((u32)(cmdq_inst->arg_a | |
| 2131 | (cmdq_inst->s_op << CMDQ_SUBSYS_SHIFT))); |
| 2132 | |
| 2133 | snprintf(text, txt_sz, |
| 2134 | "%#06x %#018llx [Poll ] poll %s%#010x = %s%#010x", |
| 2135 | offset, *((u64 *)cmdq_inst), |
| 2136 | cmdq_inst->arg_a_type ? "*Reg Index " : "SubSys Reg ", |
| 2137 | addr, |
| 2138 | CMDQ_REG_IDX_PREFIX(cmdq_inst->arg_b_type), |
| 2139 | CMDQ_GET_32B_VALUE(cmdq_inst->arg_b, cmdq_inst->arg_c)); |
| 2140 | } |
| 2141 | |
| 2142 | static void cmdq_buf_print_jump(char *text, u32 txt_sz, |
| 2143 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 2144 | { |
| 2145 | u32 dst = ((u32)cmdq_inst->arg_b) << 16 | cmdq_inst->arg_c; |
| 2146 | |
| 2147 | snprintf(text, txt_sz, |
| 2148 | "%#06x %#018llx [Jump ] jump %s %#llx", |
| 2149 | offset, *((u64 *)cmdq_inst), |
| 2150 | cmdq_inst->arg_a ? "absolute addr" : "relative offset", |
| 2151 | cmdq_inst->arg_a ? CMDQ_REG_REVERT_ADDR((u64)dst) : |
| 2152 | CMDQ_REG_REVERT_ADDR((s64)(s32)dst)); |
| 2153 | } |
| 2154 | |
| 2155 | static void cmdq_buf_print_misc(char *text, u32 txt_sz, |
| 2156 | u32 offset, struct cmdq_instruction *cmdq_inst) |
| 2157 | { |
| 2158 | char *cmd_str; |
| 2159 | |
| 2160 | switch (cmdq_inst->op) { |
| 2161 | case CMDQ_CODE_EOC: |
| 2162 | cmd_str = "eoc"; |
| 2163 | break; |
| 2164 | default: |
| 2165 | cmd_str = "unknown"; |
| 2166 | break; |
| 2167 | } |
| 2168 | |
| 2169 | snprintf(text, txt_sz, "%#06x %#018llx %s", |
| 2170 | offset, *((u64 *)cmdq_inst), cmd_str); |
| 2171 | } |
| 2172 | |
| 2173 | void cmdq_buf_cmd_parse(u64 *buf, u32 cmd_nr, dma_addr_t buf_pa, |
| 2174 | dma_addr_t cur_pa, const char *info) |
| 2175 | { |
| 2176 | #define txt_sz 128 |
| 2177 | static char text[txt_sz]; |
| 2178 | struct cmdq_instruction *cmdq_inst = (struct cmdq_instruction *)buf; |
| 2179 | u32 i; |
| 2180 | |
| 2181 | for (i = 0; i < cmd_nr; i++) { |
| 2182 | switch (cmdq_inst[i].op) { |
| 2183 | case CMDQ_CODE_WRITE_S: |
| 2184 | case CMDQ_CODE_WRITE_S_W_MASK: |
| 2185 | cmdq_buf_print_write(text, txt_sz, (u32)buf_pa, |
| 2186 | &cmdq_inst[i]); |
| 2187 | break; |
| 2188 | case CMDQ_CODE_WFE: |
| 2189 | cmdq_buf_print_wfe(text, txt_sz, (u32)buf_pa, |
| 2190 | (void *)&cmdq_inst[i]); |
| 2191 | break; |
| 2192 | case CMDQ_CODE_MOVE: |
| 2193 | cmdq_buf_print_move(text, txt_sz, (u32)buf_pa, |
| 2194 | &cmdq_inst[i]); |
| 2195 | break; |
| 2196 | case CMDQ_CODE_READ_S: |
| 2197 | cmdq_buf_print_read(text, txt_sz, (u32)buf_pa, |
| 2198 | &cmdq_inst[i]); |
| 2199 | break; |
| 2200 | case CMDQ_CODE_LOGIC: |
| 2201 | cmdq_buf_print_logic(text, txt_sz, (u32)buf_pa, |
| 2202 | &cmdq_inst[i]); |
| 2203 | break; |
| 2204 | case CMDQ_CODE_JUMP_C_ABSOLUTE: |
| 2205 | case CMDQ_CODE_JUMP_C_RELATIVE: |
| 2206 | cmdq_buf_print_write_jump_c(text, txt_sz, (u32)buf_pa, |
| 2207 | &cmdq_inst[i]); |
| 2208 | break; |
| 2209 | case CMDQ_CODE_POLL: |
| 2210 | cmdq_buf_print_poll(text, txt_sz, (u32)buf_pa, |
| 2211 | &cmdq_inst[i]); |
| 2212 | break; |
| 2213 | case CMDQ_CODE_JUMP: |
| 2214 | cmdq_buf_print_jump(text, txt_sz, (u32)buf_pa, |
| 2215 | &cmdq_inst[i]); |
| 2216 | break; |
| 2217 | default: |
| 2218 | cmdq_buf_print_misc(text, txt_sz, (u32)buf_pa, |
| 2219 | &cmdq_inst[i]); |
| 2220 | break; |
| 2221 | } |
| 2222 | cmdq_util_msg("%s%s", |
| 2223 | info ? info : (buf_pa == cur_pa ? ">>" : " "), |
| 2224 | text); |
| 2225 | buf_pa += CMDQ_INST_SIZE; |
| 2226 | } |
| 2227 | } |
| 2228 | |
| 2229 | s32 cmdq_pkt_dump_buf(struct cmdq_pkt *pkt, dma_addr_t curr_pa) |
| 2230 | { |
| 2231 | struct cmdq_pkt_buffer *buf; |
| 2232 | u32 size, cnt = 0; |
| 2233 | |
| 2234 | list_for_each_entry(buf, &pkt->buf, list_entry) { |
| 2235 | if (list_is_last(&buf->list_entry, &pkt->buf)) { |
| 2236 | size = CMDQ_CMD_BUFFER_SIZE - pkt->avail_buf_size; |
| 2237 | } else if (cnt > 2 && !(curr_pa >= buf->pa_base && |
| 2238 | curr_pa < buf->pa_base + CMDQ_BUF_ALLOC_SIZE)) { |
| 2239 | cmdq_util_msg( |
| 2240 | "buffer %u va:0x%p pa:%pa %#018llx (skip detail) %#018llx", |
| 2241 | cnt, buf->va_base, &buf->pa_base, |
| 2242 | *((u64 *)buf->va_base), |
| 2243 | *((u64 *)(buf->va_base + |
| 2244 | CMDQ_CMD_BUFFER_SIZE - CMDQ_INST_SIZE))); |
| 2245 | cnt++; |
| 2246 | continue; |
| 2247 | } else { |
| 2248 | size = CMDQ_CMD_BUFFER_SIZE; |
| 2249 | } |
| 2250 | cmdq_util_msg("buffer %u va:0x%p pa:%pa", |
| 2251 | cnt, buf->va_base, &buf->pa_base); |
| 2252 | cmdq_buf_cmd_parse(buf->va_base, CMDQ_NUM_CMD(size), |
| 2253 | buf->pa_base, curr_pa, NULL); |
| 2254 | cnt++; |
| 2255 | } |
| 2256 | |
| 2257 | return 0; |
| 2258 | } |
| 2259 | EXPORT_SYMBOL(cmdq_pkt_dump_buf); |
| 2260 | |
| 2261 | int cmdq_dump_pkt(struct cmdq_pkt *pkt, dma_addr_t pc, bool dump_ist) |
| 2262 | { |
| 2263 | if (!pkt) |
| 2264 | return -EINVAL; |
| 2265 | |
| 2266 | cmdq_util_msg( |
| 2267 | "pkt:0x%p(%#x) size:%zu/%zu avail size:%zu priority:%u%s", |
| 2268 | pkt, (u32)(unsigned long)pkt, pkt->cmd_buf_size, |
| 2269 | pkt->buf_size, pkt->avail_buf_size, |
| 2270 | pkt->priority, pkt->loop ? " loop" : ""); |
| 2271 | #if IS_ENABLED(CONFIG_MTK_CMDQ_MBOX_EXT) |
| 2272 | cmdq_util_msg( |
| 2273 | "submit:%llu trigger:%llu wait:%llu irq:%llu", |
| 2274 | pkt->rec_submit, pkt->rec_trigger, |
| 2275 | pkt->rec_wait, pkt->rec_irq); |
| 2276 | #endif |
| 2277 | if (dump_ist) |
| 2278 | cmdq_pkt_dump_buf(pkt, pc); |
| 2279 | |
| 2280 | return 0; |
| 2281 | } |
| 2282 | EXPORT_SYMBOL(cmdq_dump_pkt); |
| 2283 | |
| 2284 | void cmdq_pkt_set_err_cb(struct cmdq_pkt *pkt, |
| 2285 | cmdq_async_flush_cb cb, void *data) |
| 2286 | { |
| 2287 | pkt->err_cb.cb = cb; |
| 2288 | pkt->err_cb.data = (void *)data; |
| 2289 | } |
| 2290 | EXPORT_SYMBOL(cmdq_pkt_set_err_cb); |
| 2291 | |