// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(64 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)  \
				(((attr & 0x07) << 29) |	\
				((method & 0x1f) << 24) |	\
				((in & 0xff) << 16) |		\
				((out & 0xff) << 8) |		\
				((oin & 0x0f) << 4) |		\
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
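
/*
 * Worked example (illustrative only): FASTRPC_SCALARS(1, 2, 1) packs
 * method 1, two input buffers and one output buffer into 0x01020100,
 * from which REMOTE_SCALARS_INBUFS() recovers 2, REMOTE_SCALARS_OUTBUFS()
 * recovers 1 and REMOTE_SCALARS_LENGTH() evaluates to 3.
 */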

#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};

static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	if (map->fl) {
		spin_lock(&map->fl->lock);
		list_del(&map->node);
		spin_unlock(&map->fl->lock);
		map->fl = NULL;
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static int fastrpc_map_get(struct fastrpc_map *map)
{
	if (!map)
		return -ENOENT;

	return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}

static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

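	/*
	 * Encode the session ID in the upper 32 bits of the address;
	 * FASTRPC_PHYS() masks it back off wherever the raw 32-bit
	 * DMA address is needed.
	 */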
	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}

static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
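
/*
 * Worked example (illustrative): for two user buffers A = [0x1000, 0x1800)
 * and B = [0x1400, 0x2000), the sorted pass sees B start inside A's range,
 * so B gets mstart = 0x1800, mend = 0x2000 and offset = 0x400: only B's
 * non-overlapping tail contributes to the payload size, and B's remote
 * pointer is later rewound by the offset so it still covers the shared head.
 */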

static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

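	/*
	 * The IDR index lands in bits 4..11 of ctxid (FASTRPC_CTXID_MASK is
	 * 0xFF0); the low four bits are left free so the protection domain
	 * can be OR'ed in when the message is sent, and the rpmsg callback
	 * recovers the index with (rsp->ctx & FASTRPC_CTXID_MASK) >> 4.
	 */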
	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = &a->sgt;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
}

static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.map = fastrpc_kmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};

static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}

/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>> START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>> END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |             (0-N)               |
 * +---------------------------------+
 */
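
/*
 * Illustrative sizing, assuming 16-byte fastrpc_remote_arg and
 * fastrpc_phy_page structs and an 8-byte fastrpc_invoke_buf: for
 * nscalars = 3 the metadata occupies 3 * (16 + 8 + 16) = 120 bytes, plus
 * 16 * sizeof(u64) = 128 bytes of fdlist and 64 * sizeof(u32) = 256 bytes
 * of crclist, so fastrpc_get_meta_size() returns 504 and the inline args
 * start at the next FASTRPC_ALIGN (128-byte) boundary, offset 512.
 */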

static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int oix;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (oix = 0; oix < ctx->nbufs; oix++) {
		int i = ctx->olaps[oix].raix;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[oix].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}

static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}

static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

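	/*
	 * A zero pid appears to mark the message as kernel-originated
	 * (attach/create/release invocations) rather than coming from a
	 * user process.
	 */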
	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}

static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev,
				     "user app trying to send a kernel RPC message (%d)\n",
				     handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	/* Wait for remote dsp to respond or time out */
	err = wait_for_completion_interruptible(&ctx->work);
	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	/* We are done with this compute context, remove it from pending list */
	spin_lock(&fl->lock);
	list_del(&ctx->node);
	spin_unlock(&fl->lock);
	fastrpc_context_put(ctx);

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = 1;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

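	/*
	 * The plain CREATE method consumes only args[0..3]; the attrs and
	 * siglen arguments are meaningful to the CREATE_ATTR variant, which
	 * is why its scalars value advertises all six input buffers.
	 */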
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	fastrpc_map_put(map);
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node)
		fastrpc_map_put(map);

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}

static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf no longer valid when
		 * we reach this point. Therefore "leak" the fd and rely on
		 * the process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = 0;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};

static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
		dev_err(&pdev->dev, "too many sessions\n");
		spin_unlock_irqrestore(&cctx->lock, flags);
		return -ENOSPC;
	}
	sess = &cctx->session[cctx->sesscount++];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

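	/*
	 * A context bank can be advertised as serving several sessions via
	 * "qcom,nsessions"; clone this entry so each extra session shares
	 * the same device and SID.
	 */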
	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount++];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}

static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};

static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
					    domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err) {
		kfree(data);
		return err;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}

static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node) {
		ctx->retval = -EPIPE;
		complete(&ctx->work);
	}
	spin_unlock(&user->lock);
}

static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	/* No invocations past this point */
	spin_lock_irqsave(&cctx->lock, flags);
	cctx->rpdev = NULL;
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	fastrpc_channel_ctx_put(cctx);
}

static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");