/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SYSVIPC_CROSSMSG
#include <linux/soc/zte/rpm/rpmsg.h>
#include "cross_msg.h"
#endif

#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#ifdef CONFIG_SYSVIPC_CROSSMSG
int msg_chn_ready;
struct mutex cross_msg_mutex;
T_sc_msg_header *msgheader = NULL;
#endif

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}
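
/*
 * Illustrative arithmetic for the scaling above (the figures are example
 * assumptions, not values read from this tree): with 512 MB of lowmem,
 * i.e. totalram - totalhigh == 131072 pages and mem_unit == 4096, and
 * with MSG_MEM_SCALE == 32 and MSGMNB == 16384,
 *
 *	allowed = ((131072 / 32) * 4096) / 16384 = 1024 queues,
 *
 * which is then divided by nr_ipc_ns and clamped to the
 * (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */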

void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	if (IS_ENABLED(CONFIG_PROC_STRIPPED))
		return;

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
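
/*
 * Note on the ss_add()/ss_del()/ss_wakeup() protocol above: when a queue
 * is torn down, ss_wakeup(..., 1) marks each waiter by setting list.next
 * to NULL instead of unlinking it, and ss_del() later skips list_del()
 * for entries carrying that mark. The woken sender then notices
 * q_perm.deleted and backs out with -EIDRM.
 */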

static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		/*
		 * Make sure that the wakeup doesn't preempt
		 * this CPU prematurely. (on PREEMPT_RT)
		 */
		preempt_disable_rt();

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);

		preempt_enable_rt();
	}
}
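
/*
 * The r_msg dance above (store NULL, wake the task, smp_mb(), then store
 * the final value) pairs with the lockless receive loop in do_msgrcv():
 * a woken receiver spins while r_msg is NULL, so it can never observe a
 * half-delivered result between the wakeup and the final store.
 */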

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct list_head *tmp;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_lock_by_ptr(&msq->q_perm);
	ipc_rcu_putref(msq);
	ipc_unlock(&msq->q_perm);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;
#ifdef CONFIG_SYSVIPC_CROSSMSG
	if ((key & CROSS_MSG_MASK) == CROSS_MSG_MASK)
		msg_params.flg |= IPC_CREAT;
#endif

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
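
/*
 * Illustrative userspace sketch of the syscall above (standard SysV
 * msgget(2) usage; the ftok() path is a made-up example):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	key_t key = ftok("/tmp/myapp", 'q');
 *	int msqid = msgget(key, IPC_CREAT | 0666);
 *	if (msqid < 0)
 *		perror("msgget");
 */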

#ifdef CONFIG_SYSVIPC_CROSSMSG
static inline unsigned long
copy_msqid_by_version(void *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		memcpy(buf, in, sizeof(*in));
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		memcpy(buf, &out, sizeof(out));
		return 0;
	}
	default:
		return -EINVAL;
	}
}
#endif

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
			       &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock;
		}

		msq->q_qbytes = msqid64.msg_qbytes;

		ipc_update_perm(&msqid64.msg_perm, ipcp);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}

#ifdef CONFIG_SYSVIPC_CROSSMSG
static int get_msgstat(int msqid, int cmd, struct msqid_ds *buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		msq = msg_lock_check(ns, msqid);
		if (IS_ERR(msq))
			return PTR_ERR(msq);
		success_return = 0;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		copy_msqid_by_version(buf, &tbuf, version);
		return success_return;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}
#endif

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
	case IPC_RMID:
		err = msgctl_down(ns, msqid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}
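
/*
 * Illustrative userspace sketch for msgctl(2) (standard API; msqid would
 * come from a prior msgget call):
 *
 *	struct msqid_ds ds;
 *
 *	msgctl(msqid, IPC_STAT, &ds);	   read current settings
 *	ds.msg_qbytes /= 2;
 *	msgctl(msqid, IPC_SET, &ds);	   apply the smaller limit
 *	msgctl(msqid, IPC_RMID, NULL);	   destroy the queue
 */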

static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			/*
			 * Make sure that the wakeup doesn't preempt
			 * this CPU prematurely. (on PREEMPT_RT)
			 */
			preempt_disable_rt();

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;
				preempt_enable_rt();

				return 1;
			}
			preempt_enable_rt();
		}
	}
	return 0;
}

long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
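
/*
 * Illustrative userspace sketch for the send path (standard msgsnd(2)
 * usage; the message layout is the usual long-type-plus-payload and the
 * struct name is a made-up example):
 *
 *	struct my_msg {
 *		long mtype;
 *		char mtext[64];
 *	} m = { .mtype = 1 };
 *
 *	strcpy(m.mtext, "hello");
 *	if (msgsnd(msqid, &m, sizeof(m.mtext), 0) < 0)
 *		perror("msgsnd");
 *
 * Note that msgsz counts only the payload, not the leading mtype.
 */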

#ifdef CONFIG_SYSVIPC_CROSSMSG
long do_kmsgsnd(int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;
	long mtype;
	char *mtext;

	ns = current->nsproxy->ipc_ns;

	mtype = msgp->mtype;
	mtext = msgp->mtext;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_kmsg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
#endif

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;

#ifdef CONFIG_SYSVIPC_CROSSMSG
	struct msqid_ds msgque;
	struct ipc_perm *ipcp = &msgque.msg_perm;
	T_ZDrvRpMsg_Msg Icp_Msg;
	T_sc_msg_header *msgrcvheader = NULL;
	long *typeheader = NULL;
	char *textheader = NULL;
	int ret, key;
	size_t alen;

	ret = get_msgstat(msqid, IPC_STAT, &msgque);
	if (ret < 0)
		return ret;
	key = ipcp->key;
	/* cross-core message handling */
	if (msg_chn_ready && ((key & CROSS_MSG_MASK) == CROSS_MSG_MASK)) {
#if CROSS_DEBUG
		printk(KERN_INFO CROSS_PRINT "msg key beyond (%x)\n", key);
#endif
		mutex_lock(&cross_msg_mutex);
		alen = sizeof(T_sc_msg_header) + sizeof(long) + msgsz;
		if (alen > CROSS_MSG_SIZE) {
			mutex_unlock(&cross_msg_mutex);
			return -EINVAL;
		}
		if (!msgheader)
			msgheader = kmalloc(CROSS_MSG_SIZE, GFP_KERNEL);
		if (!msgheader)
			panic(CROSS_PRINT "Failed to allocate send msgheader!\n");
		memset(msgheader, 0, sizeof(T_sc_msg_header));
		msgheader->head = CROSS_MSG_HEAD;
		msgheader->ops = MSGSND_F;
		msgheader->sndp.getp.key = key;
		msgheader->sndp.getp.msgflg = ipcp->mode | IPC_CREAT;
		msgheader->sndp.msgflg = msgflg;
		msgheader->sndp.msgsz = msgsz;
		typeheader = (long *)(msgheader + 1);
		*typeheader = mtype;
		textheader = (char *)(typeheader + 1);
		/* copy_from_user() returns the number of bytes left uncopied,
		 * so any non-zero result is a fault. */
		if (copy_from_user(textheader, msgp->mtext,
				   alen - sizeof(T_sc_msg_header) - sizeof(long))) {
			mutex_unlock(&cross_msg_mutex);
			return -EFAULT;
		}
#if CROSS_DEBUG
		printk(KERN_INFO CROSS_PRINT "msg send text:(%s) msgtyp:(%ld) msgsize:(%zu)\n",
		       textheader, *typeheader, msgsz);
#endif
		Icp_Msg.actorID = CROSS_MSG_ACT;
		Icp_Msg.chID = CROSS_MSG_CHN;
		Icp_Msg.flag = RPMSG_WRITE_INT;	/* 1 means raise an ICP interrupt */
		Icp_Msg.buf = msgheader;
		Icp_Msg.len = alen;

		ret = zDrvRpMsg_Write_Cap(&Icp_Msg);
		if (ret < 0) {
			printk(KERN_ERR CROSS_PRINT "write rpmsg error:(%d)\n", ret);
			mutex_unlock(&cross_msg_mutex);
			return ret;
		}
#if CROSS_DEBUG
		printk(KERN_INFO CROSS_PRINT "write rpmsg ok:(%d)\n", ret);
#endif
		Icp_Msg.actorID = CROSS_MSG_ACT;
		Icp_Msg.chID = CROSS_MSG_CHN;
		Icp_Msg.flag = 0;
		Icp_Msg.buf = msgheader;
		Icp_Msg.len = CROSS_MSG_SIZE;

		ret = zDrvRpMsg_Read_Cap(&Icp_Msg);
		mutex_unlock(&cross_msg_mutex);
		if (ret < 0) {
			printk(KERN_ERR CROSS_PRINT "read rpmsg error:(%d)\n", ret);
			return ret;
		}
#if CROSS_DEBUG
		printk(KERN_INFO CROSS_PRINT "read rpmsg ok:(%d)\n", ret);
#endif
		msgrcvheader = (T_sc_msg_header *)Icp_Msg.buf;
		ret = msgrcvheader->ret;
#if CROSS_DEBUG
		printk(KERN_INFO CROSS_PRINT "msgsnd return number:(%x)\n", ret);
#endif
		return ret;
	}
#endif

	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the message with the lowest type that is
	 *		 <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
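
/*
 * Example of the mapping above: msgtyp == -3 is rewritten to 3 and
 * returns SEARCH_LESSEQUAL, so the receive loop takes the queued message
 * with the smallest type <= 3; msgtyp == 3 with MSG_EXCEPT set yields
 * SEARCH_NOTEQUAL, i.e. the first message whose type is not 3.
 */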

long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
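
/*
 * Illustrative userspace sketch for the receive path (standard msgrcv(2)
 * usage, matching the msgsnd example above):
 *
 *	struct my_msg {
 *		long mtype;
 *		char mtext[64];
 *	} m;
 *
 *	ssize_t n = msgrcv(msqid, &m, sizeof(m.mtext), 1, 0);
 *	if (n < 0)
 *		perror("msgrcv");
 *
 * msgtyp == 1 requests the first message of type 1; 0 would take the
 * head of the queue, and a negative value the lowest type <= abs(msgtyp).
 */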

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif