blob: 201da2d4477f4da04b73b9561a970d2d880975c4 [file] [log] [blame]
yuezonghe824eb0c2024-06-27 02:32:26 -07001/*
2 * linux/ipc/util.c
3 * Copyright (C) 1992 Krishna Balasubramanian
4 *
5 * Sep 1997 - Call suser() last after "normal" permission checks so we
6 * get BSD style process accounting right.
7 * Occurs in several places in the IPC code.
8 * Chris Evans, <chris@ferret.lmh.ox.ac.uk>
9 * Nov 1999 - ipc helper functions, unified SMP locking
10 * Manfred Spraul <manfred@colorfullife.com>
11 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
12 * Mingming Cao <cmm@us.ibm.com>
13 * Mar 2006 - support for audit of ipc object properties
14 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
16 * OpenVZ, SWsoft Inc.
17 * Pavel Emelianov <xemul@openvz.org>
18 */
19
20#include <linux/mm.h>
21#include <linux/shm.h>
22#include <linux/init.h>
23#include <linux/msg.h>
24#include <linux/vmalloc.h>
25#include <linux/slab.h>
26#include <linux/capability.h>
27#include <linux/highuid.h>
28#include <linux/security.h>
29#include <linux/rcupdate.h>
30#include <linux/workqueue.h>
31#include <linux/seq_file.h>
32#include <linux/proc_fs.h>
33#include <linux/audit.h>
34#include <linux/nsproxy.h>
35#include <linux/rwsem.h>
36#include <linux/memory.h>
37#include <linux/ipc_namespace.h>
38
39#include <asm/unistd.h>
40
41#include "util.h"
42
/*
 * Describes one /proc/sysvipc/<path> file: which ipc id table it walks
 * and how a single entry is rendered through the seq_file interface.
 */
struct ipc_proc_iface {
	const char *path;	/* procfs path the file is created at */
	const char *header;	/* banner printed before the first entry */
	int ids;		/* index into ipc_namespace->ids[] */
	int (*show)(struct seq_file *, void *);	/* per-entry show routine */
};
49
50#ifdef CONFIG_MEMORY_HOTPLUG
51
/*
 * Deferred work handler: propagate a memory hotplug event to the ipc
 * namespace notifier chain (so each namespace can recompute msgmni)
 * outside of the hotplug notifier's context.
 */
static void ipc_memory_notifier(struct work_struct *work)
{
	ipcns_notify(IPCNS_MEMCHANGED);
}

static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
58
59
60static int ipc_memory_callback(struct notifier_block *self,
61 unsigned long action, void *arg)
62{
63 switch (action) {
64 case MEM_ONLINE: /* memory successfully brought online */
65 case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
66 /*
67 * This is done by invoking the ipcns notifier chain with the
68 * IPC_MEMCHANGED event.
69 * In order not to keep the lock on the hotplug memory chain
70 * for too long, queue a work item that will, when waken up,
71 * activate the ipcns notification chain.
72 * No need to keep several ipc work items on the queue.
73 */
74 if (!work_pending(&ipc_memory_wq))
75 schedule_work(&ipc_memory_wq);
76 break;
77 case MEM_GOING_ONLINE:
78 case MEM_GOING_OFFLINE:
79 case MEM_CANCEL_ONLINE:
80 case MEM_CANCEL_OFFLINE:
81 default:
82 break;
83 }
84
85 return NOTIFY_OK;
86}
87
88#endif /* CONFIG_MEMORY_HOTPLUG */
89
/**
 * ipc_init - initialise IPC subsystem
 *
 * The various system5 IPC resources (semaphores, messages and shared
 * memory) are initialised
 * A callback routine is registered into the memory hotplug notifier
 * chain: since msgmni scales to lowmem this callback routine will be
 * called upon successful memory add / remove to recompute msgmni.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	/* recompute msgmni on memory add/remove (no-op without hotplug) */
	hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
	register_ipcns_notifier(&init_ipc_ns);
	return 0;
}
__initcall(ipc_init);
110
111/**
112 * ipc_init_ids - initialise IPC identifiers
113 * @ids: Identifier set
114 *
115 * Set up the sequence range to use for the ipc identifier range (limited
116 * below IPCMNI) then initialise the ids idr.
117 */
118
119void ipc_init_ids(struct ipc_ids *ids)
120{
121 init_rwsem(&ids->rw_mutex);
122
123 ids->in_use = 0;
124 ids->seq = 0;
125 {
126 int seq_limit = INT_MAX/SEQ_MULTIPLIER;
127 if (seq_limit > USHRT_MAX)
128 ids->seq_max = USHRT_MAX;
129 else
130 ids->seq_max = seq_limit;
131 }
132
133 idr_init(&ids->ipcs_idr);
134}
135
136#ifdef CONFIG_PROC_FS
137static const struct file_operations sysvipc_proc_fops;
138/**
139 * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface.
140 * @path: Path in procfs
141 * @header: Banner to be printed at the beginning of the file.
142 * @ids: ipc id table to iterate.
143 * @show: show routine.
144 */
145void __init ipc_init_proc_interface(const char *path, const char *header,
146 int ids, int (*show)(struct seq_file *, void *))
147{
148 struct proc_dir_entry *pde;
149 struct ipc_proc_iface *iface;
150
151 if (IS_ENABLED(CONFIG_PROC_STRIPPED))
152 return 0;
153
154 iface = kmalloc(sizeof(*iface), GFP_KERNEL);
155 if (!iface)
156 return;
157 iface->path = path;
158 iface->header = header;
159 iface->ids = ids;
160 iface->show = show;
161
162 pde = proc_create_data(path,
163 S_IRUGO, /* world readable */
164 NULL, /* parent dir */
165 &sysvipc_proc_fops,
166 iface);
167 if (!pde) {
168 kfree(iface);
169 }
170}
171#endif
172
173/**
174 * ipc_findkey - find a key in an ipc identifier set
175 * @ids: Identifier set
176 * @key: The key to find
177 *
178 * Requires ipc_ids.rw_mutex locked.
179 * Returns the LOCKED pointer to the ipc structure if found or NULL
180 * if not.
181 * If key is found ipc points to the owning ipc structure
182 */
183
184static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
185{
186 struct kern_ipc_perm *ipc;
187 int next_id;
188 int total;
189
190 for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
191 ipc = idr_find(&ids->ipcs_idr, next_id);
192
193 if (ipc == NULL)
194 continue;
195
196 if (ipc->key != key) {
197 total++;
198 continue;
199 }
200
201 ipc_lock_by_ptr(ipc);
202 return ipc;
203 }
204
205 return NULL;
206}
207
208/**
209 * ipc_get_maxid - get the last assigned id
210 * @ids: IPC identifier set
211 *
212 * Called with ipc_ids.rw_mutex held.
213 */
214
215int ipc_get_maxid(struct ipc_ids *ids)
216{
217 struct kern_ipc_perm *ipc;
218 int max_id = -1;
219 int total, id;
220
221 if (ids->in_use == 0)
222 return -1;
223
224 if (ids->in_use == IPCMNI)
225 return IPCMNI - 1;
226
227 /* Look for the last assigned id */
228 total = 0;
229 for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
230 ipc = idr_find(&ids->ipcs_idr, id);
231 if (ipc != NULL) {
232 max_id = id;
233 total++;
234 }
235 }
236 return max_id;
237}
238
/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: limit for the number of used ids
 *
 * Add an entry 'new' to the IPC ids idr. The permissions object is
 * initialised and the first free entry is set up and the id assigned
 * is returned. The 'new' entry is returned in a locked state on success.
 * On failure the entry is not locked and a negative err-code is returned.
 *
 * Called with ipc_ids.rw_mutex held as a writer.
 */

int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	uid_t euid;
	gid_t egid;
	int id, err;

	if (size > IPCMNI)
		size = IPCMNI;

	if (ids->in_use >= size)
		return -ENOSPC;

	/*
	 * Lock the entry (and enter an RCU read section) BEFORE it is
	 * published in the idr, so no other CPU can observe it unlocked.
	 */
	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);

	err = idr_get_new(&ids->ipcs_idr, new, &id);
	if (err) {
		/* undo the locking done above before bailing out */
		spin_unlock(&new->lock);
		rcu_read_unlock();
		return err;
	}

	ids->in_use++;

	/* creator and owner credentials start out identical */
	current_euid_egid(&euid, &egid);
	new->cuid = new->uid = euid;
	new->gid = new->cgid = egid;

	/* bump the per-table sequence number, wrapping at seq_max */
	new->seq = ids->seq++;
	if(ids->seq > ids->seq_max)
		ids->seq = 0;

	/* user-visible id combines slot index and sequence number */
	new->id = ipc_buildid(id, new->seq);
	return id;
}
290
291/**
292 * ipcget_new - create a new ipc object
293 * @ns: namespace
294 * @ids: IPC identifer set
295 * @ops: the actual creation routine to call
296 * @params: its parameters
297 *
298 * This routine is called by sys_msgget, sys_semget() and sys_shmget()
299 * when the key is IPC_PRIVATE.
300 */
301static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
302 struct ipc_ops *ops, struct ipc_params *params)
303{
304 int err;
305retry:
306 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
307
308 if (!err)
309 return -ENOMEM;
310
311 down_write(&ids->rw_mutex);
312 err = ops->getnew(ns, params);
313 up_write(&ids->rw_mutex);
314
315 if (err == -EAGAIN)
316 goto retry;
317
318 return err;
319}
320
321/**
322 * ipc_check_perms - check security and permissions for an IPC
323 * @ns: IPC namespace
324 * @ipcp: ipc permission set
325 * @ops: the actual security routine to call
326 * @params: its parameters
327 *
328 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
329 * when the key is not IPC_PRIVATE and that key already exists in the
330 * ids IDR.
331 *
332 * On success, the IPC id is returned.
333 *
334 * It is called with ipc_ids.rw_mutex and ipcp->lock held.
335 */
336static int ipc_check_perms(struct ipc_namespace *ns,
337 struct kern_ipc_perm *ipcp,
338 struct ipc_ops *ops,
339 struct ipc_params *params)
340{
341 int err;
342
343 if (ipcperms(ns, ipcp, params->flg))
344 err = -EACCES;
345 else {
346 err = ops->associate(ipcp, params->flg);
347 if (!err)
348 err = ipcp->id;
349 }
350
351 return err;
352}
353
/**
 * ipcget_public - get an ipc object or create a new one
 * @ns: namespace
 * @ids: IPC identifer set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget, sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE.
 * It adds a new entry if the key is not found and does some permission
 * / security checkings if the key is found.
 *
 * On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	struct kern_ipc_perm *ipcp;
	int flg = params->flg;
	int err;
retry:
	/*
	 * NOTE: err here is the idr_pre_get() result (0 == allocation
	 * failed); it is only consulted on the "key not used" path below.
	 */
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	/*
	 * Take the lock as a writer since we are potentially going to add
	 * a new entry + read locks are not "upgradable"
	 */
	down_write(&ids->rw_mutex);
	ipcp = ipc_findkey(ids, params->key);
	if (ipcp == NULL) {
		/* key not used */
		if (!(flg & IPC_CREAT))
			err = -ENOENT;
		else if (!err)
			err = -ENOMEM;
		else
			err = ops->getnew(ns, params);
	} else {
		/* ipc object has been locked by ipc_findkey() */

		if (flg & IPC_CREAT && flg & IPC_EXCL)
			err = -EEXIST;
		else {
			err = 0;
			if (ops->more_checks)
				err = ops->more_checks(ipcp, params);
			if (!err)
				/*
				 * ipc_check_perms returns the IPC id on
				 * success
				 */
				err = ipc_check_perms(ns, ipcp, ops, params);
		}
		ipc_unlock(ipcp);
	}
	up_write(&ids->rw_mutex);

	/* creation raced with idr exhaustion: restart from the top */
	if (err == -EAGAIN)
		goto retry;

	return err;
}
416
417
418/**
419 * ipc_rmid - remove an IPC identifier
420 * @ids: IPC identifier set
421 * @ipcp: ipc perm structure containing the identifier to remove
422 *
423 * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
424 * before this function is called, and remain locked on the exit.
425 */
426
427void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
428{
429 int lid = ipcid_to_idx(ipcp->id);
430
431 idr_remove(&ids->ipcs_idr, lid);
432
433 ids->in_use--;
434
435 ipcp->deleted = 1;
436
437 return;
438}
439
440/**
441 * ipc_alloc - allocate ipc space
442 * @size: size desired
443 *
444 * Allocate memory from the appropriate pools and return a pointer to it.
445 * NULL is returned if the allocation fails
446 */
447
448void* ipc_alloc(int size)
449{
450 void* out;
451 if(size > PAGE_SIZE)
452 out = vmalloc(size);
453 else
454 out = kmalloc(size, GFP_KERNEL);
455 return out;
456}
457
458/**
459 * ipc_free - free ipc space
460 * @ptr: pointer returned by ipc_alloc
461 * @size: size of block
462 *
463 * Free a block created with ipc_alloc(). The caller must know the size
464 * used in the allocation call.
465 */
466
467void ipc_free(void* ptr, int size)
468{
469 if(size > PAGE_SIZE)
470 vfree(ptr);
471 else
472 kfree(ptr);
473}
474
/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
	int refcount;		/* manipulated by ipc_rcu_getref/putref */
	int is_vmalloc;		/* selects vfree-via-workqueue vs kfree_rcu */
	void *data[0];		/* start of the caller-visible object */
};


struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;	/* defers vfree out of bh context */
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

/* header size actually reserved: the max over the headers that may occupy it */
#define HDRLEN_KMALLOC (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
			sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
			sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)
511
512static inline int rcu_use_vmalloc(int size)
513{
514 /* Too big for a single page? */
515 if (HDRLEN_KMALLOC + size > PAGE_SIZE)
516 return 1;
517 return 0;
518}
519
520/**
521 * ipc_rcu_alloc - allocate ipc and rcu space
522 * @size: size desired
523 *
524 * Allocate memory for the rcu header structure + the object.
525 * Returns the pointer to the object.
526 * NULL is returned if the allocation fails.
527 */
528
529void* ipc_rcu_alloc(int size)
530{
531 void* out;
532 /*
533 * We prepend the allocation with the rcu struct, and
534 * workqueue if necessary (for vmalloc).
535 */
536 if (rcu_use_vmalloc(size)) {
537 out = vmalloc(HDRLEN_VMALLOC + size);
538 if (out) {
539 out += HDRLEN_VMALLOC;
540 container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
541 container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
542 }
543 } else {
544 out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
545 if (out) {
546 out += HDRLEN_KMALLOC;
547 container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
548 container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
549 }
550 }
551
552 return out;
553}
554
/* Take an extra reference on an ipc_rcu_alloc()ed object.
 * NOTE(review): the increment is not atomic — presumably callers hold
 * the object's spinlock; confirm at the call sites. */
void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}
559
/* Work handler that vfrees a vmalloc'ed ipc object.
 * Assumes the ipc_rcu_sched header sits at the very start of the
 * vmalloc'ed block (i.e. sizeof(struct ipc_rcu_sched) == HDRLEN_VMALLOC),
 * so container_of() recovers the original allocation pointer — TODO confirm. */
static void ipc_do_vfree(struct work_struct *work)
{
	vfree(container_of(work, struct ipc_rcu_sched, work));
}
564
/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since RCU callback function is called in bh,
 * we need to defer the vfree to schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace;
	struct ipc_rcu_sched *sched;

	/*
	 * Both headers are right-aligned against the payload, so go from
	 * the rcu_head back to the shared data[] and from there to the
	 * ipc_rcu_sched view of the same memory.
	 */
	grace = container_of(head, struct ipc_rcu_grace, rcu);
	sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
				data[0]);

	/* the grace period has elapsed; vfree must run in process context */
	INIT_WORK(&sched->work, ipc_do_vfree);
	schedule_work(&sched->work);
}
584
/* Drop a reference taken with ipc_rcu_getref(); on the last reference the
 * object is freed after an RCU grace period (via workqueue for vmalloc,
 * kfree_rcu otherwise). Decrement is non-atomic, like the getref side. */
void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		/* vfree cannot run from the RCU callback (bh) context */
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu);
	}
}
597
/**
 * ipcperms - check IPC permissions
 * @ns: IPC namespace
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group, other permissions for access
 * to ipc resources. return 0 if allowed
 *
 * @flag will most probably be 0 or S_...UGO from <linux/stat.h>
 */

int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
	uid_t euid = current_euid();
	int requested_mode, granted_mode;

	audit_ipc_obj(ipcp);
	/* fold owner/group/other request bits into the low (other) triad */
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	/* shift the applicable triad of the object's mode into place */
	if (euid == ipcp->cuid ||
	    euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !ns_capable(ns->user_ns, CAP_IPC_OWNER))
		return -1;

	/* mode bits allow it; give the LSM the final say */
	return security_ipc_permission(ipcp, flag);
}
630
631/*
632 * Functions to convert between the kern_ipc_perm structure and the
633 * old/new ipc_perm structures
634 */
635
/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).  Straight field-by-field copy;
 * no range reduction is needed for the ipc64 layout.
 */


void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key = in->key;
	out->uid = in->uid;
	out->gid = in->gid;
	out->cuid = in->cuid;
	out->cgid = in->cgid;
	out->mode = in->mode;
	out->seq = in->seq;
}
656
/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.  SET_UID/SET_GID clamp
 * ids that do not fit the old narrow uid/gid fields.
 */

void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key = in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode = in->mode;
	out->seq = in->seq;
}
676
/**
 * ipc_lock - Lock an ipc structure without rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit; on failure ERR_PTR(-EINVAL) is
 * returned with nothing held.
 */

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	/* the RCU read section keeps the object alive across the lookup */
	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	/* returns with rcu_read_lock and out->lock held */
	return out;
}
712
713struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
714{
715 struct kern_ipc_perm *out;
716
717 out = ipc_lock(ids, id);
718 if (IS_ERR(out))
719 return out;
720
721 if (ipc_checkid(out, id)) {
722 ipc_unlock(out);
723 return ERR_PTR(-EIDRM);
724 }
725
726 return out;
727}
728
729/**
730 * ipcget - Common sys_*get() code
731 * @ns : namsepace
732 * @ids : IPC identifier set
733 * @ops : operations to be called on ipc object creation, permission checks
734 * and further checks
735 * @params : the parameters needed by the previous operations.
736 *
737 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
738 */
739int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
740 struct ipc_ops *ops, struct ipc_params *params)
741{
742 if (params->key == IPC_PRIVATE)
743 return ipcget_new(ns, ids, ops, params);
744 else
745 return ipcget_public(ns, ids, ops, params);
746}
747
/**
 * ipc_update_perm - update the permissions of an IPC.
 * @in: the permission given as input.
 * @out: the permission of the ipc to set.
 *
 * Only uid, gid and the rwx mode bits are updated; creator fields,
 * key and sequence number are deliberately left untouched.
 */
void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
	out->uid = in->uid;
	out->gid = in->gid;
	/* replace only the S_IRWXUGO bits, preserve the rest of the mode */
	out->mode = (out->mode & ~S_IRWXUGO)
		| (in->mode & S_IRWXUGO);
}
760
/**
 * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
 * @ns: the ipc namespace
 * @ids: the table of ids where to look for the ipc
 * @id: the id of the ipc to retrieve
 * @cmd: the cmd to check
 * @perm: the permission to set
 * @extra_perm: one extra permission parameter used by msq
 *
 * This function does some common audit and permissions check for some IPC_XXX
 * cmd and is called from semctl_down, shmctl_down and msgctl_down.
 * It must be called without any lock held and
 *  - retrieves the ipc with the given id in the given table.
 *  - performs some audit and permission check, depending on the given cmd
 *  - returns the ipc with both ipc and rw_mutex locks held in case of success
 *    or an err-code without any lock held otherwise.
 */
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
				      struct ipc_ids *ids, int id, int cmd,
				      struct ipc64_perm *perm, int extra_perm)
{
	struct kern_ipc_perm *ipcp;
	uid_t euid;
	int err;

	down_write(&ids->rw_mutex);
	ipcp = ipc_lock_check(ids, id);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_up;
	}

	audit_ipc_obj(ipcp);
	if (cmd == IPC_SET)
		audit_ipc_set_perm(extra_perm, perm->uid,
					 perm->gid, perm->mode);

	/* creator, owner or a sufficiently privileged caller may proceed */
	euid = current_euid();
	if (euid == ipcp->cuid || euid == ipcp->uid  ||
	    ns_capable(ns->user_ns, CAP_SYS_ADMIN))
		return ipcp;	/* success: BOTH locks stay held */

	err = -EPERM;
	ipc_unlock(ipcp);
out_up:
	/* failure path: drop everything before returning the error */
	up_write(&ids->rw_mutex);
	return ERR_PTR(err);
}
809
810#ifdef __ARCH_WANT_IPC_PARSE_VERSION
811
812
813/**
814 * ipc_parse_version - IPC call version
815 * @cmd: pointer to command
816 *
817 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
818 * The @cmd value is turned from an encoding command and version into
819 * just the command code.
820 */
821
822int ipc_parse_version (int *cmd)
823{
824 if (*cmd & IPC_64) {
825 *cmd ^= IPC_64;
826 return IPC_64;
827 } else {
828 return IPC_OLD;
829 }
830}
831
832#endif /* __ARCH_WANT_IPC_PARSE_VERSION */
833
834#ifdef CONFIG_PROC_FS
/* Per-open iterator state for a /proc/sysvipc file. */
struct ipc_proc_iter {
	struct ipc_namespace *ns;	/* namespace pinned at open time */
	struct ipc_proc_iface *iface;	/* which table/show routine to use */
};
839
/*
 * This routine locks the ipc structure found at least at position pos.
 * *new_pos is set to one past the entry returned, so the next call
 * resumes after it.  Returns NULL when no entry at or beyond pos exists.
 */
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
					      loff_t *new_pos)
{
	struct kern_ipc_perm *ipc;
	int total, id;

	/* count how many live entries lie strictly below pos */
	total = 0;
	for (id = 0; id < pos && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL)
			total++;
	}

	/* everything in use is below pos: nothing left to return */
	if (total >= ids->in_use)
		return NULL;

	for ( ; pos < IPCMNI; pos++) {
		ipc = idr_find(&ids->ipcs_idr, pos);
		if (ipc != NULL) {
			*new_pos = pos + 1;
			ipc_lock_by_ptr(ipc);
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}
871
872static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
873{
874 struct ipc_proc_iter *iter = s->private;
875 struct ipc_proc_iface *iface = iter->iface;
876 struct kern_ipc_perm *ipc = it;
877
878 /* If we had an ipc id locked before, unlock it */
879 if (ipc && ipc != SEQ_START_TOKEN)
880 ipc_unlock(ipc);
881
882 return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
883}
884
/*
 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
 * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	ids = &iter->ns->ids[iface->ids];

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	down_read(&ids->rw_mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	return sysvipc_find_ipc(ids, *pos - 1, pos);
}
914
915static void sysvipc_proc_stop(struct seq_file *s, void *it)
916{
917 struct kern_ipc_perm *ipc = it;
918 struct ipc_proc_iter *iter = s->private;
919 struct ipc_proc_iface *iface = iter->iface;
920 struct ipc_ids *ids;
921
922 /* If we had a locked structure, release it */
923 if (ipc && ipc != SEQ_START_TOKEN)
924 ipc_unlock(ipc);
925
926 ids = &iter->ns->ids[iface->ids];
927 /* Release the lock we took in start() */
928 up_read(&ids->rw_mutex);
929}
930
/* seq_file .show: print the banner for the start token, else one entry. */
static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}
941
/* seq_file iteration over one SysV IPC id table. */
static const struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};
948
949static int sysvipc_proc_open(struct inode *inode, struct file *file)
950{
951 int ret;
952 struct seq_file *seq;
953 struct ipc_proc_iter *iter;
954
955 ret = -ENOMEM;
956 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
957 if (!iter)
958 goto out;
959
960 ret = seq_open(file, &sysvipc_proc_seqops);
961 if (ret)
962 goto out_kfree;
963
964 seq = file->private_data;
965 seq->private = iter;
966
967 iter->iface = PDE(inode)->data;
968 iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
969out:
970 return ret;
971out_kfree:
972 kfree(iter);
973 goto out;
974}
975
/* Release a /proc/sysvipc file: drop the namespace pinned at open time,
 * then let seq_release_private free the iterator and seq_file state. */
static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ipc_proc_iter *iter = seq->private;
	put_ipc_ns(iter->ns);
	return seq_release_private(inode, file);
}
983
/* File operations for the /proc/sysvipc entries (seq_file based). */
static const struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = sysvipc_proc_release,
};
990#endif /* CONFIG_PROC_FS */