/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

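/*
 * Per-attach state, hung off file->private_data of the file created at
 * shmat() time: the ipc id and namespace of the segment, the backing
 * shmem/hugetlb file, and the vm_ops we wrap (see shm_mmap()).
 */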
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	if (IS_ENABLED(CONFIG_PROC_STRIPPED))
		return;

	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	spin_lock(&ipcp->shm_perm.lock);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark the segment as orphaned so that it can be destroyed when
	 * kernel.shm_rmid_forced is changed later on.
	 * This is a no-op if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

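/*
 * Reap every orphaned (creator already exited), unattached segment in the
 * namespace; this backs the kernel.shm_rmid_forced sysctl being switched on.
 */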
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}


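/*
 * Called on task exit: mark every segment this task created as orphaned
 * and, when kernel.shm_rmid_forced is set, destroy the unattached ones
 * right away (see shm_try_destroy_current()).
 */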
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

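/*
 * Page faults on an attached segment are simply forwarded to the fault
 * handler of the backing shmem (or hugetlb) file saved in shm_mmap().
 */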
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

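/*
 * mmap the backing file, then stack shm_vm_ops on top of whatever vm_ops
 * the backing file installed: the original ops are stashed in sfd->vm_ops
 * so faults and NUMA policies can be forwarded, while open/close are
 * intercepted to keep shm_nattch accurate.
 */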
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

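/*
 * Last reference to the shmat()-time file is gone: release the namespace
 * reference taken in do_shmat() and free the per-attach data.
 */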
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		size_t hugesize = ALIGN(size, huge_page_size(&default_hstate));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
					  &shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

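/*
 * A minimal userspace sketch of how this entry point is reached
 * (illustrative only; key path, size and permissions are arbitrary and
 * error handling is elided):
 *
 *	key_t key = ftok("/some/path", 'A');
 *	int id = shmget(key, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 */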
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

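/*
 * Translate the in-kernel shmid64_ds into whichever ABI layout (IPC_64 or
 * the legacy IPC_OLD shmid_ds) the caller asked for.
 */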
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid = tbuf_old.shm_perm.uid;
		out->shm_perm.gid = tbuf_old.shm_perm.gid;
		out->shm_perm.mode = tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = shp->shm_file->f_path.dentry->d_inode;

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
			       &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

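/*
 * Top-level shmctl(2) dispatcher. The info and stat commands are served
 * here directly, taking the rw_mutex as a reader where needed; SHM_LOCK
 * and SHM_UNLOCK operate under the segment lock; IPC_RMID and IPC_SET are
 * forwarded to shmctl_down(), which takes the rw_mutex as a writer.
 */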
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atim;
		tbuf.shm_dtime = shp->shm_dtim;
		tbuf.shm_ctime = shp->shm_ctim;
		tbuf.shm_cpid = shp->shm_cprid;
		tbuf.shm_lpid = shp->shm_lprid;
		tbuf.shm_nattch = shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		shm_unlock(shp);
		shmem_unlock_mapping(shm_file->f_mapping);
		fput(shm_file);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND) {
				addr &= ~(SHMLBA - 1);	/* round down */

				/*
				 * Fix for CVE-2017-5669: ensure that the
				 * round-down is non-nil when remapping;
				 * this can happen when addr < SHMLBA.
				 */
				if (!addr && (shmflg & SHM_REMAP))
					goto out;
			} else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

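/*
 * Wrap do_shmat(): a valid attach address can look like a negative errno,
 * so force_successful_syscall_return() tells the syscall exit path not to
 * mistake the returned address for an error.
 */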
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
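/*
 * Emit one row of /proc/sysvipc/shm; the columns must stay in sync with
 * the header string registered in shm_init().
 */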
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif