/*
 * linux/ipc/msgutil.c
 * Copyright (C) 1999, 2004 Manfred Spraul
 *
 * This file is released under GNU General Public Licence version 2 or
 * (at your option) any later version.
 *
 * See the file COPYING for more details.
 */

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/ipc.h>
#include <linux/ipc_namespace.h>
#include <asm/uaccess.h>

#include "util.h"

/*
 * Each ipc namespace has a corresponding mqueuefs superblock (mounted
 * internally on clone/unshare with CLONE_NEWIPC).  mq_lock protects the
 * namespace reference count against races between (a) the last task in
 * a namespace dropping ns->count to zero and freeing the namespace and
 * (b) tasks reaching the namespace through such a superblock.  The
 * count is an atomic_t dropped with atomic_dec_and_lock(&ns->count,
 * &mq_lock), and every lookup through mqueuefs_sb->s_fs_info takes the
 * same lock; once the namespace is gone, s_fs_info is left NULL while
 * the superblock lives on until it is unmounted.
 */
DEFINE_SPINLOCK(mq_lock);
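
/*
 * A minimal sketch of that dec-and-lock protocol (illustration only:
 * the example_* helpers below are assumptions for the sketch, not
 * functions defined in this file):
 *
 *	void example_put_ipc_ns(struct ipc_namespace *ns)
 *	{
 *		if (atomic_dec_and_lock(&ns->count, &mq_lock)) {
 *			// last reference: detach the mqueuefs superblock
 *			example_clear_sb_info(ns);
 *			spin_unlock(&mq_lock);
 *			example_free_ipc_ns(ns);
 *		}
 *	}
 *
 *	struct ipc_namespace *example_get_ns_from_sb(struct super_block *sb)
 *	{
 *		struct ipc_namespace *ns;
 *
 *		spin_lock(&mq_lock);
 *		ns = sb->s_fs_info;	// NULL once the namespace is gone
 *		if (ns)
 *			atomic_inc(&ns->count);
 *		spin_unlock(&mq_lock);
 *		return ns;
 *	}
 */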

/*
 * The next two definitions are here because this is the only file
 * compiled when either CONFIG_SYSVIPC or CONFIG_POSIX_MQUEUE is
 * enabled but CONFIG_IPC_NS is not.
 */
struct ipc_namespace init_ipc_ns = {
	.count		= ATOMIC_INIT(1),
	.user_ns	= &init_user_ns,
};

atomic_t nr_ipc_ns = ATOMIC_INIT(1);

struct msg_msgseg {
	struct msg_msgseg *next;
	/* the next part of the message follows immediately */
};

#define DATALEN_MSG	(PAGE_SIZE-sizeof(struct msg_msg))
#define DATALEN_SEG	(PAGE_SIZE-sizeof(struct msg_msgseg))
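
/*
 * Copy a message of len bytes in from user space: the first
 * DATALEN_MSG bytes land directly behind the struct msg_msg header,
 * and any remainder is spread across a chain of msg_msgseg blocks
 * linked through msg->next, each holding up to DATALEN_SEG bytes.
 * Returns the new message or an ERR_PTR() on failure.
 */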
struct msg_msg *load_msg(const void __user *src, int len)
{
	struct msg_msg *msg;
	struct msg_msgseg **pseg;
	int err;
	int alen;

	alen = len;
	if (alen > DATALEN_MSG)
		alen = DATALEN_MSG;

	msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
	if (msg == NULL)
		return ERR_PTR(-ENOMEM);

	msg->next = NULL;
	msg->security = NULL;

	if (copy_from_user(msg + 1, src, alen)) {
		err = -EFAULT;
		goto out_err;
	}

	len -= alen;
	src = ((char __user *)src) + alen;
	pseg = &msg->next;
	while (len > 0) {
		struct msg_msgseg *seg;
		alen = len;
		if (alen > DATALEN_SEG)
			alen = DATALEN_SEG;
		seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
		if (seg == NULL) {
			err = -ENOMEM;
			goto out_err;
		}
		*pseg = seg;
		seg->next = NULL;
		if (copy_from_user(seg + 1, src, alen)) {
			err = -EFAULT;
			goto out_err;
		}
		pseg = &seg->next;
		len -= alen;
		src = ((char __user *)src) + alen;
	}

	err = security_msg_msg_alloc(msg);
	if (err)
		goto out_err;

	return msg;

out_err:
	free_msg(msg);
	return ERR_PTR(err);
}
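
/*
 * Copy the message body back out to user space, walking the header
 * chunk and then the segment chain.  Returns 0 on success and -1 if
 * any copy_to_user() call fails.
 */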
int store_msg(void __user *dest, struct msg_msg *msg, int len)
{
	int alen;
	struct msg_msgseg *seg;

	alen = len;
	if (alen > DATALEN_MSG)
		alen = DATALEN_MSG;
	if (copy_to_user(dest, msg + 1, alen))
		return -1;

	len -= alen;
	dest = ((char __user *)dest) + alen;
	seg = msg->next;
	while (len > 0) {
		alen = len;
		if (alen > DATALEN_SEG)
			alen = DATALEN_SEG;
		if (copy_to_user(dest, seg + 1, alen))
			return -1;
		len -= alen;
		dest = ((char __user *)dest) + alen;
		seg = seg->next;
	}
	return 0;
}
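
/*
 * Release the security blob, the message header and every segment in
 * the chain.  Each segment's next pointer is saved before kfree() so
 * the walk never touches freed memory.
 */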
void free_msg(struct msg_msg *msg)
{
	struct msg_msgseg *seg;

	security_msg_msg_free(msg);

	seg = msg->next;
	kfree(msg);
	while (seg != NULL) {
		struct msg_msgseg *tmp = seg->next;
		kfree(seg);
		seg = tmp;
	}
}