/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1
struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);

	return ns;
}
/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to
		 * the right.  On receive, we want the highest priorities
		 * first, so walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
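	/*
	 * Illustrative example (not part of the original source): if
	 * messages were sent with priorities 3, 1, 3 (in that order), the
	 * walk above ends at the priority-3 leaf, so they are received as
	 * 3, 3, 1: highest priority first, FIFO within a priority
	 * (list_add_tail() on insert, list_first_entry() below on receive).
	 */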
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache)
			kfree(leaf);
		else
			info->node_cache = leaf;
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache)
				kfree(leaf);
			else
				info->node_cache = leaf;
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);
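		/*
		 * Worked example (illustrative only; struct sizes vary by
		 * architecture and config): with mq_maxmsg = 10 and
		 * mq_msgsize = 8192, and assuming sizeof(struct msg_msg)
		 * == 48 and sizeof(struct posix_msg_tree_node) == 40, we
		 * charge 10 * 48 + min(10, MQ_PRIO_MAX) * 40 = 880 bytes
		 * of tree overhead plus 10 * 8192 = 81920 bytes of payload,
		 * i.e. mq_bytes = 82800 against RLIMIT_MSGQUEUE.
		 */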
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT)) {
		struct ipc_namespace *ns = current->nsproxy->ipc_ns;
		/* Don't allow mounting unless the caller has CAP_SYS_ADMIN
		 * over the ipc namespace.
		 */
		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
			return ERR_PTR(-EPERM);

		data = ns;
	}
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}
static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}
static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}

	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}
/*
 * This routine is called when the queue file is read(2) from userspace.
 * Rather than implementing some sort of mq_receive() here, we expose only
 * the queue size and notification info: the only values that are
 * interesting from the user's point of view and not accessible through
 * the standard library routines.
 */
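/*
 * Example of the resulting output (field values are illustrative; the
 * layout follows the snprintf() format below, e.g. as seen via
 * "cat /dev/mqueue/foo"):
 *
 *	QSIZE:129      NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */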
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
	return ret;
}
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	unsigned int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * on return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == -ETIME) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}
/*
 * Returns the waiting task that should be serviced first, or NULL if none
 * exists.
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
/*
 * This helper exists only to split the overly long sys_mq_timedsend().
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when a process has registered for notification, no process
	 * is waiting synchronously for a message, AND the state of the queue
	 * changed from empty to not empty. At this point we know that nobody
	 * is waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* a notification fires at most once: unregister the process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}
static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}
static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create2(path->mnt, dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}
/* Opens an existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission2(path->mnt, d_inode(path->dentry), acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	mutex_lock(&d_inode(root)->i_mutex);
	path.dentry = lookup_one_len2(name->name, mnt, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (d_really_is_positive(path.dentry)) {	/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, d_inode(root),
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (d_really_is_negative(path.dentry)) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	mutex_unlock(&d_inode(root)->i_mutex);
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
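/*
 * Userspace view of the syscall above (illustrative sketch only, not part
 * of this file; the queue name and attribute values are made up):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/myqueue", O_CREAT | O_EXCL | O_RDWR,
 *			    0600, &attr);
 *
 * The C library wrapper strips the leading '/' before ending up in the
 * SYSCALL_DEFINE4() above; the returned descriptor is an ordinary file
 * descriptor referring to a file on this filesystem.
 */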
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	mutex_lock_nested(&d_inode(mnt->mnt_root)->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len2(name->name, mnt, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink2(mnt, d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	mutex_unlock(&d_inode(mnt->mnt_root)->i_mutex);
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing
 * the queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
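/*
 * Rough timeline of the receiver-side rendezvous described above (a
 * simplified illustration of wq_sleep() and pipelined_send() below, not
 * verbatim code):
 *
 *	receiver				sender
 *	--------				------
 *	spin_lock(&info->lock)
 *	queue empty: wq_add(info, RECV, ewp)
 *	ewp->state = STATE_NONE
 *	spin_unlock(&info->lock), sleep
 *						spin_lock(&info->lock)
 *						receiver waiting:
 *						  receiver->msg = message
 *						  wake_q_add(wake_q, task)
 *						  receiver->state = STATE_READY
 *						spin_unlock(&info->lock)
 *						wake_up_q(&wake_q)
 *	observes STATE_READY, returns msg
 */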
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add to ensure
	 * that updating receiver->state is the last write operation: once
	 * the state is set, the receiver can continue. If we did not yet
	 * hold a reference from the wake_q at that point, we could race
	 * into a use-after-free condition and a bogus wakeup.
	 */
	receiver->state = STATE_READY;
}
/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (a free slot is guaranteed to
 * exist). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	WAKE_Q(wake_q);

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
/*
 * Note: a request to deregister (u_notification == NULL) from a task that
 * is not the current owner of the notification is silently ignored. POSIX
 * does not explicitly define this case.
 */
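/*
 * Userspace view (illustrative sketch only; the signal choice is made up,
 * see also mq_notify(3)):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mqd, &sev);	registers; fails with errno EBUSY when
 *				another process already owns the notification
 *	mq_notify(mqd, NULL);	deregisters; silently ignored when the
 *				caller is not the owner, as noted above
 */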
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification.sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}
SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&f.file->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fdput(f);
out:
	return ret;
}
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};
int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);