/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
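
/*
 * Illustrative example (not in the original source): with default
 * dispositions, sig_kernel_ignore() covers signals such as SIGCHLD,
 * SIGWINCH and SIGURG, so:
 *
 *	sig_handler_ignored(SIG_DFL, SIGCHLD) -> 1  (implicitly ignored)
 *	sig_handler_ignored(SIG_DFL, SIGTERM) -> 0  (default action kills)
 *	sig_handler_ignored(SIG_IGN, SIGTERM) -> 1  (explicitly ignored)
 */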
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
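
/*
 * Worked example (illustrative): with _NSIG_WORDS == 1, if SIGTERM and
 * SIGUSR1 are pending but SIGUSR1 is blocked, "ready" ends up with only
 * the SIGTERM bit set and has_pending_signals() returns true.  If SIGTERM
 * is blocked as well, ready == 0 and TIF_SIGPENDING can stay clear.
 */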
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they should
	 * clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
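
/*
 * Illustrative example (not in the original source): if both SIGUSR1 (10)
 * and SIGSEGV (11) are pending and unblocked, the plain lowest-bit scan
 * would return SIGUSR1 first; the SYNCHRONOUS_MASK special case above makes
 * next_signal() return SIGSEGV instead, so the fault is reported before any
 * unrelated asynchronous signal.
 */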
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
	       current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
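
/*
 * Accounting sketch (illustrative, not in the original source): each queued
 * entry is charged to the sending user's ->sigpending counter, so the two
 * helpers above must stay paired:
 *
 *	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, 0);	// charges the user
 *	...
 *	__sigqueue_free(q);				// releases the charge
 *
 * RLIMIT_SIGPENDING bounds the counter unless override_rlimit is set, as
 * it is for kernel-generated signals.
 */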
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
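
/*
 * Aside (sketch, relying on the usual <linux/sched.h> definitions): callers
 * below use the inline wrappers rather than this function directly:
 *
 *	signal_wake_up(t, resume)	 -> signal_wake_up_state(t,
 *						resume ? TASK_WAKEKILL : 0)
 *	ptrace_signal_wake_up(t, resume) -> signal_wake_up_state(t,
 *						resume ? __TASK_TRACED : 0)
 */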
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
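
/*
 * Note (sketch, assuming the usual header definitions): SEND_SIG_NOINFO,
 * SEND_SIG_PRIV and SEND_SIG_FORCED are sentinel siginfo pointers (0, 1
 * and 2 respectively), which is why the ordered pointer comparison in
 * is_si_special() works.
 */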
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
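
/*
 * For example (illustrative): a shell running as uid 1000 may signal a
 * setuid-root process it started, because the target's saved uid (suid)
 * still matches the sender's uid; signalling an unrelated root daemon
 * instead requires CAP_KILL in the target's user namespace.
 */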
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
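
/*
 * Illustrative example (not in the original source): if SIGUSR1 is already
 * pending, a second SIGUSR1 makes legacy_queue() true and is coalesced
 * into the single pending instance.  Real-time signals (>= SIGRTMIN) skip
 * this check, so each occurrence queues its own sigqueue entry.
 */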
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
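
/*
 * For example (illustrative): send_sig(SIGTERM, p, 1) expands to
 * send_sig_info(SIGTERM, SEND_SIG_PRIV, p), i.e. a kernel-originated
 * signal, while priv == 0 passes SEND_SIG_NOINFO as if the signal came
 * from a user process.
 */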
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
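
/*
 * Lifecycle sketch (illustrative, not in the original source): a POSIX
 * timer pairs these helpers roughly as follows, so that expiry-time
 * delivery never has to allocate memory:
 *
 *	q = sigqueue_alloc();		// timer_create(): may fail, -EAGAIN
 *	...
 *	send_sigqueue(q, tsk, group);	// each expiry reuses the same entry
 *	...
 *	sigqueue_free(q);		// timer_delete(): drop preallocation
 */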
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
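
/*
 * Tracer-side view (illustrative): for a PT_SEIZED tracee stopping for
 * group stop, the waitpid() status reported to the ptracer satisfies
 *
 *	status >> 8 == (signr | (PTRACE_EVENT_STOP << 8))
 *
 * e.g. SIGSTOP | (PTRACE_EVENT_STOP << 8), matching the exit_code built
 * above.
 */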
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
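
/*
 * Tracer-side view (illustrative, not kernel code): after waitpid()
 * reports a signal-delivery-stop, the debugger decides what the tracee
 * sees on resume:
 *
 *	ptrace(PTRACE_CONT, pid, 0, 0);		// cancel: we return 0
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);	// replace: exit_code = SIGTERM
 */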
2195 int get_signal(struct ksignal *ksig)
2197 struct sighand_struct *sighand = current->sighand;
2198 struct signal_struct *signal = current->signal;
2201 if (unlikely(current->task_works))
2204 if (unlikely(uprobe_deny_signal()))
2208 * Do this once, we can't return to user-mode if freezing() == T.
2209 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2210 * thus do not need another check after return.
2215 spin_lock_irq(&sighand->siglock);
2217 * Every stopped thread goes here after wakeup. Check to see if
2218 * we should notify the parent, prepare_signal(SIGCONT) encodes
2219 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2221 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2224 if (signal->flags & SIGNAL_CLD_CONTINUED)
2225 why = CLD_CONTINUED;
2229 signal->flags &= ~SIGNAL_CLD_MASK;
2231 spin_unlock_irq(&sighand->siglock);
2234 * Notify the parent that we're continuing. This event is
2235 * always per-process and doesn't make whole lot of sense
2236 * for ptracers, who shouldn't consume the state via
2237 * wait(2) either, but, for backward compatibility, notify
2238 * the ptracer of the group leader too unless it's gonna be
2241 read_lock(&tasklist_lock);
2242 do_notify_parent_cldstop(current, false, why);
2244 if (ptrace_reparented(current->group_leader))
2245 do_notify_parent_cldstop(current->group_leader,
2247 read_unlock(&tasklist_lock);
2252 /* Has this task already been marked for death? */
2253 	if (signal_group_exit(signal)) {
2254 		ksig->info.si_signo = signr = SIGKILL;
2255 		sigdelset(&current->pending.signal, SIGKILL);
2256 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2257 				&sighand->action[SIGKILL - 1]);
2258 		recalc_sigpending();
2259 		goto fatal;
2260 	}
2262 	for (;;) {
2263 		struct k_sigaction *ka;
2265 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2266 		    do_signal_stop(0))
2267 			goto relock;
2269 		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2270 			do_jobctl_trap();
2271 			spin_unlock_irq(&sighand->siglock);
2272 			goto relock;
2273 		}
2275 		/*
2276 		 * Signals generated by the execution of an instruction
2277 		 * need to be delivered before any other pending signals
2278 		 * so that the instruction pointer in the signal stack
2279 		 * frame points to the faulting instruction.
2280 		 */
2281 		signr = dequeue_synchronous_signal(&ksig->info);
2282 		if (!signr)
2283 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2285 		if (!signr)
2286 			break; /* will return 0 */
2288 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2289 			signr = ptrace_signal(signr, &ksig->info);
2290 			if (!signr)
2291 				continue;
2292 		}
2294 		ka = &sighand->action[signr-1];
2296 /* Trace actually delivered signals. */
2297 trace_signal_deliver(signr, &ksig->info, ka);
2299 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2300 			continue;
2301 		if (ka->sa.sa_handler != SIG_DFL) {
2302 			/* Run the handler. */
2303 			ksig->ka = *ka;
2305 			if (ka->sa.sa_flags & SA_ONESHOT)
2306 				ka->sa.sa_handler = SIG_DFL;
2308 			break; /* will return non-zero "signr" value */
2309 		}
2311 		/*
2312 		 * Now we are doing the default action for this signal.
2313 		 */
2314 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2315 			continue;
2317 		/*
2318 		 * Global init gets no signals it doesn't want.
2319 		 * Container-init gets no signals it doesn't want from same
2320 		 * container.
2321 		 *
2322 		 * Note that if global/container-init sees a sig_kernel_only()
2323 		 * signal here, the signal must have been generated internally
2324 		 * or must have come from an ancestor namespace. In either
2325 		 * case, the signal cannot be dropped.
2326 		 */
2327 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2328 				!sig_kernel_only(signr))
2329 			continue;
2331 		if (sig_kernel_stop(signr)) {
2332 			/*
2333 * The default action is to stop all threads in
2334 * the thread group. The job control signals
2335 * do nothing in an orphaned pgrp, but SIGSTOP
2336 * always works. Note that siglock needs to be
2337 * dropped during the call to is_orphaned_pgrp()
2338 * because of lock ordering with tasklist_lock.
2339 * This allows an intervening SIGCONT to be posted.
2340 * We need to check for that and bail out if necessary.
2341 			 */
2342 			if (signr != SIGSTOP) {
2343 				spin_unlock_irq(&sighand->siglock);
2345 				/* signals can be posted during this window */
2347 				if (is_current_pgrp_orphaned())
2348 					goto relock;
2350 				spin_lock_irq(&sighand->siglock);
2351 			}
2353 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2354 				/* It released the siglock. */
2355 				goto relock;
2356 			}
2358 			/*
2359 			 * We didn't actually stop, due to a race
2360 			 * with SIGCONT or something like that.
2361 			 */
2362 			continue;
2363 		}
2365 	fatal:
2366 		spin_unlock_irq(&sighand->siglock);
2368 		/*
2369 		 * Anything else is fatal, maybe with a core dump.
2370 		 */
2371 current->flags |= PF_SIGNALED;
2373 if (sig_kernel_coredump(signr)) {
2374 if (print_fatal_signals)
2375 print_fatal_signal(ksig->info.si_signo);
2376 			proc_coredump_connector(current);
2377 			/*
2378 * If it was able to dump core, this kills all
2379 * other threads in the group and synchronizes with
2380 * their demise. If we lost the race with another
2381 * thread getting here, it set group_exit_code
2382 * first and our do_group_exit call below will use
2383 * that value and ignore the one we pass it.
2384 			 */
2385 			do_coredump(&ksig->info);
2386 		}
2388 		/*
2389 		 * Death signals, no core dump.
2390 		 */
2391 		do_group_exit(ksig->info.si_signo);
2392 		/* NOTREACHED */
2393 	}
2394 	spin_unlock_irq(&sighand->siglock);
2396 	ksig->sig = signr;
2397 	return ksig->sig > 0;
2398 }
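/*
 * Illustrative sketch (not part of this file): the SA_ONESHOT reset above
 * is what userspace sees as SA_RESETHAND -- after one delivery the
 * disposition reverts to SIG_DFL, so a second SIGINT here would take the
 * default (fatal) action:
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = on_sigint;	// hypothetical handler
 *	sa.sa_flags = SA_RESETHAND;	// aka SA_ONESHOT
 *	sigaction(SIGINT, &sa, NULL);
 */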
2400 /**
2401  * signal_delivered - deliver-time bookkeeping for a handled signal
2402  * @ksig: kernel signal struct
2403  * @stepping: nonzero if debugger single-step or block-step in use
2404  *
2405  * This function should be called when a signal has successfully been
2406  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2407  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2408  * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2409  */
2410 static void signal_delivered(struct ksignal *ksig, int stepping)
2411 {
2412 	sigset_t blocked;
2414 /* A signal was successfully delivered, and the
2415 saved sigmask was stored on the signal frame,
2416 and will be restored by sigreturn. So we can
2417 simply clear the restore sigmask flag. */
2418 clear_restore_sigmask();
2420 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2421 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2422 sigaddset(&blocked, ksig->sig);
2423 set_current_blocked(&blocked);
2424 	tracehook_signal_handler(stepping);
2425 }
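/*
 * Illustrative sketch (not part of this file): the mask computed above is
 * what makes a handler single-threaded with respect to its own signal.
 * With the registration below, SIGUSR1 plus everything in sa_mask stays
 * blocked while handler() runs, unless SA_NODEFER is set:
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = handler;	// hypothetical handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2); // also blocked during handler
 *	sigaction(SIGUSR1, &sa, NULL);
 */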
2427 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2428 {
2429 	if (failed)
2430 		force_sigsegv(ksig->sig, current);
2431 	else
2432 		signal_delivered(ksig, stepping);
2433 }
2435 /*
2436  * It could be that complete_signal() picked us to notify about the
2437 * group-wide signal. Other threads should be notified now to take
2438 * the shared signals in @which since we will not.
2439  */
2440 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2441 {
2442 	sigset_t retarget;
2443 	struct task_struct *t;
2445 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2446 	if (sigisemptyset(&retarget))
2447 		return;
2449 	t = tsk;
2450 	while_each_thread(tsk, t) {
2451 		if (t->flags & PF_EXITING)
2452 			continue;
2454 		if (!has_pending_signals(&retarget, &t->blocked))
2455 			continue;
2456 /* Remove the signals this thread can handle. */
2457 sigandsets(&retarget, &retarget, &t->blocked);
2459 if (!signal_pending(t))
2460 signal_wake_up(t, 0);
2462 		if (sigisemptyset(&retarget))
2463 			break;
2464 	}
2465 }
2467 void exit_signals(struct task_struct *tsk)
2468 {
2469 	int group_stop = 0;
2470 	sigset_t unblocked;
2472 	/*
2473 * @tsk is about to have PF_EXITING set - lock out users which
2474 	 * expect stable threadgroup.
2475 	 */
2476 	threadgroup_change_begin(tsk);
2478 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2479 		tsk->flags |= PF_EXITING;
2480 		threadgroup_change_end(tsk);
2481 		return;
2482 	}
2484 	spin_lock_irq(&tsk->sighand->siglock);
2485 	/*
2486 * From now this task is not visible for group-wide signals,
2487 	 * see wants_signal(), do_signal_stop().
2488 	 */
2489 tsk->flags |= PF_EXITING;
2491 threadgroup_change_end(tsk);
2493 	if (!signal_pending(tsk))
2494 		goto out;
2496 unblocked = tsk->blocked;
2497 signotset(&unblocked);
2498 retarget_shared_pending(tsk, &unblocked);
2500 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2501 task_participate_group_stop(tsk))
2502 		group_stop = CLD_STOPPED;
2503 out:
2504 	spin_unlock_irq(&tsk->sighand->siglock);
2506 	/*
2507 * If group stop has completed, deliver the notification. This
2508 	 * should always go to the real parent of the group leader.
2509 	 */
2510 if (unlikely(group_stop)) {
2511 read_lock(&tasklist_lock);
2512 do_notify_parent_cldstop(tsk, false, group_stop);
2513 		read_unlock(&tasklist_lock);
2514 	}
2515 }
2517 EXPORT_SYMBOL(recalc_sigpending);
2518 EXPORT_SYMBOL_GPL(dequeue_signal);
2519 EXPORT_SYMBOL(flush_signals);
2520 EXPORT_SYMBOL(force_sig);
2521 EXPORT_SYMBOL(send_sig);
2522 EXPORT_SYMBOL(send_sig_info);
2523 EXPORT_SYMBOL(sigprocmask);
2525 /*
2526  * System call entry points.
2527  */
2529 /**
2530  * sys_restart_syscall - restart a system call
2531  */
2532 SYSCALL_DEFINE0(restart_syscall)
2533 {
2534 	struct restart_block *restart = &current->restart_block;
2535 	return restart->fn(restart);
2536 }
2538 long do_no_restart_syscall(struct restart_block *param)
2539 {
2540 	return -EINTR;
2541 }
2543 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2544 {
2545 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2546 		sigset_t newblocked;
2547 		/* A set of now blocked but previously unblocked signals. */
2548 		sigandnsets(&newblocked, newset, &current->blocked);
2549 		retarget_shared_pending(tsk, &newblocked);
2550 	}
2551 	tsk->blocked = *newset;
2552 	recalc_sigpending();
2553 }
2555 /**
2556  * set_current_blocked - change current->blocked mask
2557  * @newset: new mask
2558  *
2559  * It is wrong to change ->blocked directly; this helper should be used
2560  * to ensure the process can't miss a shared signal we are going to block.
2561  */
2562 void set_current_blocked(sigset_t *newset)
2563 {
2564 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2565 	__set_current_blocked(newset);
2566 }
2568 void __set_current_blocked(const sigset_t *newset)
2569 {
2570 	struct task_struct *tsk = current;
2572 	/*
2573 * In case the signal mask hasn't changed, there is nothing we need
2574 	 * to do. The current->blocked shouldn't be modified by another task.
2575 	 */
2576 	if (sigequalsets(&tsk->blocked, newset))
2577 		return;
2579 spin_lock_irq(&tsk->sighand->siglock);
2580 __set_task_blocked(tsk, newset);
2581 	spin_unlock_irq(&tsk->sighand->siglock);
2582 }
2584 /*
2585  * This is also useful for kernel threads that want to temporarily
2586  * (or permanently) block certain signals.
2587  *
2588  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2589  * interface happily blocks "unblockable" signals like SIGKILL
2590  * and friends.
2591  */
2592 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2593 {
2594 	struct task_struct *tsk = current;
2595 	sigset_t newset;
2597 	/* Lockless, only current can change ->blocked, never from irq */
2598 	if (oldset)
2599 		*oldset = tsk->blocked;
2601 	switch (how) {
2602 	case SIG_BLOCK:
2603 		sigorsets(&newset, &tsk->blocked, set);
2604 		break;
2605 	case SIG_UNBLOCK:
2606 		sigandnsets(&newset, &tsk->blocked, set);
2607 		break;
2608 	case SIG_SETMASK:
2609 		newset = *set;
2610 		break;
2611 	default:
2612 		return -EINVAL;
2613 	}
2615 	__set_current_blocked(&newset);
2616 	return 0;
2617 }
2619 /**
2620  * sys_rt_sigprocmask - change the list of currently blocked signals
2621  * @how: whether to add, remove, or set signals
2622  * @nset: the new set of blocked signals, if non-null
2623  * @oset: previous value of signal mask if non-null
2624  * @sigsetsize: size of sigset_t type
2625  */
2626 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2627 		sigset_t __user *, oset, size_t, sigsetsize)
2628 {
2629 	sigset_t old_set, new_set;
2630 	int error;
2632 	/* XXX: Don't preclude handling different sized sigset_t's. */
2633 	if (sigsetsize != sizeof(sigset_t))
2634 		return -EINVAL;
2636 	old_set = current->blocked;
2638 	if (nset) {
2639 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2640 			return -EFAULT;
2641 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2643 		error = sigprocmask(how, &new_set, NULL);
2644 		if (error)
2645 			return error;
2646 	}
2648 	if (oset) {
2649 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2650 			return -EFAULT;
2651 	}
2653 	return 0;
2654 }
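/*
 * Illustrative sketch (not part of this file): the glibc wrapper for this
 * syscall, blocking SIGINT around a critical section. Note that attempts
 * to block SIGKILL/SIGSTOP are silently dropped above:
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */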
2656 #ifdef CONFIG_COMPAT
2657 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2658 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2659 {
2660 #ifdef __BIG_ENDIAN
2661 	sigset_t old_set = current->blocked;
2663 	/* XXX: Don't preclude handling different sized sigset_t's. */
2664 	if (sigsetsize != sizeof(sigset_t))
2665 		return -EINVAL;
2667 	if (nset) {
2668 		compat_sigset_t new32;
2669 		sigset_t new_set;
2670 		int error;
2671 		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2672 			return -EFAULT;
2674 sigset_from_compat(&new_set, &new32);
2675 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2677 		error = sigprocmask(how, &new_set, NULL);
2678 		if (error)
2679 			return error;
2680 	}
2681 	if (oset) {
2682 		compat_sigset_t old32;
2683 		sigset_to_compat(&old32, &old_set);
2684 		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2685 			return -EFAULT;
2686 	}
2687 	return 0;
2688 #else
2689 	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2690 				  (sigset_t __user *)oset, sigsetsize);
2691 #endif
2692 }
2693 #endif
2695 static int do_sigpending(void *set, unsigned long sigsetsize)
2696 {
2697 	if (sigsetsize > sizeof(sigset_t))
2698 		return -EINVAL;
2700 	spin_lock_irq(&current->sighand->siglock);
2701 	sigorsets(set, &current->pending.signal,
2702 		  &current->signal->shared_pending.signal);
2703 	spin_unlock_irq(&current->sighand->siglock);
2705 	/* Outside the lock because only this thread touches it. */
2706 	sigandsets(set, &current->blocked, set);
2707 	return 0;
2708 }
2710 /**
2711  * sys_rt_sigpending - examine a pending signal that has been raised
2712  *			while blocked
2713  * @uset: stores pending signals
2714  * @sigsetsize: size of sigset_t type or larger
2715  */
2716 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2717 {
2718 	sigset_t set;
2719 	int err = do_sigpending(&set, sigsetsize);
2720 	if (!err && copy_to_user(uset, &set, sigsetsize))
2721 		err = -EFAULT;
2722 	return err;
2723 }
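/*
 * Illustrative sketch (not part of this file): because do_sigpending()
 * intersects the pending sets with ->blocked, a signal raised while
 * blocked shows up in sigpending() until it is unblocked or accepted:
 *
 *	sigset_t pend;
 *	raise(SIGUSR1);			// assumed blocked by the caller
 *	sigpending(&pend);
 *	if (sigismember(&pend, SIGUSR1))
 *		puts("SIGUSR1 pending");
 */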
2725 #ifdef CONFIG_COMPAT
2726 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2727 		compat_size_t, sigsetsize)
2728 {
2729 #ifdef __BIG_ENDIAN
2730 	sigset_t set;
2731 	int err = do_sigpending(&set, sigsetsize);
2732 	if (!err) {
2733 		compat_sigset_t set32;
2734 		sigset_to_compat(&set32, &set);
2735 		/* we can get here only if sigsetsize <= sizeof(set) */
2736 		if (copy_to_user(uset, &set32, sigsetsize))
2737 			err = -EFAULT;
2738 	}
2739 	return err;
2740 #else
2741 	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2742 #endif
2743 }
2744 #endif
2746 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2748 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2749 {
2750 	int err;
2752 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2753 		return -EFAULT;
2754 	if (from->si_code < 0)
2755 		return __copy_to_user(to, from, sizeof(siginfo_t))
2756 			? -EFAULT : 0;
2757 	/*
2758 * If you change siginfo_t structure, please be sure
2759 * this code is fixed accordingly.
2760 * Please remember to update the signalfd_copyinfo() function
2761 * inside fs/signalfd.c too, in case siginfo_t changes.
2762 * It should never copy any pad contained in the structure
2763 * to avoid security leaks, but must copy the generic
2764 * 3 ints plus the relevant union member.
2765 	 */
2766 	err = __put_user(from->si_signo, &to->si_signo);
2767 err |= __put_user(from->si_errno, &to->si_errno);
2768 err |= __put_user((short)from->si_code, &to->si_code);
2769 	switch (from->si_code & __SI_MASK) {
2770 	case __SI_KILL:
2771 		err |= __put_user(from->si_pid, &to->si_pid);
2772 		err |= __put_user(from->si_uid, &to->si_uid);
2773 		break;
2774 	case __SI_TIMER:
2775 		err |= __put_user(from->si_tid, &to->si_tid);
2776 err |= __put_user(from->si_overrun, &to->si_overrun);
2777 		err |= __put_user(from->si_ptr, &to->si_ptr);
2778 		break;
2779 	case __SI_POLL:
2780 		err |= __put_user(from->si_band, &to->si_band);
2781 		err |= __put_user(from->si_fd, &to->si_fd);
2782 		break;
2783 	case __SI_FAULT:
2784 		err |= __put_user(from->si_addr, &to->si_addr);
2785 #ifdef __ARCH_SI_TRAPNO
2786 		err |= __put_user(from->si_trapno, &to->si_trapno);
2787 #endif
2788 #ifdef BUS_MCEERR_AO
2789 		/*
2790 		 * Other callers might not initialize the si_lsb field,
2791 * so check explicitly for the right codes here.
2793 if (from->si_signo == SIGBUS &&
2794 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2795 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2796 #endif
2797 #ifdef SEGV_BNDERR
2798 		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2799 err |= __put_user(from->si_lower, &to->si_lower);
2800 			err |= __put_user(from->si_upper, &to->si_upper);
2801 		}
2802 #endif
2803 		break;
2804 	case __SI_CHLD:
2805 		err |= __put_user(from->si_pid, &to->si_pid);
2806 err |= __put_user(from->si_uid, &to->si_uid);
2807 err |= __put_user(from->si_status, &to->si_status);
2808 err |= __put_user(from->si_utime, &to->si_utime);
2809 		err |= __put_user(from->si_stime, &to->si_stime);
2810 		break;
2811 	case __SI_RT: /* This is not generated by the kernel as of now. */
2812 case __SI_MESGQ: /* But this is */
2813 err |= __put_user(from->si_pid, &to->si_pid);
2814 err |= __put_user(from->si_uid, &to->si_uid);
2815 		err |= __put_user(from->si_ptr, &to->si_ptr);
2816 		break;
2817 #ifdef __ARCH_SIGSYS
2818 	case __SI_SYS:
2819 		err |= __put_user(from->si_call_addr, &to->si_call_addr);
2820 err |= __put_user(from->si_syscall, &to->si_syscall);
2821 		err |= __put_user(from->si_arch, &to->si_arch);
2822 		break;
2823 #endif
2824 	default: /* this is just in case for now ... */
2825 		err |= __put_user(from->si_pid, &to->si_pid);
2826 		err |= __put_user(from->si_uid, &to->si_uid);
2827 		break;
2828 	}
2829 	return err;
2830 }
2832 #endif
2834 /**
2835  * do_sigtimedwait - wait for queued signals specified in @which
2836 * @which: queued signals to wait for
2837 * @info: if non-null, the signal's siginfo is returned here
2838  * @ts: upper bound on process time suspension
2839  */
2840 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2841 		    const struct timespec *ts)
2842 {
2843 	struct task_struct *tsk = current;
2844 	long timeout = MAX_SCHEDULE_TIMEOUT;
2845 	sigset_t mask = *which;
2846 	int sig;
2848 	if (ts) {
2849 		if (!timespec_valid(ts))
2850 			return -EINVAL;
2851 		timeout = timespec_to_jiffies(ts);
2852 		/*
2853 		 * We can be close to the next tick, add another one
2854 		 * to ensure we will wait at least the time asked for.
2855 		 */
2856 		if (ts->tv_sec || ts->tv_nsec)
2857 			timeout++;
2858 	}
2860 	/*
2861 	 * Invert the set of allowed signals to get those we want to block.
2862 	 */
2863 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2864 	signotset(&mask);
2866 spin_lock_irq(&tsk->sighand->siglock);
2867 sig = dequeue_signal(tsk, &mask, info);
2868 	if (!sig && timeout) {
2869 		/*
2870 		 * None ready, temporarily unblock those we're interested
2871 		 * in, so that we'll be awakened when they arrive while
2872 		 * we are sleeping. Unblocking is always fine, we can avoid
2873 		 * set_current_blocked().
2874 		 */
2875 tsk->real_blocked = tsk->blocked;
2876 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2877 recalc_sigpending();
2878 spin_unlock_irq(&tsk->sighand->siglock);
2880 timeout = freezable_schedule_timeout_interruptible(timeout);
2882 spin_lock_irq(&tsk->sighand->siglock);
2883 __set_task_blocked(tsk, &tsk->real_blocked);
2884 sigemptyset(&tsk->real_blocked);
2885 		sig = dequeue_signal(tsk, &mask, info);
2886 	}
2887 	spin_unlock_irq(&tsk->sighand->siglock);
2889 	if (sig)
2890 		return sig;
2891 	return timeout ? -EINTR : -EAGAIN;
2892 }
2894 /**
2895  * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2896  *			in @uthese
2897 * @uthese: queued signals to wait for
2898 * @uinfo: if non-null, the signal's siginfo is returned here
2899 * @uts: upper bound on process time suspension
2900  * @sigsetsize: size of sigset_t type
2901  */
2902 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2903 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2904 		size_t, sigsetsize)
2905 {
2906 	sigset_t these;
2907 	struct timespec ts;
2908 	siginfo_t info;
2909 	int ret;
2911 	/* XXX: Don't preclude handling different sized sigset_t's. */
2912 	if (sigsetsize != sizeof(sigset_t))
2913 		return -EINVAL;
2915 	if (copy_from_user(&these, uthese, sizeof(these)))
2916 		return -EFAULT;
2918 	if (uts) {
2919 		if (copy_from_user(&ts, uts, sizeof(ts)))
2920 			return -EFAULT;
2921 	}
2923 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2925 if (ret > 0 && uinfo) {
2926 		if (copy_siginfo_to_user(uinfo, &info))
2927 			ret = -EFAULT;
2928 	}
2930 	return ret;
2931 }
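/*
 * Illustrative sketch (not part of this file): synchronous reception via
 * the glibc wrapper. The signal must be blocked first, otherwise it may
 * be delivered asynchronously instead of being queued for the waiter
 * (hence the -EINTR return path above):
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		puts("timed out");
 */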
2933 /**
2934  * sys_kill - send a signal to a process
2935 * @pid: the PID of the process
2936  * @sig: signal to be sent
2937  */
2938 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2939 {
2940 	struct siginfo info;
2942 	info.si_signo = sig;
2943 	info.si_errno = 0;
2944 info.si_code = SI_USER;
2945 info.si_pid = task_tgid_vnr(current);
2946 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2948 	return kill_something_info(sig, &info, pid);
2949 }
2951 static int
2952 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2953 {
2954 	struct task_struct *p;
2955 	int error = -ESRCH;
2957 	rcu_read_lock();
2958 	p = find_task_by_vpid(pid);
2959 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2960 error = check_kill_permission(sig, info, p);
2961 		/*
2962 		 * The null signal is a permissions and process existence
2963 		 * probe. No signal is actually delivered.
2964 		 */
2965 		if (!error && sig) {
2966 error = do_send_sig_info(sig, info, p, false);
2967 			/*
2968 			 * If lock_task_sighand() failed we pretend the task
2969 * dies after receiving the signal. The window is tiny,
2970 			 * and the signal is private anyway.
2971 			 */
2972 			if (unlikely(error == -ESRCH))
2973 				error = 0;
2974 		}
2975 	}
2976 	rcu_read_unlock();
2978 	return error;
2979 }
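/*
 * Illustrative sketch (not part of this file): the "null signal" probe
 * described above, as used from userspace to test for process existence
 * and permission without delivering anything:
 *
 *	if (kill(pid, 0) == 0)
 *		puts("process exists and is signalable");
 *	else if (errno == ESRCH)
 *		puts("no such process");
 *	else if (errno == EPERM)
 *		puts("exists, but we may not signal it");
 */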
2981 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2982 {
2983 	struct siginfo info = {};
2985 	info.si_signo = sig;
2986 	info.si_errno = 0;
2987 	info.si_code = SI_TKILL;
2988 	info.si_pid = task_tgid_vnr(current);
2989 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2991 	return do_send_specific(tgid, pid, sig, &info);
2992 }
2994 /**
2995  * sys_tgkill - send signal to one specific thread
2996 * @tgid: the thread group ID of the thread
2997 * @pid: the PID of the thread
2998  * @sig: signal to be sent
2999  *
3000  * This syscall also checks the @tgid and returns -ESRCH even if the PID
3001  * exists but no longer belongs to the target process. This
3002  * method solves the problem of threads exiting and PIDs getting reused.
3003  */
3004 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3005 {
3006 	/* This is only valid for single tasks */
3007 	if (pid <= 0 || tgid <= 0)
3008 		return -EINVAL;
3010 	return do_tkill(tgid, pid, sig);
3011 }
3013 /**
3014  * sys_tkill - send signal to one specific task
3015 * @pid: the PID of the task
3016 * @sig: signal to be sent
3017  *
3018  * Send a signal to only one task, even if it's a CLONE_THREAD task.
3019  */
3020 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3021 {
3022 	/* This is only valid for single tasks */
3023 	if (pid <= 0)
3024 		return -EINVAL;
3026 	return do_tkill(0, pid, sig);
3027 }
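/*
 * Illustrative sketch (not part of this file): directing a signal at one
 * thread. Passing the thread-group id guards against the tid being
 * recycled by an unrelated process, which is why tgkill(2) is preferred
 * over tkill(2):
 *
 *	#include <sys/syscall.h>
 *	pid_t tgid = getpid();
 *	pid_t tid = worker_tid;	// hypothetical, e.g. saved from gettid()
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */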
3029 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3030 {
3031 	/* Not even root can pretend to send signals from the kernel.
3032 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3033 	 */
3034 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3035 	    (task_pid_vnr(current) != pid))
3036 		return -EPERM;
3038 	info->si_signo = sig;
3040 	/* POSIX.1b doesn't mention process groups. */
3041 	return kill_proc_info(sig, info, pid);
3042 }
3044 /**
3045  * sys_rt_sigqueueinfo - queue a signal and accompanying siginfo to a process
3046 * @pid: the PID of the thread
3047 * @sig: signal to be sent
3048  * @uinfo: signal info to be sent
3049  */
3050 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3051 		siginfo_t __user *, uinfo)
3052 {
3053 	siginfo_t info;
3054 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3055 		return -EFAULT;
3056 	return do_rt_sigqueueinfo(pid, sig, &info);
3057 }
3059 #ifdef CONFIG_COMPAT
3060 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3061 			compat_pid_t, pid,
3062 			int, sig,
3063 			struct compat_siginfo __user *, uinfo)
3064 {
3065 	siginfo_t info = {};
3066 	int ret = copy_siginfo_from_user32(&info, uinfo);
3067 	if (unlikely(ret))
3068 		return ret;
3069 	return do_rt_sigqueueinfo(pid, sig, &info);
3070 }
3071 #endif
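/*
 * Illustrative sketch (not part of this file): queueing a payload with the
 * glibc sigqueue() wrapper. It fills in si_code = SI_QUEUE (a negative
 * value), so the si_code >= 0 / SI_TKILL impersonation check above
 * permits it:
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);	// receiver reads si.si_value
 */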
3073 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3074 {
3075 	/* This is only valid for single tasks */
3076 	if (pid <= 0 || tgid <= 0)
3077 		return -EINVAL;
3079 	/* Not even root can pretend to send signals from the kernel.
3080 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3081 	 */
3082 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3083 	    (task_pid_vnr(current) != pid))
3084 		return -EPERM;
3086 	info->si_signo = sig;
3088 	return do_send_specific(tgid, pid, sig, info);
3089 }
3091 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3092 		siginfo_t __user *, uinfo)
3093 {
3094 	siginfo_t info;
3096 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3097 		return -EFAULT;
3099 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3100 }
3102 #ifdef CONFIG_COMPAT
3103 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3104 			compat_pid_t, tgid,
3105 			compat_pid_t, pid,
3106 			int, sig,
3107 			struct compat_siginfo __user *, uinfo)
3108 {
3109 	siginfo_t info = {};
3111 	if (copy_siginfo_from_user32(&info, uinfo))
3112 		return -EFAULT;
3113 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3114 }
3115 #endif
3117 /*
3118  * For kthreads only; must not be used if cloned with CLONE_SIGHAND
3119  */
3120 void kernel_sigaction(int sig, __sighandler_t action)
3121 {
3122 	spin_lock_irq(&current->sighand->siglock);
3123 	current->sighand->action[sig - 1].sa.sa_handler = action;
3124 	if (action == SIG_IGN) {
3125 		sigset_t mask;
3127 		sigemptyset(&mask);
3128 		sigaddset(&mask, sig);
3130 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3131 		flush_sigqueue_mask(&mask, &current->pending);
3132 		recalc_sigpending();
3133 	}
3134 	spin_unlock_irq(&current->sighand->siglock);
3135 }
3136 EXPORT_SYMBOL(kernel_sigaction);
3138 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3139 {
3140 	struct task_struct *p = current, *t;
3141 	struct k_sigaction *k;
3142 	sigset_t mask;
3144 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3145 		return -EINVAL;
3147 	k = &p->sighand->action[sig-1];
3149 	spin_lock_irq(&p->sighand->siglock);
3150 	if (oact)
3151 		*oact = *k;
3153 	if (act) {
3154 		sigdelsetmask(&act->sa.sa_mask,
3155 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3156 		*k = *act;
3157 		/*
3158 		 * POSIX 3.3.1.3:
3159 		 *  "Setting a signal action to SIG_IGN for a signal that is
3160 * pending shall cause the pending signal to be discarded,
3161 		 *   whether or not it is blocked."
3162 		 *
3163 		 *  "Setting a signal action to SIG_DFL for a signal that is
3164 * pending and whose default action is to ignore the signal
3165 * (for example, SIGCHLD), shall cause the pending signal to
3166 		 *   be discarded, whether or not it is blocked"
3167 		 */
3168 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3169 			sigemptyset(&mask);
3170 			sigaddset(&mask, sig);
3171 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3172 for_each_thread(p, t)
3173 				flush_sigqueue_mask(&mask, &t->pending);
3174 		}
3175 	}
3177 	spin_unlock_irq(&p->sighand->siglock);
3178 	return 0;
3179 }
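/*
 * Illustrative sketch (not part of this file): the POSIX discard rule
 * implemented above is observable from userspace -- a blocked, pending
 * signal vanishes once its disposition becomes SIG_IGN:
 *
 *	sigset_t set, pend;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// now pending
 *	signal(SIGUSR1, SIG_IGN);	// pending signal discarded
 *	sigpending(&pend);		// SIGUSR1 no longer a member
 */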
3181 static int
3182 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3183 {
3184 	stack_t oss;
3185 	int error;
3187 oss.ss_sp = (void __user *) current->sas_ss_sp;
3188 oss.ss_size = current->sas_ss_size;
3189 	oss.ss_flags = sas_ss_flags(sp);
3191 	if (uss) {
3192 		void __user *ss_sp;
3193 		int ss_flags;
3194 		size_t ss_size;
3196 		error = -EFAULT;
3197 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3198 			goto out;
3199 error = __get_user(ss_sp, &uss->ss_sp) |
3200 __get_user(ss_flags, &uss->ss_flags) |
3201 			__get_user(ss_size, &uss->ss_size);
3202 		if (error)
3203 			goto out;
3205 		error = -EPERM;
3206 		if (on_sig_stack(sp))
3207 			goto out;
3209 		error = -EINVAL;
3210 		/*
3211 		 * Note - this code used to test ss_flags incorrectly:
3212 * old code may have been written using ss_flags==0
3213 * to mean ss_flags==SS_ONSTACK (as this was the only
3214 		 *	  way that worked) - this fix preserves that older
3215 		 *	  mechanism.
3216 		 */
3217 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3218 			goto out;
3220 		if (ss_flags == SS_DISABLE) {
3221 			ss_size = 0;
3222 			ss_sp = NULL;
3223 		} else {
3224 			error = -ENOMEM;
3225 			if (ss_size < MINSIGSTKSZ)
3226 				goto out;
3227 		}
3229 current->sas_ss_sp = (unsigned long) ss_sp;
3230 		current->sas_ss_size = ss_size;
3231 	}
3233 	error = 0;
3234 	if (uoss) {
3235 		error = -EFAULT;
3236 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3237 			goto out;
3238 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3239 __put_user(oss.ss_size, &uoss->ss_size) |
3240 			__put_user(oss.ss_flags, &uoss->ss_flags);
3241 	}
3242 out:
3243 	return error;
3244 }
3246 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3247 {
3248 	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3249 }
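/*
 * Illustrative sketch (not part of this file): installing an alternate
 * stack and a SIGSEGV handler that runs on it, the usual way to survive a
 * stack overflow long enough to report it:
 *
 *	static char altstack[SIGSTKSZ];
 *	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
 *	struct sigaction sa = { 0 };
 *	sigaltstack(&ss, NULL);
 *	sa.sa_handler = on_segv;	// hypothetical handler
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 */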
3251 int restore_altstack(const stack_t __user *uss)
3252 {
3253 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3254 /* squash all but EFAULT for now */
3255 	return err == -EFAULT ? err : 0;
3256 }
3258 int __save_altstack(stack_t __user *uss, unsigned long sp)
3259 {
3260 struct task_struct *t = current;
3261 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3262 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3263 		__put_user(t->sas_ss_size, &uss->ss_size);
3264 }
3266 #ifdef CONFIG_COMPAT
3267 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3268 const compat_stack_t __user *, uss_ptr,
3269 			compat_stack_t __user *, uoss_ptr)
3270 {
3271 	stack_t uss, uoss;
3272 	int ret;
3273 	mm_segment_t seg;
3275 	if (uss_ptr) {
3276 		compat_stack_t uss32;
3278 memset(&uss, 0, sizeof(stack_t));
3279 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3280 			return -EFAULT;
3281 uss.ss_sp = compat_ptr(uss32.ss_sp);
3282 uss.ss_flags = uss32.ss_flags;
3283 		uss.ss_size = uss32.ss_size;
3284 	}
3285 	seg = get_fs();
3286 	set_fs(KERNEL_DS);
3287 	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3288 (stack_t __force __user *) &uoss,
3289 			     compat_user_stack_pointer());
3290 	set_fs(seg);
3291 if (ret >= 0 && uoss_ptr) {
3292 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3293 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3294 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3295 		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3296 			ret = -EFAULT;
3297 	}
3298 	return ret;
3299 }
3301 int compat_restore_altstack(const compat_stack_t __user *uss)
3302 {
3303 int err = compat_sys_sigaltstack(uss, NULL);
3304 /* squash all but -EFAULT for now */
3305 	return err == -EFAULT ? err : 0;
3306 }
3308 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3309 {
3310 struct task_struct *t = current;
3311 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3312 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3313 		__put_user(t->sas_ss_size, &uss->ss_size);
3314 }
3315 #endif
3317 #ifdef __ARCH_WANT_SYS_SIGPENDING
3319 /**
3320  * sys_sigpending - examine pending signals
3321  * @set: where the mask of pending signals is returned
3322  */
3323 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3324 {
3325 	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3326 }
3328 #endif
3330 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3331 /**
3332  * sys_sigprocmask - examine and change blocked signals
3333 * @how: whether to add, remove, or set signals
3334 * @nset: signals to add or remove (if non-null)
3335  * @oset: previous value of signal mask if non-null
3336  *
3337  * Some platforms have their own version with special arguments;
3338  * others support only sys_rt_sigprocmask.
3339  */
3341 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3342 		old_sigset_t __user *, oset)
3343 {
3344 	old_sigset_t old_set, new_set;
3345 	sigset_t new_blocked;
3347 	old_set = current->blocked.sig[0];
3349 	if (nset) {
3350 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3351 			return -EFAULT;
3353 		new_blocked = current->blocked;
3355 		switch (how) {
3356 		case SIG_BLOCK:
3357 			sigaddsetmask(&new_blocked, new_set);
3358 			break;
3359 		case SIG_UNBLOCK:
3360 			sigdelsetmask(&new_blocked, new_set);
3361 			break;
3362 		case SIG_SETMASK:
3363 			new_blocked.sig[0] = new_set;
3364 			break;
3365 		default:
3366 			return -EINVAL;
3367 		}
3369 		set_current_blocked(&new_blocked);
3370 	}
3372 	if (oset) {
3373 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3374 			return -EFAULT;
3375 	}
3377 	return 0;
3378 }
3379 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3381 #ifndef CONFIG_ODD_RT_SIGACTION
3382 /**
3383  * sys_rt_sigaction - alter an action taken by a process
3384  * @sig: signal whose action is to be altered
3385 * @act: new sigaction
3386 * @oact: used to save the previous sigaction
3387  * @sigsetsize: size of sigset_t type
3388  */
3389 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3390 		const struct sigaction __user *, act,
3391 		struct sigaction __user *, oact,
3392 		size_t, sigsetsize)
3393 {
3394 	struct k_sigaction new_sa, old_sa;
3395 	int ret = -EINVAL;
3397 	/* XXX: Don't preclude handling different sized sigset_t's. */
3398 	if (sigsetsize != sizeof(sigset_t))
3399 		goto out;
3401 	if (act) {
3402 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3403 			return -EFAULT;
3404 	}
3406 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3408 	if (!ret && oact) {
3409 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3410 			return -EFAULT;
3411 	}
3412 out:
3413 	return ret;
3414 }
3415 #ifdef CONFIG_COMPAT
3416 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3417 const struct compat_sigaction __user *, act,
3418 struct compat_sigaction __user *, oact,
3419 		compat_size_t, sigsetsize)
3420 {
3421 	struct k_sigaction new_ka, old_ka;
3422 	compat_sigset_t mask;
3423 #ifdef __ARCH_HAS_SA_RESTORER
3424 	compat_uptr_t restorer;
3425 #endif
3426 	int ret;
3428 	/* XXX: Don't preclude handling different sized sigset_t's. */
3429 	if (sigsetsize != sizeof(compat_sigset_t))
3430 		return -EINVAL;
3432 	if (act) {
3433 		compat_uptr_t handler;
3434 ret = get_user(handler, &act->sa_handler);
3435 new_ka.sa.sa_handler = compat_ptr(handler);
3436 #ifdef __ARCH_HAS_SA_RESTORER
3437 ret |= get_user(restorer, &act->sa_restorer);
3438 		new_ka.sa.sa_restorer = compat_ptr(restorer);
3439 #endif
3440 		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3441 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3442 		if (ret)
3443 			return -EFAULT;
3444 		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3445 	}
3447 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3448 	if (!ret && oact) {
3449 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3450 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3451 			       &oact->sa_handler);
3452 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3453 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3454 #ifdef __ARCH_HAS_SA_RESTORER
3455 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3456 				&oact->sa_restorer);
3457 #endif
3458 	}
3459 	return ret;
3460 }
3461 #endif
3462 #endif /* !CONFIG_ODD_RT_SIGACTION */
3464 #ifdef CONFIG_OLD_SIGACTION
3465 SYSCALL_DEFINE3(sigaction, int, sig,
3466 const struct old_sigaction __user *, act,
3467 		struct old_sigaction __user *, oact)
3468 {
3469 	struct k_sigaction new_ka, old_ka;
3470 	int ret;
3472 	if (act) {
3473 		old_sigset_t mask;
3474 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3475 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3476 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3477 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3478 		    __get_user(mask, &act->sa_mask))
3479 			return -EFAULT;
3480 #ifdef __ARCH_HAS_KA_RESTORER
3481 		new_ka.ka_restorer = NULL;
3482 #endif
3483 		siginitset(&new_ka.sa.sa_mask, mask);
3484 	}
3486 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3488 	if (!ret && oact) {
3489 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3490 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3491 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3492 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3493 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3494 			return -EFAULT;
3495 	}
3497 	return ret;
3498 }
3499 #endif
3500 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3501 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3502 const struct compat_old_sigaction __user *, act,
3503 		struct compat_old_sigaction __user *, oact)
3504 {
3505 	struct k_sigaction new_ka, old_ka;
3506 	int ret;
3507 	compat_old_sigset_t mask;
3508 	compat_uptr_t handler, restorer;
3510 	if (act) {
3511 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3512 __get_user(handler, &act->sa_handler) ||
3513 __get_user(restorer, &act->sa_restorer) ||
3514 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3515 		    __get_user(mask, &act->sa_mask))
3516 			return -EFAULT;
3518 #ifdef __ARCH_HAS_KA_RESTORER
3519 		new_ka.ka_restorer = NULL;
3520 #endif
3521 		new_ka.sa.sa_handler = compat_ptr(handler);
3522 new_ka.sa.sa_restorer = compat_ptr(restorer);
3523 		siginitset(&new_ka.sa.sa_mask, mask);
3524 	}
3526 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3528 	if (!ret && oact) {
3529 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3530 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3531 &oact->sa_handler) ||
3532 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3533 &oact->sa_restorer) ||
3534 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3535 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3536 			return -EFAULT;
3537 	}
3538 	return ret;
3539 }
3540 #endif
3542 #ifdef CONFIG_SGETMASK_SYSCALL
3544 /*
3545  * For backwards compatibility. Functionality superseded by sigprocmask.
3546  */
3547 SYSCALL_DEFINE0(sgetmask)
3548 {
3549 	/* SMP safe */
3550 	return current->blocked.sig[0];
3551 }
3553 SYSCALL_DEFINE1(ssetmask, int, newmask)
3554 {
3555 	int old = current->blocked.sig[0];
3556 	sigset_t newset;
3558 	siginitset(&newset, newmask);
3559 	set_current_blocked(&newset);
3561 	return old;
3562 }
3563 #endif /* CONFIG_SGETMASK_SYSCALL */
3565 #ifdef __ARCH_WANT_SYS_SIGNAL
3566 /*
3567  * For backwards compatibility. Functionality superseded by sigaction.
3568  */
3569 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3570 {
3571 	struct k_sigaction new_sa, old_sa;
3572 	int ret;
3574 	new_sa.sa.sa_handler = handler;
3575 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3576 	sigemptyset(&new_sa.sa.sa_mask);
3578 	ret = do_sigaction(sig, &new_sa, &old_sa);
3580 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3581 }
3582 #endif /* __ARCH_WANT_SYS_SIGNAL */
3584 #ifdef __ARCH_WANT_SYS_PAUSE
3586 SYSCALL_DEFINE0(pause)
3587 {
3588 	while (!signal_pending(current)) {
3589 		__set_current_state(TASK_INTERRUPTIBLE);
3590 		schedule();
3591 	}
3592 	return -ERESTARTNOHAND;
3593 }
3595 #endif
3597 static int sigsuspend(sigset_t *set)
3598 {
3599 	current->saved_sigmask = current->blocked;
3600 	set_current_blocked(set);
3602 	__set_current_state(TASK_INTERRUPTIBLE);
3603 	schedule();
3604 	set_restore_sigmask();
3605 	return -ERESTARTNOHAND;
3606 }
3608 /**
3609  * sys_rt_sigsuspend - replace the signal mask with the
3610  *	@unewset value until a signal is received
3611 * @unewset: new signal mask value
3612 * @sigsetsize: size of sigset_t type
3613  */
3614 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3615 {
3616 	sigset_t newset;
3618 	/* XXX: Don't preclude handling different sized sigset_t's. */
3619 	if (sigsetsize != sizeof(sigset_t))
3620 		return -EINVAL;
3622 	if (copy_from_user(&newset, unewset, sizeof(newset)))
3623 		return -EFAULT;
3624 	return sigsuspend(&newset);
3625 }
3627 #ifdef CONFIG_COMPAT
3628 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3629 {
3630 #ifdef __BIG_ENDIAN
3631 	sigset_t newset;
3632 	compat_sigset_t newset32;
3634 /* XXX: Don't preclude handling different sized sigset_t's. */
3635 	if (sigsetsize != sizeof(sigset_t))
3636 		return -EINVAL;
3638 	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3639 		return -EFAULT;
3640 sigset_from_compat(&newset, &newset32);
3641 	return sigsuspend(&newset);
3642 #else
3643 	/* on little-endian bitmaps don't care about granularity */
3644 	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3645 #endif
3646 }
3647 #endif
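/*
 * Illustrative sketch (not part of this file): the race-free wait that
 * sigsuspend() enables. Checking a flag and then calling pause() can lose
 * a wakeup; atomically swapping the mask for the sleep closes the window,
 * exactly as saved_sigmask/set_restore_sigmask() do above:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)		// hypothetical flag set by handler
 *		sigsuspend(&old);	// unblock + sleep, atomically
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */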
3649 #ifdef CONFIG_OLD_SIGSUSPEND
3650 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3651 {
3652 	sigset_t blocked;
3653 	siginitset(&blocked, mask);
3654 	return sigsuspend(&blocked);
3655 }
3656 #endif
3657 #ifdef CONFIG_OLD_SIGSUSPEND3
3658 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3659 {
3660 	sigset_t blocked;
3661 	siginitset(&blocked, mask);
3662 	return sigsuspend(&blocked);
3663 }
3664 #endif
3666 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3667 {
3668 	return NULL;
3669 }
3671 void __init signals_init(void)
3672 {
3673 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3674 }
3676 #ifdef CONFIG_KGDB_KDB
3677 #include <linux/kdb.h>
3678 /*
3679  * kdb_send_sig_info - Allows kdb to send signals without exposing
3680 * signal internals. This function checks if the required locks are
3681  * available before calling the main signal code, to avoid kdb
3682  * deadlocks.
3683  */
3684 void
3685 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3686 {
3687 	static struct task_struct *kdb_prev_t;
3688 	int sig, new_t;
3689 	if (!spin_trylock(&t->sighand->siglock)) {
3690 kdb_printf("Can't do kill command now.\n"
3691 "The sigmask lock is held somewhere else in "
3692 "kernel, try again later\n");
3695 spin_unlock(&t->sighand->siglock);
3696 new_t = kdb_prev_t != t;
3698 if (t->state != TASK_RUNNING && new_t) {
3699 kdb_printf("Process is not RUNNING, sending a signal from "
3700 "kdb risks deadlock\n"
3701 "on the run queue locks. "
3702 "The signal has _not_ been sent.\n"
3703 "Reissue the kill command if you want to risk "
3707 sig = info->si_signo;
3708 if (send_sig_info(sig, info, t))
3709 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3712 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3714 #endif /* CONFIG_KGDB_KDB */