/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *             Changes to use preallocated sigqueue structures
 *             to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
        return t->sighand->action[sig - 1].sa.sa_handler;

static inline bool sig_handler_ignored(void __user *handler, int sig)
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
               (handler == SIG_DFL && sig_kernel_ignore(sig));

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
        handler = sig_handler(t, sig);

        /* SIGKILL and SIGSTOP may not be sent to the global init */
        if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
                return true;

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
            handler == SIG_DFL && !(force && sig_kernel_only(sig)))
                return true;

        /* Only allow kernel generated signals to this kthread */
        if (unlikely((t->flags & PF_KTHREAD) &&
                     (handler == SIG_KTHREAD_KERNEL) && !force))
                return true;

        return sig_handler_ignored(handler, sig);

static bool sig_ignored(struct task_struct *t, int sig, bool force)
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return false;

        /*
         * Tracers may want to know about even ignored signals, unless it
         * is SIGKILL, which can't be reported anyway but can be ignored
         * by a SIGNAL_UNKILLABLE task.
         */
        if (t->ptrace && sig != SIGKILL)
                return false;

        return sig_task_ignored(t, sig, force);

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
        switch (_NSIG_WORDS) {
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
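/*
 * Worked example: with _NSIG_WORDS == 1, a pending but blocked SIGINT
 * yields signal->sig[0] & ~blocked->sig[0] == 0, so nothing is
 * deliverable; unblocking SIGINT makes the same expression non-zero and
 * PENDING() reports true.
 */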
static bool recalc_sigpending_tsk(struct task_struct *t)
        if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked) ||
            cgroup_task_frozen(t)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return true;
        }

        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers who know they should
         * clear it do so.
         */
        return false;

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);

void recalc_sigpending(void)
        if (!recalc_sigpending_tsk(current) && !freezing(current) &&
            !klp_patch_pending(current))
                clear_thread_flag(TIF_SIGPENDING);

void calculate_sigpending(void)
        /* Have any signals or users of TIF_SIGPENDING been delayed
         * until after fork?
         */
        spin_lock_irq(&current->sighand->siglock);
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        spin_unlock_irq(&current->sighand->siglock);

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
        unsigned long i, *s, *m, x;

        s = pending->signal.sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        if (x & SYNCHRONOUS_MASK)
                x &= SYNCHRONOUS_MASK;

        switch (_NSIG_WORDS) {
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        sig = ffz(~x) + i*_NSIG_BPW + 1;

                sig = ffz(~x) + _NSIG_BPW + 1;
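/*
 * Illustration of the first-word special case above: if SIGSEGV
 * (synchronous) and SIGUSR1 (asynchronous) are both pending and
 * unblocked, x is reduced to the synchronous set, so next_signal()
 * reports SIGSEGV first and the fault that raised it is serviced
 * before the unrelated asynchronous signal.
 */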
static inline void print_dropped_signal(int sig)
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                current->comm, current->pid, sig);

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * Must be called with @task->sighand->siglock held.
 *
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
        BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
        BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

        if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
                return false;

        if (mask & JOBCTL_STOP_SIGMASK)
                task->jobctl &= ~JOBCTL_STOP_SIGMASK;

        task->jobctl |= mask;

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
        if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
                task->jobctl &= ~JOBCTL_TRAPPING;
                smp_mb();	/* advised by wake_up_bit() */
                wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
        BUG_ON(mask & ~JOBCTL_PENDING_MASK);

        if (mask & JOBCTL_STOP_PENDING)
                mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

        task->jobctl &= ~mask;

        if (!(task->jobctl & JOBCTL_PENDING_MASK))
                task_clear_jobctl_trapping(task);

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * Must be called with @task->sighand->siglock held.
 *
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
        struct signal_struct *sig = task->signal;
        bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

        WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

        task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

        if (!WARN_ON_ONCE(sig->group_stop_count == 0))
                sig->group_stop_count--;

        /*
         * Tell the caller to notify completion iff we are entering into a
         * fresh group stop. Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
                signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);

void task_join_group_stop(struct task_struct *task)
        unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
        struct signal_struct *sig = current->signal;

        if (sig->group_stop_count) {
                sig->group_stop_count++;
                mask |= JOBCTL_STOP_CONSUME;
        } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
                return;

        /* Have the new thread join an ongoing signal group stop */
        task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         *
         * NOTE! A pending signal will hold on to the user refcount,
         * and we get/put the refcount only when the sigpending count
         * changes from/to zero.
         */
        user = __task_cred(t)->user;
        sigpending = atomic_inc_return(&user->sigpending);

        if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);

        if (unlikely(q == NULL)) {
                if (atomic_dec_and_test(&user->sigpending))
                        free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);

static void __sigqueue_free(struct sigqueue *q)
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        if (atomic_dec_and_test(&q->user->sigpending))
                free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);

void flush_sigqueue(struct sigpending *queue)
        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
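/*
 * A minimal sketch of the intended use from a kthread (hypothetical
 * worker, not taken from this file): a kthread that opted in to signal
 * delivery with allow_signal() can drop whatever is still pending
 * before it exits:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		do_work();
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */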
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);

        sigorsets(&pending->signal, &signal, &retain);

void flush_itimer_signals(void)
        struct task_struct *tsk = current;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
#endif

void ignore_signals(struct task_struct *t)
        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
        struct k_sigaction *ka = &t->sighand->action[0];

        for (i = _NSIG; i != 0; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
#ifdef __ARCH_HAS_SA_RESTORER
                ka->sa.sa_restorer = NULL;
#endif
                sigemptyset(&ka->sa.sa_mask);

bool unhandled_signal(struct task_struct *tsk, int sig)
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

        if (is_global_init(tsk))
                return true;

        if (handler != SIG_IGN && handler != SIG_DFL)
                return false;

        /* if ptraced, let the tracer determine */

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
                           bool *resched_timer)
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal. Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {

        sigdelset(&list->signal, sig);

        if (first) {
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);

                *resched_timer =
                        (first->flags & SIGQUEUE_PREALLOC) &&
                        (info->si_code == SI_TIMER) &&
                        (info->si_sys_private);

                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue. This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space. So zero out the info.
                 */
                info->si_signo = sig;
                info->si_code = SI_USER;

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            siginfo_t *info, bool *resched_timer)
        int sig = next_signal(pending, mask);

        if (sig)
                collect_signal(sig, pending, info, resched_timer);

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
        bool resched_timer = false;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
                /*
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavily loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
#endif

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal. Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL). So those cases clear this
                 * shared flag after we've set it. Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled. That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                current->jobctl |= JOBCTL_STOP_DEQUEUED;
#ifdef CONFIG_POSIX_TIMERS
        if (resched_timer) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks. Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                posixtimer_rearm(info);
                spin_lock(&tsk->sighand->siglock);

                /* Don't expose the si_sys_private value to userspace */
                info->si_sys_private = 0;
#endif

static int dequeue_synchronous_signal(siginfo_t *info)
        struct task_struct *tsk = current;
        struct sigpending *pending = &tsk->pending;
        struct sigqueue *q, *sync = NULL;

        /*
         * Might a synchronous signal be in the queue?
         */
        if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
                return 0;

        /*
         * Return the first synchronous signal in the queue.
         */
        list_for_each_entry(q, &pending->list, list) {
                /* Synchronous signals have a positive si_code */
                if ((q->info.si_code > SI_USER) &&
                    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {

        /*
         * Check if there is another siginfo for the same signal.
         */
        list_for_each_entry_continue(q, &pending->list, list) {
                if (q->info.si_signo == sync->info.si_signo)

        sigdelset(&pending->signal, sync->info.si_signo);

        list_del_init(&sync->list);
        copy_siginfo(info, &sync->info);
        __sigqueue_free(sync);
        return info->si_signo;

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
        set_tsk_thread_flag(t, TIF_SIGPENDING);
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
        struct sigqueue *q, *n;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return;

        sigandnsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);

static inline int is_si_special(const struct siginfo *info)
        return info <= SEND_SIG_FORCED;

static inline bool si_fromuser(const struct siginfo *info)
        return info == SEND_SIG_NOINFO ||
               (!is_si_special(info) && SI_FROMUSER(info));
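/*
 * The pointer comparison above works because the special "info" values
 * are small integer cookies rather than real siginfo pointers: in this
 * kernel SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are
 * (struct siginfo *) 0, 1 and 2, so "info <= SEND_SIG_FORCED" covers
 * exactly those three values.
 */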
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);

        return uid_eq(cred->euid, tcred->suid) ||
               uid_eq(cred->euid, tcred->uid)  ||
               uid_eq(cred->uid,  tcred->suid) ||
               uid_eq(cred->uid,  tcred->uid)  ||
               ns_capable(tcred->user_ns, CAP_KILL);

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */

        if (!same_thread_group(current, t) &&
            !kill_ok_by_cred(t)) {
                sid = task_session(t);
                /*
                 * We don't return the error if sid == NULL. The
                 * task was unhashed, the caller must notice this.
                 */
                if (!sid || sid == task_session(current))

        return security_task_kill(t, info, sig, NULL);

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event. @t must have been
 * seized by the ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
        WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
        assert_spin_locked(&t->sighand->siglock);

        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
                if (!(signal->flags & SIGNAL_GROUP_EXIT))
                        return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal. Remove SIGCONT from all queues.
                 */
                siginitset(&flush, sigmask(SIGCONT));
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t)
                        flush_sigqueue_mask(&flush, &t->pending);
        } else if (sig == SIGCONT) {
                /*
                 * Remove all stop signals from all queues, wake all threads.
                 */
                siginitset(&flush, SIG_KERNEL_STOP_MASK);
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        if (likely(!(t->ptrace & PT_SEIZED)))
                                wake_up_state(t, __TASK_STOPPED);
                        else
                                ptrace_trap_notify(t);

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                /*
                 * The first thread which returns from do_signal_stop()
                 * will take ->siglock, notice SIGNAL_CLD_MASK, and
                 * notify its parent. See get_signal_to_deliver().
                 */
                signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
                signal->group_stop_count = 0;
                signal->group_exit_code = 0;

        return !sig_ignored(p, sig, force);

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
        if (sigismember(&p->blocked, sig))
                return false;

        if (p->flags & PF_EXITING)
                return false;

        if (task_is_stopped_or_traced(p))
                return false;

        return task_curr(p) || !signal_pending(p);

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if ((type == PIDTYPE_PID) || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;

        /*
         * Found a killable thread. If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !p->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);

static inline bool legacy_queue(struct sigpending *signals, int sig)
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
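/*
 * Example of the legacy semantics: a second SIGUSR1 sent while one is
 * already pending is collapsed into the pending one (classic System V
 * behaviour), while a second SIGRTMIN is queued as a separate sigqueue
 * entry with its own siginfo.
 */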
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
        if (current_user_ns() == task_cred_xxx(t, user_ns))
                return;

        if (SI_FROMKERNEL(info))
                return;

        info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
                                        make_kuid(current_user_ns(), info->si_uid));
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         enum pid_type type, int from_ancestor_ns)
        struct sigpending *pending;
        int override_rlimit;
        int ret = 0, result;

        assert_spin_locked(&t->sighand->siglock);

        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t,
                            from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
                goto ret;

        pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
                goto ret;

        result = TRACE_SIGNAL_DELIVERED;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism. It is implementation
         * defined whether kill() does so. We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        clear_siginfo(&q->info);
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                         task_active_pid_ns(t));
                        q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        clear_siginfo(&q->info);
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        if (from_ancestor_ns)
                                q->info.si_pid = 0;
                        break;
                }

                userns_fixup_signal_uid(&q->info, t);

        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
                         * Queue overflow, abort. We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        result = TRACE_SIGNAL_OVERFLOW_FAIL;
                } else {
                        /*
                         * This is a silent loss of information. We still
                         * send the signal, but the *info bits are lost.
                         */
                        result = TRACE_SIGNAL_LOSE_INFO;
                }
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);

        /* Let multiprocess signals appear after ongoing forks */
        if (type > PIDTYPE_TGID) {
                struct multiprocess_signals *delayed;
                hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
                        sigset_t *signal = &delayed->signal;
                        /* Can't queue both a stop and a continue signal */
                        if (sig == SIGCONT)
                                sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
                        else if (sig_kernel_stop(sig))
                                sigdelset(signal, SIGCONT);
                        sigaddset(signal, sig);

        complete_signal(sig, t, type);
ret:
        trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
        return ret;

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                       enum pid_type type)
        int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
        from_ancestor_ns = si_fromuser(info) &&
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

        return __send_signal(sig, info, t, type, from_ancestor_ns);

static void print_fatal_signal(int signr)
        struct pt_regs *regs = signal_pt_regs();

        pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
        pr_info("code at %08lx: ", regs->ip);
        for (i = 0; i < 16; i++) {
                if (get_user(insn, (unsigned char *)(regs->ip + i)))
                        break;
                pr_cont("%02x ", insn);
        }
#endif

static int __init setup_print_fatal_signals(char *str)
        get_option(&str, &print_fatal_signals);

__setup("print-fatal-signals=", setup_print_fatal_signals);
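/*
 * For illustration: booting with "print-fatal-signals=1", or writing 1
 * to /proc/sys/kernel/print-fatal-signals at run time, enables the
 * print_fatal_signal() diagnostics above.
 */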
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        return send_signal(sig, info, p, PIDTYPE_TGID);

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        return send_signal(sig, info, t, PIDTYPE_PID);

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                     enum pid_type type)
        unsigned long flags;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, type);
                unlock_task_sighand(p, &flags);

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);

        /*
         * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
         * debugging to leave init killable.
         */
        if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
        struct task_struct *t = p;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
        struct sighand_struct *sighand;

        sighand = rcu_dereference(tsk->sighand);
        if (unlikely(sighand == NULL))
                break;

        /*
         * This sighand can be already freed and even reused, but
         * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
         * initializes ->siglock: this slab can't go away, it has
         * the same object type, ->siglock can't be reinitialized.
         *
         * We need to ensure that tsk->sighand is still the same
         * after we take the lock, we can race with de_thread() or
         * __exit_signal(). In the latter case the next iteration
         * must see ->sighand == NULL.
         */
        spin_lock_irqsave(&sighand->siglock, *flags);
        if (likely(sighand == tsk->sighand))
                break;
        spin_unlock_irqrestore(&sighand->siglock, *flags);

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        enum pid_type type)
        ret = check_kill_permission(sig, info, p);

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, type);

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z, etc.)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
        struct task_struct *p = NULL;
        int retval, success;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
        struct task_struct *p;

        p = pid_task(pid, PIDTYPE_PID);
        if (p)
                error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
        if (likely(!p || error != -ESRCH))
                return error;

        /*
         * The task was unhashed in between, try again. If it
         * is dead, pid_task() will return NULL, if we race with
         * de_thread() it will find the new leader.
         */

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        error = kill_pid_info(sig, info, find_vpid(pid));

static inline bool kill_as_cred_perm(const struct cred *cred,
                                     struct task_struct *target)
        const struct cred *pcred = __task_cred(target);

        return uid_eq(cred->euid, pcred->suid) ||
               uid_eq(cred->euid, pcred->uid)  ||
               uid_eq(cred->uid,  pcred->suid) ||
               uid_eq(cred->uid,  pcred->uid);

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
                          const struct cred *cred)
        struct task_struct *p;
        unsigned long flags;

        if (!valid_signal(sig))
                return -EINVAL;

        p = pid_task(pid, PIDTYPE_PID);
        if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, cred);
        if (ret)
                goto out_unlock;

        if (lock_task_sighand(p, &flags)) {
                ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
                unlock_task_sighand(p, &flags);
        }
out_unlock:
        return ret;
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX leaves the behaviour of kill(-1,sig) largely unspecified, but what
 * we have is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
        ret = kill_pid_info(sig, info, find_vpid(pid));

        /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
        if (pid == INT_MIN)
                return -ESRCH;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, info,
                               pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct *p;

                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                            !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p,
                                                              PIDTYPE_MAX);
                ret = count ? retval : -ESRCH;
        read_unlock(&tasklist_lock);

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        return do_send_sig_info(sig, info, p, PIDTYPE_PID);

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
        return send_sig_info(sig, __si_special(priv), p);
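/*
 * Usage sketch (hypothetical caller holding a reference on @task):
 *
 *	send_sig(SIGHUP, task, 0);	(queued as if sent from user space)
 *	send_sig(SIGKILL, task, 1);	(SEND_SIG_PRIV, si_code = SI_KERNEL)
 */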
void force_sig(int sig, struct task_struct *p)
        force_sig_info(sig, SEND_SIG_PRIV, p);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig, struct task_struct *p)
        if (sig == SIGSEGV) {
                unsigned long flags;

                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);

int force_sig_fault(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
        , struct task_struct *t)
        struct siginfo info;

        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_code = code;
        info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
        info.si_trapno = trapno;
#endif
        info.si_flags = flags;
        return force_sig_info(info.si_signo, &info, t);

int send_sig_fault(int sig, int code, void __user *addr
        ___ARCH_SI_TRAPNO(int trapno)
        ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
        , struct task_struct *t)
        struct siginfo info;

        clear_siginfo(&info);
        info.si_signo = sig;
        info.si_code = code;
        info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
        info.si_trapno = trapno;
#endif
        info.si_flags = flags;
        return send_sig_info(info.si_signo, &info, t);

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
        struct siginfo info;

        WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
        clear_siginfo(&info);
        info.si_signo = SIGBUS;
        info.si_code = code;
        info.si_addr = addr;
        info.si_addr_lsb = lsb;
        return force_sig_info(info.si_signo, &info, t);

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
        struct siginfo info;

        WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
        clear_siginfo(&info);
        info.si_signo = SIGBUS;
        info.si_code = code;
        info.si_addr = addr;
        info.si_addr_lsb = lsb;
        return send_sig_info(info.si_signo, &info, t);
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
        struct siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGSEGV;
        info.si_code = SEGV_BNDERR;
        info.si_addr = addr;
        info.si_lower = lower;
        info.si_upper = upper;
        return force_sig_info(info.si_signo, &info, current);

int force_sig_pkuerr(void __user *addr, u32 pkey)
        struct siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGSEGV;
        info.si_code = SEGV_PKUERR;
        info.si_addr = addr;
        info.si_pkey = pkey;
        return force_sig_info(info.si_signo, &info, current);

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
        struct siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGTRAP;
        info.si_errno = errno;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = addr;
        return force_sig_info(info.si_signo, &info, current);

int kill_pgrp(struct pid *pid, int sig, int priv)
        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
        return kill_pid_info(sig, __si_special(priv), pid);
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure in timer_create(). If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

        if (q)
                q->flags |= SIGQUEUE_PREALLOC;
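/*
 * Roughly how the POSIX timer code uses this pairing: the sigqueue is
 * preallocated at timer_create() time, reused by send_sigqueue() on
 * every expiry, and released again when the timer is deleted:
 *
 *	q = sigqueue_alloc();			(timer_create())
 *	...
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	(each timer expiry)
 *	...
 *	sigqueue_free(q);			(timer deletion)
 */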
void sigqueue_free(struct sigqueue *q)
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
        int sig = q->info.si_signo;
        struct sigpending *pending;
        struct task_struct *t;
        unsigned long flags;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        t = pid_task(pid, type);
        if (!t || !likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, false))
                goto out;

        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                result = TRACE_SIGNAL_ALREADY_PENDING;
                goto out;
        }
        q->info.si_overrun = 0;

        signalfd_notify(t, sig);
        pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, type);
        result = TRACE_SIGNAL_DELIVERED;
out:
        trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
        unlock_task_sighand(t, &flags);
ret:

static void do_notify_pidfd(struct task_struct *task)
        pid = task_pid(task);
        wake_up_all(&pid->wait_pidfd);

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * autoreap.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
        bool autoreap = false;

        /* do_notify_parent_cldstop should have been called instead. */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        /* Wake up all pidfd waiters */
        do_notify_pidfd(tsk);

        if (sig != SIGCHLD) {
                /*
                 * This is only possible if parent == real_parent.
                 * Check if it has changed security domain.
                 */
                if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
                        sig = SIGCHLD;
        }

        clear_siginfo(&info);
        info.si_signo = sig;
        /*
         * We are under tasklist_lock here so our parent is tied to
         * us and cannot change.
         *
         * task_active_pid_ns will always return the same pid namespace
         * until a task passes through release_task.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, it is not
         * correct to rely on this.
         */
        info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
        info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
                                       task_uid(tsk));

        task_cputime(tsk, &utime, &stime);
        info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
        info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care. POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie. Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
                                     bool for_ptracer, int why)
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;

        if (for_ptracer) {
                parent = tsk->parent;
        } else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        clear_siginfo(&info);
        info.si_signo = SIGCHLD;
        /*
         * see comment in do_notify_parent() about the following 4 lines
         */
        info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
        info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));

        task_cputime(tsk, &utime, &stime);
        info.si_utime = nsec_to_clock_t(utime);
        info.si_stime = nsec_to_clock_t(stime);

        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);

static inline bool may_ptrace_stop(void)
        if (!likely(current->ptrace))
                return false;
        /*
         * Are we in the middle of do_coredump?
         * If so and our tracer is also part of the coredump stopping
         * is a deadlock situation, and pointless because our tracer
         * is dead so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
         *
         * This is almost outdated, a task with the pending SIGKILL can't
         * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
         * after SIGKILL was already dequeued.
         */
        if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
                return false;

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
        return sigismember(&tsk->pending.signal, SIGKILL) ||
               sigismember(&tsk->signal->shared_pending.signal, SIGKILL);

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
        __releases(&current->sighand->siglock)
        __acquires(&current->sighand->siglock)
        bool gstop_done = false;

        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
                 * ptrace stop. This is allowed to block, e.g. for faults
                 * on user stack pages. We can't keep the siglock while
                 * calling arch_ptrace_stop, so we must release it now.
                 * To preserve proper semantics, we must do this before
                 * any signal bookkeeping like checking group_stop_count.
                 * Meanwhile, a SIGKILL could come in before we retake the
                 * siglock. That must prevent us from sleeping in TASK_TRACED.
                 * So after regaining the lock, we must check for SIGKILL.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                arch_ptrace_stop(exit_code, info);
                spin_lock_irq(&current->sighand->siglock);
                if (sigkill_pending(current))
                        return;
        }

        set_special_state(TASK_TRACED);

        /*
         * We're committing to trapping. TRACED should be visible before
         * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
         * Also, transition to TRACED and updates to ->jobctl should be
         * atomic with respect to siglock and should be done after the arch
         * hook as siglock is released and regrabbed across it.
         *
         *     TRACER                              TRACEE
         *
         * [L] wait_on_bit(JOBCTL_TRAPPING)    [S] set_special_state(TRACED)
         *     set_current_state()                 smp_wmb();
         *     wait_task_stopped()
         *       task_stopped_code()
         * [L]     task_is_traced()            [S] task_clear_jobctl_trapping();
         */

        current->last_siginfo = info;
        current->exit_code = exit_code;

        /*
         * If @why is CLD_STOPPED, we're trapping to participate in a group
         * stop. Do the bookkeeping. Note that if SIGCONT was delivered
         * across siglock relocks since INTERRUPT was scheduled, PENDING
         * could be clear now. We act as if SIGCONT is received after
         * TASK_TRACED is entered - ignore it.
         */
        if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
                gstop_done = task_participate_group_stop(current);

        /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
        task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
        if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
                task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

        /* entering a trap, clear TRAPPING */
        task_clear_jobctl_trapping(current);

        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (may_ptrace_stop()) {
                /*
                 * Notify parents of the stop.
                 *
                 * While ptraced, there are two parents - the ptracer and
                 * the real_parent of the group_leader. The ptracer should
                 * know about every stop while the real parent is only
                 * interested in the completion of group stop. The states
                 * for the two don't interact with each other. Notify
                 * separately unless they're gonna be duplicates.
                 */
                do_notify_parent_cldstop(current, true, why);
                if (gstop_done && ptrace_reparented(current))
                        do_notify_parent_cldstop(current, false, why);

                /*
                 * Don't want to allow preemption here, because
                 * sys_ptrace() needs this task to be inactive.
                 *
                 * XXX: implement read_unlock_no_resched().
                 */
                preempt_disable();
                read_unlock(&tasklist_lock);
                cgroup_enter_frozen();
                preempt_enable_no_resched();
                freezable_schedule();
                cgroup_leave_frozen(true);
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
                 *
                 * If @gstop_done, the ptracer went away between group stop
                 * completion and here. During detach, it would have set
                 * JOBCTL_STOP_PENDING on us and we'll re-enter
                 * TASK_STOPPED in do_signal_stop() on return, so notifying
                 * the real parent of the group stop completion is enough.
                 */
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);

                /* tasklist protects us from ptrace_freeze_traced() */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
                read_unlock(&tasklist_lock);
        }

        /*
         * We are back. Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;

        /* LISTENING can be set only during STOP traps, clear it */
        current->jobctl &= ~JOBCTL_LISTENING;

        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
         * This sets TIF_SIGPENDING, but never clears it.
         */
        recalc_sigpending_tsk(current);

static void ptrace_do_notify(int signr, int exit_code, int why)
        clear_siginfo(&info);
        info.si_signo = signr;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

        /* Let the debugger run. */
        ptrace_stop(exit_code, why, 1, &info);

void ptrace_notify(int exit_code)
        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
        if (unlikely(current->task_works))
                task_work_run();

        spin_lock_irq(&current->sighand->siglock);
        ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
        spin_unlock_irq(&current->sighand->siglock);
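/*
 * Callers encode the ptrace event in the upper bits of exit_code; e.g.
 * the exec path reports ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP),
 * which satisfies the BUG_ON() above and becomes visible in the tracer's
 * waitpid() status.
 */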
2205 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2206 * @signr: signr causing group stop if initiating
2208 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2209 * and participate in it. If already set, participate in the existing
2210 * group stop. If participated in a group stop (and thus slept), %true is
2211 * returned with siglock released.
2213 * If ptraced, this function doesn't handle stop itself. Instead,
2214 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2215 * untouched. The caller must ensure that INTERRUPT trap handling takes
2216 * places afterwards.
2219 * Must be called with @current->sighand->siglock held, which is released
2223 * %false if group stop is already cancelled or ptrace trap is scheduled.
2224 * %true if participated in group stop.
2226 static bool do_signal_stop(int signr)
2227 __releases(¤t->sighand->siglock)
2229 struct signal_struct *sig = current->signal;
2231 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2232 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2233 struct task_struct *t;
2235 /* signr will be recorded in task->jobctl for retries */
2236 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2238 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2239 unlikely(signal_group_exit(sig)))
2242 * There is no group stop already in progress. We must
2245 * While ptraced, a task may be resumed while group stop is
2246 * still in effect and then receive a stop signal and
2247 * initiate another group stop. This deviates from the
2248 * usual behavior as two consecutive stop signals can't
2249 * cause two group stops when !ptraced. That is why we
2250 * also check !task_is_stopped(t) below.
2252 * The condition can be distinguished by testing whether
2253 * SIGNAL_STOP_STOPPED is already set. Don't generate
2254 * group_exit_code in such case.
2256 * This is not necessary for SIGNAL_STOP_CONTINUED because
2257 * an intervening stop signal is required to cause two
2258 * continued events regardless of ptrace.
2260 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2261 sig->group_exit_code = signr;
2263 sig->group_stop_count = 0;
2265 if (task_set_jobctl_pending(current, signr | gstop))
2266 sig->group_stop_count++;
2269 while_each_thread(current, t) {
2271 * Setting state to TASK_STOPPED for a group
2272 * stop is always done with the siglock held,
2273 * so this check has no races.
2275 if (!task_is_stopped(t) &&
2276 task_set_jobctl_pending(t, signr | gstop)) {
2277 sig->group_stop_count++;
2278 if (likely(!(t->ptrace & PT_SEIZED)))
2279 signal_wake_up(t, 0);
2281 ptrace_trap_notify(t);
2286 if (likely(!current->ptrace)) {
2290 * If there are no other threads in the group, or if there
2291 * is a group stop in progress and we are the last to stop,
2292 * report to the parent.
2294 if (task_participate_group_stop(current))
2295 notify = CLD_STOPPED;
2297 set_special_state(TASK_STOPPED);
2298 spin_unlock_irq(&current->sighand->siglock);
2301 * Notify the parent of the group stop completion. Because
2302 * we're not holding either the siglock or tasklist_lock
2303 here, a ptracer may attach in between; however, this is for
2304 * group stop and should always be delivered to the real
2305 * parent of the group leader. The new ptracer will get
2306 * its notification when this task transitions into TASK_TRACED.
2310 read_lock(&tasklist_lock);
2311 do_notify_parent_cldstop(current, false, notify);
2312 read_unlock(&tasklist_lock);
2315 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2316 cgroup_enter_frozen();
2317 freezable_schedule();
2321 * While ptraced, group stop is handled by STOP trap.
2322 * Schedule it and let the caller deal with it.
2324 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
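/*
 * Illustrative userspace sketch (not part of this file): the group stop
 * initiated by do_signal_stop() above can be observed with ordinary job
 * control primitives. Assumes a hypothetical child PID from fork().
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	kill(child, SIGSTOP);                   // initiate a group stop
 *	waitpid(child, &status, WUNTRACED);     // reported via CLD_STOPPED
 *	kill(child, SIGCONT);                   // wake the stopped group
 *	waitpid(child, &status, WCONTINUED);    // reported via CLD_CONTINUED
 */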
2330 * do_jobctl_trap - take care of ptrace jobctl traps
2332 * When PT_SEIZED, it's used for both group stop and explicit
2333 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2334 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2335 * the stop signal; otherwise, %SIGTRAP.
2337 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2338 * number as exit_code and no siginfo.
2341 * Must be called with @current->sighand->siglock held, which may be
2342 * released and re-acquired before returning with intervening sleep.
2344 static void do_jobctl_trap(void)
2346 struct signal_struct *signal = current->signal;
2347 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2349 if (current->ptrace & PT_SEIZED) {
2350 if (!signal->group_stop_count &&
2351 !(signal->flags & SIGNAL_STOP_STOPPED))
2353 WARN_ON_ONCE(!signr);
2354 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2357 WARN_ON_ONCE(!signr);
2358 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2359 current->exit_code = 0;
2364 * do_freezer_trap - handle the freezer jobctl trap
2366 * Puts the task into the frozen state, unless the task is about to quit,
2367 * in which case it drops JOBCTL_TRAP_FREEZE instead.
2370 * Must be called with @current->sighand->siglock held,
2371 * which is always released before returning.
2373 static void do_freezer_trap(void)
2374 __releases(&current->sighand->siglock)
2377 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2378 * let's make another loop to give it a chance to be handled.
2379 * In any case, we'll come back here.
2381 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2382 JOBCTL_TRAP_FREEZE) {
2383 spin_unlock_irq(&current->sighand->siglock);
2388 * Now we're sure that there is no pending fatal signal and no
2389 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2390 * immediately (if there is a non-fatal signal pending), and
2391 * put the task to sleep.
2393 __set_current_state(TASK_INTERRUPTIBLE);
2394 clear_thread_flag(TIF_SIGPENDING);
2395 spin_unlock_irq(&current->sighand->siglock);
2396 cgroup_enter_frozen();
2397 freezable_schedule();
2400 static int ptrace_signal(int signr, siginfo_t *info)
2403 * We do not check sig_kernel_stop(signr) but set this marker
2404 * unconditionally because we do not know whether the debugger will
2405 * change signr. This flag has no meaning unless we are going
2406 * to stop after returning from ptrace_stop(). In that case it will
2407 * be checked in do_signal_stop(); we should stop only if it was
2408 * not cleared by SIGCONT while we were sleeping. See also the
2409 * comment in dequeue_signal().
2411 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2412 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2414 /* We're back. Did the debugger cancel the sig? */
2415 signr = current->exit_code;
2419 current->exit_code = 0;
2422 * Update the siginfo structure if the signal has
2423 * changed. If the debugger wanted something
2424 * specific in the siginfo structure then it should
2425 * have updated *info via PTRACE_SETSIGINFO.
2427 if (signr != info->si_signo) {
2428 clear_siginfo(info);
2429 info->si_signo = signr;
2431 info->si_code = SI_USER;
2433 info->si_pid = task_pid_vnr(current->parent);
2434 info->si_uid = from_kuid_munged(current_user_ns(),
2435 task_uid(current->parent));
2439 /* If the (new) signal is now blocked, requeue it. */
2440 if (sigismember(&current->blocked, signr)) {
2441 specific_send_sig_info(signr, info, current);
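/*
 * Illustrative userspace sketch: the "did the debugger cancel the sig?"
 * logic above corresponds to the signal argument a tracer passes to
 * PTRACE_CONT. Assumes a hypothetical, already-attached tracee PID.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	waitpid(tracee, &status, 0);            // tracee trapped in ptrace_stop()
 *	int sig = WSTOPSIG(status);             // the intercepted signal
 *	ptrace(PTRACE_CONT, tracee, 0, 0);      // 0 cancels the signal...
 *	// ...or pass e.g. SIGTERM instead of 0 to substitute a different one
 */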
2448 bool get_signal(struct ksignal *ksig)
2450 struct sighand_struct *sighand = current->sighand;
2451 struct signal_struct *signal = current->signal;
2454 if (unlikely(current->task_works))
2457 if (unlikely(uprobe_deny_signal()))
2461 * Do this once; we can't return to user mode if freezing() is true.
2462 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2463 * thus do not need another check after return.
2468 spin_lock_irq(&sighand->siglock);
2470 * Every stopped thread goes here after wakeup. Check to see if
2471 * we should notify the parent, prepare_signal(SIGCONT) encodes
2472 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2474 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2477 if (signal->flags & SIGNAL_CLD_CONTINUED)
2478 why = CLD_CONTINUED;
2482 signal->flags &= ~SIGNAL_CLD_MASK;
2484 spin_unlock_irq(&sighand->siglock);
2487 * Notify the parent that we're continuing. This event is
2488 * always per-process and doesn't make a whole lot of sense
2489 * for ptracers, who shouldn't consume the state via
2490 * wait(2) either, but, for backward compatibility, notify
2491 * the ptracer of the group leader too, unless it's going to be a duplicate.
2494 read_lock(&tasklist_lock);
2495 do_notify_parent_cldstop(current, false, why);
2497 if (ptrace_reparented(current->group_leader))
2498 do_notify_parent_cldstop(current->group_leader,
2500 read_unlock(&tasklist_lock);
2505 /* Has this task already been marked for death? */
2506 if (signal_group_exit(signal)) {
2507 ksig->info.si_signo = signr = SIGKILL;
2508 sigdelset(&current->pending.signal, SIGKILL);
2509 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2510 &sighand->action[SIGKILL - 1]);
2511 recalc_sigpending();
2512 current->jobctl &= ~JOBCTL_TRAP_FREEZE;
2513 spin_unlock_irq(&sighand->siglock);
2514 if (unlikely(cgroup_task_frozen(current)))
2515 cgroup_leave_frozen(true);
2520 struct k_sigaction *ka;
2522 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2526 if (unlikely(current->jobctl &
2527 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2528 if (current->jobctl & JOBCTL_TRAP_MASK) {
2530 spin_unlock_irq(&sighand->siglock);
2531 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2538 * If the task is leaving the frozen state, let's update
2539 * cgroup counters and reset the frozen bit.
2541 if (unlikely(cgroup_task_frozen(current))) {
2542 spin_unlock_irq(&sighand->siglock);
2543 cgroup_leave_frozen(true);
2548 * Signals generated by the execution of an instruction
2549 * need to be delivered before any other pending signals
2550 * so that the instruction pointer in the signal stack
2551 * frame points to the faulting instruction.
2553 signr = dequeue_synchronous_signal(&ksig->info);
2555 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2558 break; /* will return 0 */
2560 if (unlikely(current->ptrace) && signr != SIGKILL) {
2561 signr = ptrace_signal(signr, &ksig->info);
2566 ka = &sighand->action[signr-1];
2568 /* Trace actually delivered signals. */
2569 trace_signal_deliver(signr, &ksig->info, ka);
2571 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2573 if (ka->sa.sa_handler != SIG_DFL) {
2574 /* Run the handler. */
2577 if (ka->sa.sa_flags & SA_ONESHOT)
2578 ka->sa.sa_handler = SIG_DFL;
2580 break; /* will return non-zero "signr" value */
2584 * Now we are doing the default action for this signal.
2586 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2590 * Global init gets no signals it doesn't want.
2591 * Container-init gets no signals it doesn't want from the same container.
2594 * Note that if global/container-init sees a sig_kernel_only()
2595 * signal here, the signal must have been generated internally
2596 * or must have come from an ancestor namespace. In either
2597 * case, the signal cannot be dropped.
2599 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2600 !sig_kernel_only(signr))
2603 if (sig_kernel_stop(signr)) {
2605 * The default action is to stop all threads in
2606 * the thread group. The job control signals
2607 * do nothing in an orphaned pgrp, but SIGSTOP
2608 * always works. Note that siglock needs to be
2609 * dropped during the call to is_orphaned_pgrp()
2610 * because of lock ordering with tasklist_lock.
2611 * This allows an intervening SIGCONT to be posted.
2612 * We need to check for that and bail out if necessary.
2614 if (signr != SIGSTOP) {
2615 spin_unlock_irq(&sighand->siglock);
2617 /* signals can be posted during this window */
2619 if (is_current_pgrp_orphaned())
2622 spin_lock_irq(&sighand->siglock);
2625 if (likely(do_signal_stop(ksig->info.si_signo))) {
2626 /* It released the siglock. */
2631 * We didn't actually stop, due to a race
2632 * with SIGCONT or something like that.
2637 spin_unlock_irq(&sighand->siglock);
2641 * Anything else is fatal, maybe with a core dump.
2643 current->flags |= PF_SIGNALED;
2645 if (sig_kernel_coredump(signr)) {
2646 if (print_fatal_signals)
2647 print_fatal_signal(ksig->info.si_signo);
2648 proc_coredump_connector(current);
2650 * If it was able to dump core, this kills all
2651 * other threads in the group and synchronizes with
2652 * their demise. If we lost the race with another
2653 * thread getting here, it set group_exit_code
2654 * first and our do_group_exit call below will use
2655 * that value and ignore the one we pass it.
2657 do_coredump(&ksig->info);
2661 * Death signals, no core dump.
2663 do_group_exit(ksig->info.si_signo);
2666 spin_unlock_irq(&sighand->siglock);
2669 return ksig->sig > 0;
2673 * signal_delivered -
2674 * @ksig: kernel signal struct
2675 * @stepping: nonzero if debugger single-step or block-step in use
2677 * This function should be called when a signal has successfully been
2678 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2679 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2680 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2682 static void signal_delivered(struct ksignal *ksig, int stepping)
2686 /* A signal was successfully delivered, and the
2687 saved sigmask was stored on the signal frame,
2688 and will be restored by sigreturn. So we can
2689 simply clear the restore sigmask flag. */
2690 clear_restore_sigmask();
2692 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2693 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2694 sigaddset(&blocked, ksig->sig);
2695 set_current_blocked(&blocked);
2696 tracehook_signal_handler(stepping);
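/*
 * Illustrative userspace sketch of the masking rule documented above:
 * while a handler runs, sa_mask plus the delivered signal are blocked,
 * unless SA_NODEFER suppresses blocking the signal itself.
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = handler;                // hypothetical handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);        // additionally blocked in handler
 *	sa.sa_flags = SA_NODEFER;               // don't block SIGUSR1 itself
 *	sigaction(SIGUSR1, &sa, NULL);
 */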
2699 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2702 force_sigsegv(ksig->sig, current);
2704 signal_delivered(ksig, stepping);
2708 * It could be that complete_signal() picked us to notify about the
2709 * group-wide signal. Other threads should be notified now to take
2710 * the shared signals in @which since we will not.
2712 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2715 struct task_struct *t;
2717 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2718 if (sigisemptyset(&retarget))
2722 while_each_thread(tsk, t) {
2723 if (t->flags & PF_EXITING)
2726 if (!has_pending_signals(&retarget, &t->blocked))
2728 /* Remove the signals this thread can handle. */
2729 sigandsets(&retarget, &retarget, &t->blocked);
2731 if (!signal_pending(t))
2732 signal_wake_up(t, 0);
2734 if (sigisemptyset(&retarget))
2739 void exit_signals(struct task_struct *tsk)
2745 * @tsk is about to have PF_EXITING set - lock out users which
2746 * expect a stable threadgroup.
2748 cgroup_threadgroup_change_begin(tsk);
2750 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2751 tsk->flags |= PF_EXITING;
2752 cgroup_threadgroup_change_end(tsk);
2756 spin_lock_irq(&tsk->sighand->siglock);
2758 * From now on this task is not visible to group-wide signals;
2759 * see wants_signal(), do_signal_stop().
2761 tsk->flags |= PF_EXITING;
2763 cgroup_threadgroup_change_end(tsk);
2765 if (!signal_pending(tsk))
2768 unblocked = tsk->blocked;
2769 signotset(&unblocked);
2770 retarget_shared_pending(tsk, &unblocked);
2772 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2773 task_participate_group_stop(tsk))
2774 group_stop = CLD_STOPPED;
2776 spin_unlock_irq(&tsk->sighand->siglock);
2779 * If group stop has completed, deliver the notification. This
2780 * should always go to the real parent of the group leader.
2782 if (unlikely(group_stop)) {
2783 read_lock(&tasklist_lock);
2784 do_notify_parent_cldstop(tsk, false, group_stop);
2785 read_unlock(&tasklist_lock);
2789 EXPORT_SYMBOL(recalc_sigpending);
2790 EXPORT_SYMBOL_GPL(dequeue_signal);
2791 EXPORT_SYMBOL(flush_signals);
2792 EXPORT_SYMBOL(force_sig);
2793 EXPORT_SYMBOL(send_sig);
2794 EXPORT_SYMBOL(send_sig_info);
2795 EXPORT_SYMBOL(sigprocmask);
2798 * System call entry points.
2802 * sys_restart_syscall - restart a system call
2804 SYSCALL_DEFINE0(restart_syscall)
2806 struct restart_block *restart = &current->restart_block;
2807 return restart->fn(restart);
2810 long do_no_restart_syscall(struct restart_block *param)
2815 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2817 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2818 sigset_t newblocked;
2819 /* A set of now blocked but previously unblocked signals. */
2820 sigandnsets(&newblocked, newset, &current->blocked);
2821 retarget_shared_pending(tsk, &newblocked);
2823 tsk->blocked = *newset;
2824 recalc_sigpending();
2828 * set_current_blocked - change current->blocked mask
2831 * It is wrong to change ->blocked directly; this helper should be used
2832 * to ensure the process can't miss a shared signal we are about to block.
2834 void set_current_blocked(sigset_t *newset)
2836 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2837 __set_current_blocked(newset);
2840 void __set_current_blocked(const sigset_t *newset)
2842 struct task_struct *tsk = current;
2845 * If the signal mask hasn't changed, there is nothing we need
2846 * to do. current->blocked shouldn't be modified by another task.
2848 if (sigequalsets(&tsk->blocked, newset))
2851 spin_lock_irq(&tsk->sighand->siglock);
2852 __set_task_blocked(tsk, newset);
2853 spin_unlock_irq(&tsk->sighand->siglock);
2857 * This is also useful for kernel threads that want to temporarily
2858 * (or permanently) block certain signals.
2860 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2861 * interface happily blocks "unblockable" signals like SIGKILL and SIGSTOP.
2864 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2866 struct task_struct *tsk = current;
2869 /* Lockless, only current can change ->blocked, never from irq */
2871 *oldset = tsk->blocked;
2875 sigorsets(&newset, &tsk->blocked, set);
2878 sigandnsets(&newset, &tsk->blocked, set);
2887 __set_current_blocked(&newset);
2892 * sys_rt_sigprocmask - change the list of currently blocked signals
2893 * @how: whether to add, remove, or set signals
2894 * @nset: new signal mask to apply according to @how, if non-null
2895 * @oset: previous value of signal mask if non-null
2896 * @sigsetsize: size of sigset_t type
2898 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2899 sigset_t __user *, oset, size_t, sigsetsize)
2901 sigset_t old_set, new_set;
2904 /* XXX: Don't preclude handling different sized sigset_t's. */
2905 if (sigsetsize != sizeof(sigset_t))
2908 old_set = current->blocked;
2911 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2913 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2915 error = sigprocmask(how, &new_set, NULL);
2921 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
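/*
 * Illustrative userspace sketch: glibc's sigprocmask(2) wrapper ends up
 * in the syscall above. Blocking SIGINT around a critical region:
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);     // SIG_BLOCK ors into ->blocked
 *	// ... a SIGINT raised here stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);   // restore; pending SIGINT fires
 */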
2928 #ifdef CONFIG_COMPAT
2929 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2930 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2932 sigset_t old_set = current->blocked;
2934 /* XXX: Don't preclude handling different sized sigset_t's. */
2935 if (sigsetsize != sizeof(sigset_t))
2941 if (get_compat_sigset(&new_set, nset))
2943 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2945 error = sigprocmask(how, &new_set, NULL);
2949 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2953 static void do_sigpending(sigset_t *set)
2955 spin_lock_irq(&current->sighand->siglock);
2956 sigorsets(set, &current->pending.signal,
2957 &current->signal->shared_pending.signal);
2958 spin_unlock_irq(&current->sighand->siglock);
2960 /* Outside the lock because only this thread touches it. */
2961 sigandsets(set, &current->blocked, set);
2965 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
2967 * @uset: where the mask of pending signals is returned
2968 * @sigsetsize: size of sigset_t type or smaller
2970 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2974 if (sigsetsize > sizeof(*uset))
2977 do_sigpending(&set);
2979 if (copy_to_user(uset, &set, sigsetsize))
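/*
 * Illustrative userspace sketch: sigpending(2) reports what
 * do_sigpending() computed, i.e. signals both raised and blocked.
 *
 *	sigset_t pending;
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		puts("SIGINT was raised while blocked");
 */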
2985 #ifdef CONFIG_COMPAT
2986 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2987 compat_size_t, sigsetsize)
2991 if (sigsetsize > sizeof(*uset))
2994 do_sigpending(&set);
2996 return put_compat_sigset(uset, &set, sigsetsize);
3000 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3002 enum siginfo_layout layout = SIL_KILL;
3003 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3004 static const struct {
3005 unsigned char limit, layout;
3007 [SIGILL] = { NSIGILL, SIL_FAULT },
3008 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3009 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3010 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3011 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3012 #if defined(SIGEMT) && defined(NSIGEMT)
3013 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3015 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3016 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3017 [SIGSYS] = { NSIGSYS, SIL_SYS },
3019 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
3020 layout = filter[sig].layout;
3021 /* Handle the exceptions */
3022 if ((sig == SIGBUS) &&
3023 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3024 layout = SIL_FAULT_MCEERR;
3025 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3026 layout = SIL_FAULT_BNDERR;
3028 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3029 layout = SIL_FAULT_PKUERR;
3032 else if (si_code <= NSIGPOLL)
3035 if (si_code == SI_TIMER)
3037 else if (si_code == SI_SIGIO)
3039 else if (si_code < 0)
3045 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
3047 if (copy_to_user(to, from, sizeof(struct siginfo)))
3052 #ifdef CONFIG_COMPAT
3053 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3054 const struct siginfo *from)
3055 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3057 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3059 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3060 const struct siginfo *from, bool x32_ABI)
3063 struct compat_siginfo new;
3064 memset(&new, 0, sizeof(new));
3066 new.si_signo = from->si_signo;
3067 new.si_errno = from->si_errno;
3068 new.si_code = from->si_code;
3069 switch (siginfo_layout(from->si_signo, from->si_code)) {
3071 new.si_pid = from->si_pid;
3072 new.si_uid = from->si_uid;
3075 new.si_tid = from->si_tid;
3076 new.si_overrun = from->si_overrun;
3077 new.si_int = from->si_int;
3080 new.si_band = from->si_band;
3081 new.si_fd = from->si_fd;
3084 new.si_addr = ptr_to_compat(from->si_addr);
3085 #ifdef __ARCH_SI_TRAPNO
3086 new.si_trapno = from->si_trapno;
3089 case SIL_FAULT_MCEERR:
3090 new.si_addr = ptr_to_compat(from->si_addr);
3091 #ifdef __ARCH_SI_TRAPNO
3092 new.si_trapno = from->si_trapno;
3094 new.si_addr_lsb = from->si_addr_lsb;
3096 case SIL_FAULT_BNDERR:
3097 new.si_addr = ptr_to_compat(from->si_addr);
3098 #ifdef __ARCH_SI_TRAPNO
3099 new.si_trapno = from->si_trapno;
3101 new.si_lower = ptr_to_compat(from->si_lower);
3102 new.si_upper = ptr_to_compat(from->si_upper);
3104 case SIL_FAULT_PKUERR:
3105 new.si_addr = ptr_to_compat(from->si_addr);
3106 #ifdef __ARCH_SI_TRAPNO
3107 new.si_trapno = from->si_trapno;
3109 new.si_pkey = from->si_pkey;
3112 new.si_pid = from->si_pid;
3113 new.si_uid = from->si_uid;
3114 new.si_status = from->si_status;
3115 #ifdef CONFIG_X86_X32_ABI
3117 new._sifields._sigchld_x32._utime = from->si_utime;
3118 new._sifields._sigchld_x32._stime = from->si_stime;
3122 new.si_utime = from->si_utime;
3123 new.si_stime = from->si_stime;
3127 new.si_pid = from->si_pid;
3128 new.si_uid = from->si_uid;
3129 new.si_int = from->si_int;
3132 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3133 new.si_syscall = from->si_syscall;
3134 new.si_arch = from->si_arch;
3138 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3144 int copy_siginfo_from_user32(struct siginfo *to,
3145 const struct compat_siginfo __user *ufrom)
3147 struct compat_siginfo from;
3149 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3153 to->si_signo = from.si_signo;
3154 to->si_errno = from.si_errno;
3155 to->si_code = from.si_code;
3156 switch (siginfo_layout(from.si_signo, from.si_code)) {
3158 to->si_pid = from.si_pid;
3159 to->si_uid = from.si_uid;
3162 to->si_tid = from.si_tid;
3163 to->si_overrun = from.si_overrun;
3164 to->si_int = from.si_int;
3167 to->si_band = from.si_band;
3168 to->si_fd = from.si_fd;
3171 to->si_addr = compat_ptr(from.si_addr);
3172 #ifdef __ARCH_SI_TRAPNO
3173 to->si_trapno = from.si_trapno;
3176 case SIL_FAULT_MCEERR:
3177 to->si_addr = compat_ptr(from.si_addr);
3178 #ifdef __ARCH_SI_TRAPNO
3179 to->si_trapno = from.si_trapno;
3181 to->si_addr_lsb = from.si_addr_lsb;
3183 case SIL_FAULT_BNDERR:
3184 to->si_addr = compat_ptr(from.si_addr);
3185 #ifdef __ARCH_SI_TRAPNO
3186 to->si_trapno = from.si_trapno;
3188 to->si_lower = compat_ptr(from.si_lower);
3189 to->si_upper = compat_ptr(from.si_upper);
3191 case SIL_FAULT_PKUERR:
3192 to->si_addr = compat_ptr(from.si_addr);
3193 #ifdef __ARCH_SI_TRAPNO
3194 to->si_trapno = from.si_trapno;
3196 to->si_pkey = from.si_pkey;
3199 to->si_pid = from.si_pid;
3200 to->si_uid = from.si_uid;
3201 to->si_status = from.si_status;
3202 #ifdef CONFIG_X86_X32_ABI
3203 if (in_x32_syscall()) {
3204 to->si_utime = from._sifields._sigchld_x32._utime;
3205 to->si_stime = from._sifields._sigchld_x32._stime;
3209 to->si_utime = from.si_utime;
3210 to->si_stime = from.si_stime;
3214 to->si_pid = from.si_pid;
3215 to->si_uid = from.si_uid;
3216 to->si_int = from.si_int;
3219 to->si_call_addr = compat_ptr(from.si_call_addr);
3220 to->si_syscall = from.si_syscall;
3221 to->si_arch = from.si_arch;
3226 #endif /* CONFIG_COMPAT */
3229 * do_sigtimedwait - wait for queued signals specified in @which
3230 * @which: queued signals to wait for
3231 * @info: if non-null, the signal's siginfo is returned here
3232 * @ts: upper bound on process time suspension
3234 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3235 const struct timespec *ts)
3237 ktime_t *to = NULL, timeout = KTIME_MAX;
3238 struct task_struct *tsk = current;
3239 sigset_t mask = *which;
3243 if (!timespec_valid(ts))
3245 timeout = timespec_to_ktime(*ts);
3250 * Invert the set of allowed signals to get those we want to block.
3252 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3255 spin_lock_irq(&tsk->sighand->siglock);
3256 sig = dequeue_signal(tsk, &mask, info);
3257 if (!sig && timeout) {
3259 * None ready; temporarily unblock the signals we're interested in
3260 * while we are sleeping, so that we'll be awakened when
3261 * they arrive. Unblocking is always fine, we can avoid
3262 * set_current_blocked().
3264 tsk->real_blocked = tsk->blocked;
3265 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3266 recalc_sigpending();
3267 spin_unlock_irq(&tsk->sighand->siglock);
3269 __set_current_state(TASK_INTERRUPTIBLE);
3270 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3272 spin_lock_irq(&tsk->sighand->siglock);
3273 __set_task_blocked(tsk, &tsk->real_blocked);
3274 sigemptyset(&tsk->real_blocked);
3275 sig = dequeue_signal(tsk, &mask, info);
3277 spin_unlock_irq(&tsk->sighand->siglock);
3281 return ret ? -EINTR : -EAGAIN;
3285 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3287 * @uthese: queued signals to wait for
3288 * @uinfo: if non-null, the signal's siginfo is returned here
3289 * @uts: upper bound on process time suspension
3290 * @sigsetsize: size of sigset_t type
3292 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3293 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3301 /* XXX: Don't preclude handling different sized sigset_t's. */
3302 if (sigsetsize != sizeof(sigset_t))
3305 if (copy_from_user(&these, uthese, sizeof(these)))
3309 if (copy_from_user(&ts, uts, sizeof(ts)))
3313 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3315 if (ret > 0 && uinfo) {
3316 if (copy_siginfo_to_user(uinfo, &info))
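/*
 * Illustrative userspace sketch: sigtimedwait(2) reaches the helper
 * above. The signals must be blocked first, or they may be delivered
 * to a handler instead of being dequeued here.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts); // -1 with errno EAGAIN on timeout
 */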
3323 #ifdef CONFIG_COMPAT
3324 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3325 struct compat_siginfo __user *, uinfo,
3326 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3333 if (sigsetsize != sizeof(sigset_t))
3336 if (get_compat_sigset(&s, uthese))
3340 if (compat_get_timespec(&t, uts))
3344 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3346 if (ret > 0 && uinfo) {
3347 if (copy_siginfo_to_user32(uinfo, &info))
3355 static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
3357 clear_siginfo(info);
3358 info->si_signo = sig;
3360 info->si_code = SI_USER;
3361 info->si_pid = task_tgid_vnr(current);
3362 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3366 * sys_kill - send a signal to a process
3367 * @pid: the PID of the process
3368 * @sig: signal to be sent
3370 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3372 struct siginfo info;
3374 prepare_kill_siginfo(sig, &info);
3376 return kill_something_info(sig, &info, pid);
3380 * Verify that the signaler and signalee either are in the same pid namespace
3381 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3384 static bool access_pidfd_pidns(struct pid *pid)
3386 struct pid_namespace *active = task_active_pid_ns(current);
3387 struct pid_namespace *p = ns_of_pid(pid);
3400 static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
3402 #ifdef CONFIG_COMPAT
3404 * Avoid hooking up compat syscalls and instead handle necessary
3405 * conversions here. Note, this is a stop-gap measure and should not be
3406 * considered a generic solution.
3408 if (in_compat_syscall())
3409 return copy_siginfo_from_user32(
3410 kinfo, (struct compat_siginfo __user *)info);
3412 return copy_from_user(kinfo, info, sizeof(siginfo_t));
3415 static struct pid *pidfd_to_pid(const struct file *file)
3417 if (file->f_op == &pidfd_fops)
3418 return file->private_data;
3420 return tgid_pidfd_to_pid(file);
3424 * sys_pidfd_send_signal - Signal a process through a pidfd
3425 * @pidfd: file descriptor of the process
3426 * @sig: signal to send
3427 * @info: signal info
3428 * @flags: future flags
3430 * The syscall currently only signals via PIDTYPE_PID which covers
3431 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3433 * In order to extend the syscall to threads and process groups the @flags
3434 * argument should be used. In essence, the @flags argument will determine
3435 * what is signaled and not the file descriptor itself. In other words,
3436 * grouping is a property of the flags argument, not a property of the file descriptor.
3439 * Return: 0 on success, negative errno on failure
3441 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3442 siginfo_t __user *, info, unsigned int, flags)
3449 /* Enforce that flags be 0 until we add an extension. */
3457 /* Is this a pidfd? */
3458 pid = pidfd_to_pid(f.file);
3465 if (!access_pidfd_pidns(pid))
3469 ret = copy_siginfo_from_user_any(&kinfo, info);
3474 if (unlikely(sig != kinfo.si_signo))
3477 /* Only allow sending arbitrary signals to yourself. */
3479 if ((task_pid(current) != pid) &&
3480 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3483 prepare_kill_siginfo(sig, &kinfo);
3486 ret = kill_pid_info(sig, &kinfo, pid);
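/*
 * Illustrative userspace sketch (hypothetical target PID, no libc
 * wrapper assumed): the tgid_pidfd_to_pid() fallback above means a
 * target's /proc directory fd can serve as the pidfd.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *
 *	int pidfd = open("/proc/1234", O_DIRECTORY | O_CLOEXEC);
 *	syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 */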
3494 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3496 struct task_struct *p;
3500 p = find_task_by_vpid(pid);
3501 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3502 error = check_kill_permission(sig, info, p);
3504 * The null signal is a permissions and process existence
3505 * probe. No signal is actually delivered.
3507 if (!error && sig) {
3508 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3510 * If lock_task_sighand() failed we pretend the task
3511 * dies after receiving the signal. The window is tiny,
3512 * and the signal is private anyway.
3514 if (unlikely(error == -ESRCH))
3523 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3525 struct siginfo info;
3527 clear_siginfo(&info);
3528 info.si_signo = sig;
3530 info.si_code = SI_TKILL;
3531 info.si_pid = task_tgid_vnr(current);
3532 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3534 return do_send_specific(tgid, pid, sig, &info);
3538 * sys_tgkill - send signal to one specific thread
3539 * @tgid: the thread group ID of the thread
3540 * @pid: the PID of the thread
3541 * @sig: signal to be sent
3543 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3544 * exists but no longer belongs to the target process. This
3545 * method solves the problem of threads exiting and PIDs getting reused.
3547 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3549 /* This is only valid for single tasks */
3550 if (pid <= 0 || tgid <= 0)
3553 return do_tkill(tgid, pid, sig);
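/*
 * Illustrative userspace sketch: directing a signal at one thread of the
 * current process (raw syscalls used since older libcs lack wrappers).
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */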
3557 * sys_tkill - send signal to one specific task
3558 * @pid: the PID of the task
3559 * @sig: signal to be sent
3561 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3563 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3565 /* This is only valid for single tasks */
3569 return do_tkill(0, pid, sig);
3572 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3574 /* Not even root can pretend to send signals from the kernel.
3575 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3577 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3578 (task_pid_vnr(current) != pid))
3581 info->si_signo = sig;
3583 /* POSIX.1b doesn't mention process groups. */
3584 return kill_proc_info(sig, info, pid);
3588 * sys_rt_sigqueueinfo - send a signal and accompanying info to a process
3589 * @pid: the PID of the thread
3590 * @sig: signal to be sent
3591 * @uinfo: signal info to be sent
3593 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3594 siginfo_t __user *, uinfo)
3597 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3599 return do_rt_sigqueueinfo(pid, sig, &info);
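/*
 * Illustrative userspace sketch: sigqueue(3) is the usual entry to this
 * syscall; it uses si_code = SI_QUEUE, which is negative and therefore
 * passes the impersonation check in do_rt_sigqueueinfo() above.
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);              // pid: hypothetical target
 */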
3602 #ifdef CONFIG_COMPAT
3603 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3606 struct compat_siginfo __user *, uinfo)
3609 int ret = copy_siginfo_from_user32(&info, uinfo);
3612 return do_rt_sigqueueinfo(pid, sig, &info);
3616 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3618 /* This is only valid for single tasks */
3619 if (pid <= 0 || tgid <= 0)
3622 /* Not even root can pretend to send signals from the kernel.
3623 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3625 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3626 (task_pid_vnr(current) != pid))
3629 info->si_signo = sig;
3631 return do_send_specific(tgid, pid, sig, info);
3634 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3635 siginfo_t __user *, uinfo)
3639 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3642 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3645 #ifdef CONFIG_COMPAT
3646 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3650 struct compat_siginfo __user *, uinfo)
3654 if (copy_siginfo_from_user32(&info, uinfo))
3656 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3661 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3663 void kernel_sigaction(int sig, __sighandler_t action)
3665 spin_lock_irq(&current->sighand->siglock);
3666 current->sighand->action[sig - 1].sa.sa_handler = action;
3667 if (action == SIG_IGN) {
3671 sigaddset(&mask, sig);
3673 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3674 flush_sigqueue_mask(&mask, &current->pending);
3675 recalc_sigpending();
3677 spin_unlock_irq(&current->sighand->siglock);
3679 EXPORT_SYMBOL(kernel_sigaction);
3681 void __weak sigaction_compat_abi(struct k_sigaction *act,
3682 struct k_sigaction *oact)
3686 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3688 struct task_struct *p = current, *t;
3689 struct k_sigaction *k;
3692 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3695 k = &p->sighand->action[sig-1];
3697 spin_lock_irq(&p->sighand->siglock);
3701 sigaction_compat_abi(act, oact);
3704 sigdelsetmask(&act->sa.sa_mask,
3705 sigmask(SIGKILL) | sigmask(SIGSTOP));
3709 * "Setting a signal action to SIG_IGN for a signal that is
3710 * pending shall cause the pending signal to be discarded,
3711 * whether or not it is blocked."
3713 * "Setting a signal action to SIG_DFL for a signal that is
3714 * pending and whose default action is to ignore the signal
3715 * (for example, SIGCHLD), shall cause the pending signal to
3716 * be discarded, whether or not it is blocked"
3718 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3720 sigaddset(&mask, sig);
3721 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3722 for_each_thread(p, t)
3723 flush_sigqueue_mask(&mask, &t->pending);
3727 spin_unlock_irq(&p->sighand->siglock);
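/*
 * Illustrative userspace sketch of the POSIX discard rule quoted above:
 * a blocked, pending signal vanishes once its action becomes SIG_IGN.
 *
 *	sigset_t set, pending;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);                         // now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);               // pending instance discarded
 *	sigpending(&pending);                   // SIGUSR1 is no longer set
 */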
3732 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3735 struct task_struct *t = current;
3738 memset(oss, 0, sizeof(stack_t));
3739 oss->ss_sp = (void __user *) t->sas_ss_sp;
3740 oss->ss_size = t->sas_ss_size;
3741 oss->ss_flags = sas_ss_flags(sp) |
3742 (current->sas_ss_flags & SS_FLAG_BITS);
3746 void __user *ss_sp = ss->ss_sp;
3747 size_t ss_size = ss->ss_size;
3748 unsigned ss_flags = ss->ss_flags;
3751 if (unlikely(on_sig_stack(sp)))
3754 ss_mode = ss_flags & ~SS_FLAG_BITS;
3755 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3759 if (ss_mode == SS_DISABLE) {
3763 if (unlikely(ss_size < min_ss_size))
3767 t->sas_ss_sp = (unsigned long) ss_sp;
3768 t->sas_ss_size = ss_size;
3769 t->sas_ss_flags = ss_flags;
3774 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3778 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3780 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3781 current_user_stack_pointer(),
3783 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
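/*
 * Illustrative userspace sketch: installing an alternate stack so that a
 * SIGSEGV handler can still run when the main stack has overflowed.
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),   // error checking omitted
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = handler;                // hypothetical handler
 *	sa.sa_flags = SA_ONSTACK;               // run it on the alternate stack
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */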
3788 int restore_altstack(const stack_t __user *uss)
3791 if (copy_from_user(&new, uss, sizeof(stack_t)))
3793 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3795 /* squash all but EFAULT for now */
3799 int __save_altstack(stack_t __user *uss, unsigned long sp)
3801 struct task_struct *t = current;
3802 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3803 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3804 __put_user(t->sas_ss_size, &uss->ss_size);
3807 if (t->sas_ss_flags & SS_AUTODISARM)
3812 #ifdef CONFIG_COMPAT
3813 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3814 compat_stack_t __user *uoss_ptr)
3820 compat_stack_t uss32;
3821 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3823 uss.ss_sp = compat_ptr(uss32.ss_sp);
3824 uss.ss_flags = uss32.ss_flags;
3825 uss.ss_size = uss32.ss_size;
3827 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3828 compat_user_stack_pointer(),
3829 COMPAT_MINSIGSTKSZ);
3830 if (ret >= 0 && uoss_ptr) {
3832 memset(&old, 0, sizeof(old));
3833 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3834 old.ss_flags = uoss.ss_flags;
3835 old.ss_size = uoss.ss_size;
3836 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3842 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3843 const compat_stack_t __user *, uss_ptr,
3844 compat_stack_t __user *, uoss_ptr)
3846 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3849 int compat_restore_altstack(const compat_stack_t __user *uss)
3851 int err = do_compat_sigaltstack(uss, NULL);
3852 /* squash all but -EFAULT for now */
3853 return err == -EFAULT ? err : 0;
3856 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3859 struct task_struct *t = current;
3860 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3862 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3863 __put_user(t->sas_ss_size, &uss->ss_size);
3866 if (t->sas_ss_flags & SS_AUTODISARM)
3872 #ifdef __ARCH_WANT_SYS_SIGPENDING
3875 * sys_sigpending - examine pending signals
3876 * @uset: where the mask of pending signals is returned
3878 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3882 if (sizeof(old_sigset_t) > sizeof(*uset))
3885 do_sigpending(&set);
3887 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
3893 #ifdef CONFIG_COMPAT
3894 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3898 do_sigpending(&set);
3900 return put_user(set.sig[0], set32);
3906 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3908 * sys_sigprocmask - examine and change blocked signals
3909 * @how: whether to add, remove, or set signals
3910 * @nset: signals to add or remove (if non-null)
3911 * @oset: previous value of signal mask if non-null
3913 * Some platforms have their own version with special arguments;
3914 * others support only sys_rt_sigprocmask.
3917 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3918 old_sigset_t __user *, oset)
3920 old_sigset_t old_set, new_set;
3921 sigset_t new_blocked;
3923 old_set = current->blocked.sig[0];
3926 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3929 new_blocked = current->blocked;
3933 sigaddsetmask(&new_blocked, new_set);
3936 sigdelsetmask(&new_blocked, new_set);
3939 new_blocked.sig[0] = new_set;
3945 set_current_blocked(&new_blocked);
3949 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3955 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3957 #ifndef CONFIG_ODD_RT_SIGACTION
3959 * sys_rt_sigaction - alter an action taken by a process
3960 * @sig: signal to be sent
3961 * @act: new sigaction
3962 * @oact: used to save the previous sigaction
3963 * @sigsetsize: size of sigset_t type
3965 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3966 const struct sigaction __user *, act,
3967 struct sigaction __user *, oact,
3970 struct k_sigaction new_sa, old_sa;
3973 /* XXX: Don't preclude handling different sized sigset_t's. */
3974 if (sigsetsize != sizeof(sigset_t))
3977 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3980 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3984 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3989 #ifdef CONFIG_COMPAT
3990 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3991 const struct compat_sigaction __user *, act,
3992 struct compat_sigaction __user *, oact,
3993 compat_size_t, sigsetsize)
3995 struct k_sigaction new_ka, old_ka;
3996 #ifdef __ARCH_HAS_SA_RESTORER
3997 compat_uptr_t restorer;
4001 /* XXX: Don't preclude handling different sized sigset_t's. */
4002 if (sigsetsize != sizeof(compat_sigset_t))
4006 compat_uptr_t handler;
4007 ret = get_user(handler, &act->sa_handler);
4008 new_ka.sa.sa_handler = compat_ptr(handler);
4009 #ifdef __ARCH_HAS_SA_RESTORER
4010 ret |= get_user(restorer, &act->sa_restorer);
4011 new_ka.sa.sa_restorer = compat_ptr(restorer);
4013 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4014 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4019 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4021 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4023 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4024 sizeof(oact->sa_mask));
4025 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4026 #ifdef __ARCH_HAS_SA_RESTORER
4027 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4028 &oact->sa_restorer);
4034 #endif /* !CONFIG_ODD_RT_SIGACTION */
4036 #ifdef CONFIG_OLD_SIGACTION
4037 SYSCALL_DEFINE3(sigaction, int, sig,
4038 const struct old_sigaction __user *, act,
4039 struct old_sigaction __user *, oact)
4041 struct k_sigaction new_ka, old_ka;
4046 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
4047 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4048 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4049 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4050 __get_user(mask, &act->sa_mask))
4052 #ifdef __ARCH_HAS_KA_RESTORER
4053 new_ka.ka_restorer = NULL;
4055 siginitset(&new_ka.sa.sa_mask, mask);
4058 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4061 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
4062 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4063 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4064 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4065 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4072 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4073 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4074 const struct compat_old_sigaction __user *, act,
4075 struct compat_old_sigaction __user *, oact)
4077 struct k_sigaction new_ka, old_ka;
4079 compat_old_sigset_t mask;
4080 compat_uptr_t handler, restorer;
4083 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
4084 __get_user(handler, &act->sa_handler) ||
4085 __get_user(restorer, &act->sa_restorer) ||
4086 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4087 __get_user(mask, &act->sa_mask))
4090 #ifdef __ARCH_HAS_KA_RESTORER
4091 new_ka.ka_restorer = NULL;
4093 new_ka.sa.sa_handler = compat_ptr(handler);
4094 new_ka.sa.sa_restorer = compat_ptr(restorer);
4095 siginitset(&new_ka.sa.sa_mask, mask);
4098 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4101 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
4102 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4103 &oact->sa_handler) ||
4104 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4105 &oact->sa_restorer) ||
4106 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4107 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4114 #ifdef CONFIG_SGETMASK_SYSCALL
4117 * For backwards compatibility. Functionality superseded by sigprocmask.
4119 SYSCALL_DEFINE0(sgetmask)
4122 return current->blocked.sig[0];
4125 SYSCALL_DEFINE1(ssetmask, int, newmask)
4127 int old = current->blocked.sig[0];
4130 siginitset(&newset, newmask);
4131 set_current_blocked(&newset);
4135 #endif /* CONFIG_SGETMASK_SYSCALL */
4137 #ifdef __ARCH_WANT_SYS_SIGNAL
4139 * For backwards compatibility. Functionality superseded by sigaction.
4141 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4143 struct k_sigaction new_sa, old_sa;
4146 new_sa.sa.sa_handler = handler;
4147 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4148 sigemptyset(&new_sa.sa.sa_mask);
4150 ret = do_sigaction(sig, &new_sa, &old_sa);
4152 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4154 #endif /* __ARCH_WANT_SYS_SIGNAL */
4156 #ifdef __ARCH_WANT_SYS_PAUSE
4158 SYSCALL_DEFINE0(pause)
4160 while (!signal_pending(current)) {
4161 __set_current_state(TASK_INTERRUPTIBLE);
4164 return -ERESTARTNOHAND;
4169 static int sigsuspend(sigset_t *set)
4171 current->saved_sigmask = current->blocked;
4172 set_current_blocked(set);
4174 while (!signal_pending(current)) {
4175 __set_current_state(TASK_INTERRUPTIBLE);
4178 set_restore_sigmask();
4179 return -ERESTARTNOHAND;
4183 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4184 * value until a signal is received
4185 * @unewset: new signal mask value
4186 * @sigsetsize: size of sigset_t type
4188 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4192 /* XXX: Don't preclude handling different sized sigset_t's. */
4193 if (sigsetsize != sizeof(sigset_t))
4196 if (copy_from_user(&newset, unewset, sizeof(newset)))
4198 return sigsuspend(&newset);
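/*
 * Illustrative userspace sketch: the classic race-free wait that
 * sigsuspend() exists for; pause(2) alone can lose a wakeup that
 * arrives between testing the flag and sleeping.
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)                           // flag set by SIGUSR1 handler
 *		sigsuspend(&old);               // atomically unblock and sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */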
4201 #ifdef CONFIG_COMPAT
4202 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4206 /* XXX: Don't preclude handling different sized sigset_t's. */
4207 if (sigsetsize != sizeof(sigset_t))
4210 if (get_compat_sigset(&newset, unewset))
4212 return sigsuspend(&newset);
4216 #ifdef CONFIG_OLD_SIGSUSPEND
4217 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4220 siginitset(&blocked, mask);
4221 return sigsuspend(&blocked);
4224 #ifdef CONFIG_OLD_SIGSUSPEND3
4225 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4228 siginitset(&blocked, mask);
4229 return sigsuspend(&blocked);
4233 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4238 void __init signals_init(void)
4240 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
4241 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
4242 != offsetof(struct siginfo, _sifields._pad));
4243 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4245 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4248 #ifdef CONFIG_KGDB_KDB
4249 #include <linux/kdb.h>
4251 * kdb_send_sig - Allows kdb to send signals without exposing
4252 * signal internals. This function checks if the required locks are
4253 * available before calling the main signal code, to avoid kdb deadlocks.
4256 void kdb_send_sig(struct task_struct *t, int sig)
4258 static struct task_struct *kdb_prev_t;
4260 if (!spin_trylock(&t->sighand->siglock)) {
4261 kdb_printf("Can't do kill command now.\n"
4262 "The sigmask lock is held somewhere else in "
4263 "kernel, try again later\n");
4266 new_t = kdb_prev_t != t;
4268 if (t->state != TASK_RUNNING && new_t) {
4269 spin_unlock(&t->sighand->siglock);
4270 kdb_printf("Process is not RUNNING, sending a signal from "
4271 "kdb risks deadlock\n"
4272 "on the run queue locks. "
4273 "The signal has _not_ been sent.\n"
4274 "Reissue the kill command if you want to risk "
4278 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4279 spin_unlock(&t->sighand->siglock);
4281 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4284 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4286 #endif /* CONFIG_KGDB_KDB */