/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;
/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal is SIGHUP,
 * defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
    if (sig < 1 || sig >= _NSIG) {
    return host_to_target_signal_table[sig];

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
    if (sig < 1 || sig > TARGET_NSIG) {
    return target_to_host_signal_table[sig];
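
/*
 * Note: signal_table_init() below fills host_to_target_signal_table[] and
 * target_to_host_signal_table[] as inverses of each other; target signals
 * that end up with no host counterpart keep the _NSIG poison value in
 * target_to_host_signal_table[].
 */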
static inline void target_sigaddset(target_sigset_t *set, int signum)
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;

static inline int target_sigismember(const target_sigset_t *set, int signum)
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
    int host_sig, target_sig;
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
int block_signals(void)
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
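
/*
 * Note: the return value is the previous signal_pending flag; callers such
 * as do_sigprocmask() and do_sigaction() below treat a non-zero result as
 * "a signal is already pending" and back out with -TARGET_ERESTARTSYS so
 * the syscall is restarted after the signal has been delivered.
 */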
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
    TaskState *ts = (TaskState *)thread_cpu->opaque;

        *oldset = ts->signal_mask;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;

            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
            ts->signal_mask = *set;
            g_assert_not_reached();

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;

/* sigaltstack management */
int on_sig_stack(unsigned long sp)
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);

int sas_ss_flags(unsigned long sp)
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;

    switch (ss.ss_flags) {
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:

    case TARGET_SS_ONSTACK:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;

    tinfo->si_signo = sig;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;

        /* Everything else is spoofable. Make best guess based on signal */
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;

            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;

            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
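
/*
 * After the deposit32() above, the guessed si_type lives in bits 16..31 of
 * si_code while the guest-visible value stays in bits 0..15; tswap_siginfo()
 * below undoes the packing with extract32()/sextract32().
 */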
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);

        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);

        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);

        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);

        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);

        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);

        g_assert_not_reached();
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);

/* XXX: we support only POSIX RT signals. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
static int fatal_signal(int sig)
    case TARGET_SIGWINCH:
        /* Ignored by default. */

    /* Job control signals. */

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
static void signal_table_init(void)
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.
     * glibc at least uses only the lower 2 rt signals and probably
     * nobody's using the upper ones.
     * That's why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
     * To fix this properly we need to do manual signal delivery multiplexed
     * over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
        trace_signal_table_init(count);
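
/*
 * For example, with a typical glibc where SIGRTMIN is 34 and SIGRTMAX is 64,
 * host signals 34..64 are mapped to target signals 32..62; on a target with
 * 64 signals the top two target rt signals then keep the _NSIG poison value
 * and are counted by the trace event above.
 */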
void signal_init(void)
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (i == TARGET_SIGPROF) {

        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We cannot just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal(i))
            sigaction(host_sig, &act, NULL);
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    force_sig(TARGET_SIGSEGV);
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);

        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourselves a signal and wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);
/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
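
/*
 * Note: queue_signal() stores into ts->sync_signal, the slot that
 * process_pending_signals() treats as a forced, synchronous signal; the
 * caller-supplied QEMU_SI_* type is stashed in the top 16 bits of si_code,
 * matching what host_to_target_siginfo_noswap() does for host-delivered
 * signals.
 */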
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
    /* Default version: never rewind */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);
        if (host_sig == SIGSEGV) {
            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }

            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);

            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);

    rewind_if_in_safe_syscall(puc);
    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
        target_save_altstack(&oss, env);

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
        ret = target_restore_altstack(uss, env);

        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);

        unlock_user_struct(uoss, uoss_addr, 0);
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
    struct target_sigaction *k;
    struct sigaction act1;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;

    k = &sigact_table[sig - 1];
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        oact->sa_mask = k->sa_mask;
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * we don't return an error here because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here can abort them whereas there can be no problem
             * to not have the signal available later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
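
/*
 * Note: host SIGSEGV and SIGBUS are deliberately left pointing at
 * host_signal_handler() no matter what the guest requests, since the
 * emulation itself needs to catch them (see signal_init()); the
 * guest-visible disposition is still recorded in sigact_table[].
 */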
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
    CPUState *cpu = env_cpu(cpu_env);
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */

    sig = gdb_handlesig(cpu, sig);
        handler = TARGET_SIG_IGN;
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
    } else if (handler == TARGET_SIG_IGN) {
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
void process_pending_signals(CPUArchState *cpu_env)
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe. */
        sigprocmask(SIG_SETMASK, &set, 0);

        sig = ts->sync_signal.pending;
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);

    ts->in_sigsuspend = 0;