path: root/signal.c
author    Kaz Kylheku <kaz@kylheku.com>  2014-03-13 02:47:00 -0700
committer Kaz Kylheku <kaz@kylheku.com>  2014-03-13 02:47:00 -0700
commit    d5a6ad5bdb5bdea748a03e9bcc675e0dea33e1d5 (patch)
tree      e12db71b10ca5103f708272a50e28cd8ee555a03 /signal.c
parent    5251ac1ca49b5abc0d68f77291eb463f2040eb31 (diff)
On platforms with sigaltstack, TXR programs can now catch the
segmentation fault that occurs when running out of stack space, and
escape by throwing an exception.

Also, bugfix: save and restore the gc enable/disable state. Without
this, if we are ever running code in a gc-disabled state and it jumps
out, gc stays disabled.

* configure: Added check for sigaltstack.

* gc.h (gc_enabled): Declaration added for existing variable.

* signal.c (is_cpu_exception): New static function.
(sig_handler): For CPU-exception signals, which pertain to the
execution of some instruction, turn on async_sig_enabled so that the
signal is not deferred. Otherwise we would just return without calling
the user-defined handler, restart the faulting instruction and get
into a loop. Also, disable gc around the handler, just in case: we
might be running on the alternate stack, which gc won't like.
(setup_alt_stack, teardown_alt_stack): New static functions.
(set_sig_handler): If we have sigaltstack and are asked to install a
SEGV handler, set it up on the alternate stack.

* signal.h (extended_jmp_buf): New member, gc.
(extended_setjmp, extended_longjmp): Use the gc member to save and
restore the gc-enabled state across setjmp and longjmp; a sketch of
the idea follows below.
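The signal.h half of the change is not shown below, since this view is
limited to signal.c. As a rough illustration only (the actual TXR
macros carry more state, and the member names here are guesses), the
gc member idea amounts to something like the following, assuming
gc_enabled is the flag whose declaration gc.h now exposes:

#include <setjmp.h>

extern int gc_enabled;   /* declaration added to gc.h by this commit */

typedef struct {
  jmp_buf jb;   /* underlying setjmp/longjmp context */
  int gc;       /* new member: saved gc enable/disable state */
} extended_jmp_buf;

/* On the direct return, record the current gc state; on the longjmp
   return, restore it, so that jumping out of gc-disabled code cannot
   leave gc permanently disabled. */
#define extended_setjmp(EJB)                    \
  (setjmp((EJB).jb)                             \
   ? (gc_enabled = (EJB).gc, 1)                 \
   : ((EJB).gc = gc_enabled, 0))

#define extended_longjmp(EJB, ARG) longjmp((EJB).jb, (ARG))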
Diffstat (limited to 'signal.c')
-rw-r--r--  signal.c | 77
1 file changed, 77 insertions, 0 deletions
diff --git a/signal.c b/signal.c
index 4e820ab1..61255b70 100644
--- a/signal.c
+++ b/signal.c
@@ -57,9 +57,31 @@ val sig_ttou, sig_urg, sig_xcpu, sig_xfsz, sig_vtalrm, sig_prof;
val sig_poll, sig_sys, sig_winch, sig_iot, sig_stkflt;
val sig_io, sig_lost, sig_pwr;
+static int is_cpu_exception(int sig)
+{
+ switch (sig) {
+ case SIGFPE: case SIGILL:
+ case SIGSEGV: case SIGBUS:
+ case SIGTRAP:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static void sig_handler(int sig)
{
val lambda = sig_lambda[sig];
+ int gc = 0;
+ int as = 0;
+ int exc = is_cpu_exception(sig);
+
+ if (exc) {
+ gc = gc_state(0);
+ as = async_sig_enabled;
+ async_sig_enabled = 1;
+ }
+
if (lambda) {
if (async_sig_enabled) {
async_sig_enabled = 0;
@@ -70,6 +92,11 @@ static void sig_handler(int sig)
sig_deferred |= (1UL << sig);
}
}
+
+ if (exc) {
+ async_sig_enabled = as;
+ gc_state(gc);
+ }
}
void sig_init(void)
@@ -135,9 +162,49 @@ void sig_init(void)
reg_fun(intern(lit("set-sig-handler"), user_package), func_n2(set_sig_handler));
reg_fun(intern(lit("get-sig-handler"), user_package), func_n1(get_sig_handler));
reg_fun(intern(lit("sig-check"), user_package), func_n0(sig_check));
+}
+
+#if HAVE_SIGALTSTACK
+
+static void *stack;
+
+static void setup_alt_stack(void)
+{
+ stack_t ss;
+
+ if (!stack)
+ stack = chk_malloc(SIGSTKSZ);
+ ss.ss_sp = stack;
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
+
+ if (sigaltstack(&ss, NULL) == -1) {
+ free(stack);
+ stack = 0;
+ }
+}
+
+static void teardown_alt_stack(void)
+{
+ stack_t ss;
+
+ if (!stack)
+ return;
+
+ ss.ss_sp = stack;
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = SS_DISABLE;
+
+ if (sigaltstack(&ss, NULL) == -1)
+ return;
+
+ free(stack);
+ stack = 0;
}
+#endif
+
val set_sig_handler(val signo, val lambda)
{
static struct sigaction blank;
@@ -170,9 +237,19 @@ val set_sig_handler(val signo, val lambda)
sa.sa_flags = SA_RESTART;
sa.sa_handler = sig_handler;
sigfillset(&sa.sa_mask);
+#if HAVE_SIGALTSTACK
+ if (sig == SIGSEGV)
+ setup_alt_stack();
+ sa.sa_flags |= SA_ONSTACK;
+#endif
sigaction(sig, &sa, 0);
}
+#if HAVE_SIGALTSTACK
+ if (sig == SIGSEGV && (lambda == nil || lambda == t))
+ teardown_alt_stack();
+#endif
+
sig_lambda[sig] = lambda;
}
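
Stepping back from the patch itself, the escape technique it relies on
can be shown in a standalone POSIX C sketch (hypothetical example
code, not part of TXR): recurse until the stack overflows, field the
resulting SIGSEGV on an alternate stack, and jump back out with
siglongjmp, much as TXR escapes by throwing an exception. Jumping out
of a signal handler like this is only reasonable for synchronous
faults such as this one.

#include <signal.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>

static sigjmp_buf escape;

static void segv_handler(int sig)
{
  /* We are running on the alternate stack; the main stack is
     exhausted. Jump back below the overflow point. */
  (void) sig;
  siglongjmp(escape, 1);
}

static int recurse(int depth)
{
  char pad[4096];                   /* consume stack space quickly */
  pad[0] = (char) depth;
  return recurse(depth + 1) + pad[0];
}

int main(void)
{
  stack_t ss;
  struct sigaction sa = { 0 };

  ss.ss_sp = malloc(SIGSTKSZ);      /* handler runs on this block */
  ss.ss_size = SIGSTKSZ;
  ss.ss_flags = 0;
  if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
    return 1;

  sa.sa_handler = segv_handler;
  sa.sa_flags = SA_ONSTACK;         /* deliver SIGSEGV on the alt stack */
  sigfillset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, NULL);

  /* Save the signal mask (second argument 1) so that SIGSEGV is
     unblocked again after the jump out of the handler. */
  if (sigsetjmp(escape, 1) == 0)
    recurse(0);
  else
    puts("escaped from stack overflow");

  return 0;
}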