summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKaz Kylheku <kaz@kylheku.com>2015-05-01 06:26:35 -0700
committerKaz Kylheku <kaz@kylheku.com>2015-05-01 06:26:35 -0700
commite87977d7d5a30f5de607bf465091ed17c0ab1ef2 (patch)
treeb70f958c19f001c89bdc923e1a8ba7d207706f43
parent37c08311602c95f7f2f0fb40d33783c78a3b0d62 (diff)
downloadtxr-e87977d7d5a30f5de607bf465091ed17c0ab1ef2.tar.gz
txr-e87977d7d5a30f5de607bf465091ed17c0ab1ef2.tar.bz2
txr-e87977d7d5a30f5de607bf465091ed17c0ab1ef2.zip
Fix corruption triggered by extended gc disabling.
The issue is that when gc is disabled, the gc function does nothing. But various code depends on calls to gc() doing something, like making space available in various static arrays. When gc is disabled for long periods, there are issues, like array overruns. * gc.c (gc): Must no longer be called at all if gc_enabled is false, and asserts that it is true. Callers must check the gc_enabled flag and implement appropriate cases. (make_obj): Only call gc when gc_enabled is true. If there is no space in the freshobj array after trying gc, or gc couldn't be tried because it is disabled, then schedule a full gc. (gc_set): If the checkobj array is full, only call gc if gc is enabled, otherwise schedule a full_gc. (gc_mutated): Do not assume that the mutobj array has room for another object; only set the object's generation to -1 and put it into the array if there is room. Similarly to gc_set, do a gc if there is no room, but if gc is not enabled, then schedule a full gc. (gc_wrap): Only call gc if gc_enabled is true, and return t in that case rather than nil. * txr.1: Document return value of sys:gc function.
-rw-r--r--ChangeLog31
-rw-r--r--gc.c100
-rw-r--r--txr.15
3 files changed, 97 insertions, 39 deletions
diff --git a/ChangeLog b/ChangeLog
index de4347b6..929c4f97 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,34 @@
+2015-05-01 Kaz Kylheku <kaz@kylheku.com>
+
+ Fix corruption triggered by extended gc disabling.
+
+ The issue is that when gc is disabled, the gc function
+ does nothing. But various code depends on calls to gc()
+ doing something, like making space available in various
+ static arrays.
+
+ When gc is disabled for long periods, there are issues,
+ like array overruns.
+
+ * gc.c (gc): Must no longer be called at all if gc_enabled is false,
+ and asserts that it is true. Callers must check the gc_enabled
+ flag and implement appropriate cases.
+ (make_obj): Only call gc when gc_enabled is true.
+ If there is no space in the freshobj array after trying gc,
+ or gc couldn't be tried because it is disabled, then
+ schedule a full gc.
+ (gc_set): If the checkobj array is full, only call gc if gc
+ is enabled, otherwise schedule a full_gc.
+ (gc_mutated): Do not assume that the mutobj array has room
+ for another object; only set the object's generation to -1
+ and put it into the array if there is room. Similarly to
+ gc_set, do a gc if there is no room, but if gc is not enabled, then
+ schedule a full gc.
+ (gc_wrap): Only call gc if gc_enabled is true, and return t in
+ that case rather than nil.
+
+ * txr.1: Document return value of sys:gc function.
+
2015-04-30 Kaz Kylheku <kaz@kylheku.com>
Fix source location for dangling unquotes and splices.
diff --git a/gc.c b/gc.c
index 53357c0b..a553a81c 100644
--- a/gc.c
+++ b/gc.c
@@ -169,14 +169,18 @@ val make_obj(void)
assert (!async_sig_enabled);
#if CONFIG_GEN_GC
- if (opt_gc_debug || freshobj_idx >= FRESHOBJ_VEC_SIZE ||
- malloc_delta >= opt_gc_delta)
+ if ((opt_gc_debug || freshobj_idx >= FRESHOBJ_VEC_SIZE ||
+ malloc_delta >= opt_gc_delta) &&
+ gc_enabled)
{
gc();
prev_malloc_bytes = malloc_bytes;
}
+
+ if (freshobj_idx >= FRESHOBJ_VEC_SIZE)
+ full_gc = 1;
#else
- if (opt_gc_debug || malloc_delta >= opt_gc_delta) {
+ if ((opt_gc_debug || malloc_delta >= opt_gc_delta) && gc_enabled) {
gc();
prev_malloc_bytes = malloc_bytes;
}
@@ -211,8 +215,15 @@ val make_obj(void)
#endif
switch (tries) {
- case 0: gc(); break;
- case 1: more(); break;
+ case 0:
+ if (gc_enabled) {
+ gc();
+ break;
+ }
+ /* fallthrough */
+ case 1:
+ more();
+ break;
}
}
@@ -647,44 +658,43 @@ void gc(void)
int full_gc_next_time = 0;
static int gc_counter;
#endif
+ int swept;
+ mach_context_t mc;
- if (gc_enabled) {
- int swept;
-
- mach_context_t mc;
- save_context(mc);
- gc_enabled = 0;
- mark(&mc, &gc_stack_top);
- hash_process_weak();
- prepare_finals();
- swept = sweep();
+ assert (gc_enabled);
+
+ save_context(mc);
+ gc_enabled = 0;
+ mark(&mc, &gc_stack_top);
+ hash_process_weak();
+ prepare_finals();
+ swept = sweep();
#if CONFIG_GEN_GC
#if 0
- printf("sweep: freed %d full_gc == %d exhausted == %d\n",
- (int) swept, full_gc, exhausted);
+ printf("sweep: freed %d full_gc == %d exhausted == %d\n",
+ (int) swept, full_gc, exhausted);
#endif
- if (++gc_counter >= FULL_GC_INTERVAL ||
- freshobj_idx >= FRESHOBJ_VEC_SIZE)
- {
- full_gc_next_time = 1;
- gc_counter = 0;
- }
+ if (++gc_counter >= FULL_GC_INTERVAL ||
+ freshobj_idx >= FRESHOBJ_VEC_SIZE)
+ {
+ full_gc_next_time = 1;
+ gc_counter = 0;
+ }
- if (exhausted && full_gc && swept < 3 * HEAP_SIZE / 4)
- more();
+ if (exhausted && full_gc && swept < 3 * HEAP_SIZE / 4)
+ more();
#else
- if (swept < 3 * HEAP_SIZE / 4)
- more();
+ if (swept < 3 * HEAP_SIZE / 4)
+ more();
#endif
#if CONFIG_GEN_GC
- checkobj_idx = 0;
- mutobj_idx = 0;
- full_gc = full_gc_next_time;
+ checkobj_idx = 0;
+ mutobj_idx = 0;
+ full_gc = full_gc_next_time;
#endif
- call_finals();
- gc_enabled = 1;
- }
+ call_finals();
+ gc_enabled = 1;
}
int gc_state(int enabled)
@@ -731,9 +741,13 @@ val gc_set(loc lo, val obj)
if (checkobj_idx < CHECKOBJ_VEC_SIZE) {
obj->t.gen = -1;
checkobj[checkobj_idx++] = obj;
- } else {
+ } else if (gc_enabled) {
gc();
- /* obj can't be in gen 0 because there are no baby objects after gc */
+ /* obj can't be in gen 0 because there are no baby objects after gc */
+ } else {
+ /* We have no space in checkobj to record this backreference, and gc is
+ not available to promote obj to gen 0. We must schedule a full gc. */
+ full_gc = 1;
}
}
@@ -747,12 +761,17 @@ val gc_mutated(val obj)
already been noted. And if a full gc is coming, don't bother. */
if (full_gc || obj->t.gen <= 0)
return obj;
- obj->t.gen = -1;
/* Store in mutobj array *before* triggering gc, otherwise
baby objects referenced by obj could be reclaimed! */
- mutobj[mutobj_idx++] = obj;
- if (mutobj_idx >= MUTOBJ_VEC_SIZE)
+ if (mutobj_idx < MUTOBJ_VEC_SIZE) {
+ obj->t.gen = -1;
+ mutobj[mutobj_idx++] = obj;
+ } else if (gc_enabled) {
gc();
+ } else {
+ full_gc = 1;
+ }
+
return obj;
}
@@ -772,7 +791,10 @@ static val gc_set_delta(val delta)
static val gc_wrap(void)
{
- gc();
+ if (gc_enabled) {
+ gc();
+ return t;
+ }
return nil;
}
diff --git a/txr.1 b/txr.1
index 46e244aa..f1c07fa6 100644
--- a/txr.1
+++ b/txr.1
@@ -27698,6 +27698,11 @@ function triggers garbage collection. Garbage collection means
that unreachable objects are identified and reclaimed, so that their
storage can be re-used.
+The function returns
+.code nil
+if garbage collection is disabled (and consequently nothing is done), otherwise
+.codn t .
+
.coNP Function @ sys:gc-set-delta
.synb
.mets (sys:gc-set-delta << bytes )