diff --git a/vm/collector.hpp b/vm/collector.hpp
index ece4926c28..0b8b473e8b 100644
--- a/vm/collector.hpp
+++ b/vm/collector.hpp
@@ -1,6 +1,8 @@
 namespace factor
 {
 
+struct must_start_gc_again {};
+
 template<typename TargetGeneration> struct data_workhorse {
 	factor_vm *parent;
 	TargetGeneration *target;
@@ -27,8 +29,7 @@ template<typename TargetGeneration> struct data_workhorse {
 	{
 		cell size = untagged->size();
 		object *newpointer = target->allot(size);
-		/* XXX not exception-safe */
-		if(!newpointer) longjmp(parent->current_gc->gc_unwind,1);
+		if(!newpointer) throw must_start_gc_again();
 
 		memcpy(newpointer,untagged,size);
 		untagged->forward_to(newpointer);
diff --git a/vm/gc.cpp b/vm/gc.cpp
index a57f338c44..e01a05aa5b 100755
--- a/vm/gc.cpp
+++ b/vm/gc.cpp
@@ -135,49 +135,57 @@ void factor_vm::gc(gc_op op, cell requested_bytes, bool trace_contexts_p)
 
 	/* Keep trying to GC higher and higher generations until we don't run
 	out of space */
-	if(setjmp(current_gc->gc_unwind))
+	for(;;)
 	{
-		/* We come back here if a generation is full */
-		start_gc_again();
-	}
-
-	current_gc->event->op = current_gc->op;
-
-	switch(current_gc->op)
-	{
-	case collect_nursery_op:
-		collect_nursery();
-		break;
-	case collect_aging_op:
-		collect_aging();
-		if(data->high_fragmentation_p())
+		try
 		{
-			current_gc->op = collect_full_op;
-			current_gc->event->op = collect_full_op;
-			collect_full(trace_contexts_p);
+			current_gc->event->op = current_gc->op;
+
+			switch(current_gc->op)
+			{
+			case collect_nursery_op:
+				collect_nursery();
+				break;
+			case collect_aging_op:
+				collect_aging();
+				if(data->high_fragmentation_p())
+				{
+					current_gc->op = collect_full_op;
+					current_gc->event->op = collect_full_op;
+					collect_full(trace_contexts_p);
+				}
+				break;
+			case collect_to_tenured_op:
+				collect_to_tenured();
+				if(data->high_fragmentation_p())
+				{
+					current_gc->op = collect_full_op;
+					current_gc->event->op = collect_full_op;
+					collect_full(trace_contexts_p);
+				}
+				break;
+			case collect_full_op:
+				collect_full(trace_contexts_p);
+				break;
+			case collect_compact_op:
+				collect_compact(trace_contexts_p);
+				break;
+			case collect_growing_heap_op:
+				collect_growing_heap(requested_bytes,trace_contexts_p);
+				break;
+			default:
+				critical_error("Bad GC op",current_gc->op);
+				break;
+			}
+
+			break;
 		}
-		break;
-	case collect_to_tenured_op:
-		collect_to_tenured();
-		if(data->high_fragmentation_p())
+		catch(const must_start_gc_again e)
 		{
-			current_gc->op = collect_full_op;
-			current_gc->event->op = collect_full_op;
-			collect_full(trace_contexts_p);
+			/* We come back here if a generation is full */
+			start_gc_again();
+			continue;
 		}
-		break;
-	case collect_full_op:
-		collect_full(trace_contexts_p);
-		break;
-	case collect_compact_op:
-		collect_compact(trace_contexts_p);
-		break;
-	case collect_growing_heap_op:
-		collect_growing_heap(requested_bytes,trace_contexts_p);
-		break;
-	default:
-		critical_error("Bad GC op",current_gc->op);
-		break;
-	}
 	}
 
 	end_gc();
diff --git a/vm/gc.hpp b/vm/gc.hpp
index 5224dec3e2..5129ced909 100755
--- a/vm/gc.hpp
+++ b/vm/gc.hpp
@@ -45,7 +45,6 @@ struct gc_event {
 struct gc_state {
 	gc_op op;
 	u64 start_time;
-	jmp_buf gc_unwind;
 	gc_event *event;
 
 	explicit gc_state(gc_op op_, factor_vm *parent);
diff --git a/vm/master.hpp b/vm/master.hpp
index 9879fa607a..a111a86b69 100755
--- a/vm/master.hpp
+++ b/vm/master.hpp
@@ -16,7 +16,6 @@
 #include
 #include
 #include
-#include <setjmp.h>
 #include
 #include
 #include
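
Note: the control flow this diff adopts, an empty tag type thrown on allocation failure inside the collector and caught by a retry loop in gc() that escalates the collection, can be sketched in isolation as below. This is a minimal standalone sketch of the pattern only; toy_heap, try_allot, and the escalation rule are hypothetical stand-ins and are not part of the Factor VM.

#include <cstdio>

/* Empty tag type, mirroring must_start_gc_again: thrown when a target
   generation runs out of room mid-collection. */
struct must_start_gc_again {};

/* Hypothetical stand-in for a copying target generation. */
struct toy_heap {
	int capacity;
	int used;

	/* Throws instead of longjmp'ing when the generation is full. */
	void try_allot(int size)
	{
		if(used + size > capacity) throw must_start_gc_again();
		used += size;
	}
};

int main()
{
	toy_heap heap = {4, 0};
	int level = 0; /* 0 = nursery, 1 = aging, 2 = tenured, ... */

	/* Keep retrying with a larger target until the copy succeeds,
	   the same shape as the for(;;) / try / catch loop in gc(). */
	for(;;)
	{
		try
		{
			heap.try_allot(8); /* pretend to copy the live objects */
			break;             /* collection finished */
		}
		catch(const must_start_gc_again &)
		{
			/* A generation filled up: escalate and start over,
			   as start_gc_again() does in the VM. */
			++level;
			heap.capacity *= 4;
			heap.used = 0;
			std::printf("generation full, retrying at level %d\n", level);
		}
	}

	std::printf("GC finished at level %d\n", level);
	return 0;
}

Because the unwind is now an ordinary C++ exception rather than a longjmp, destructors of locals between the throw site and the catch run normally, which is what the removed "XXX not exception-safe" comment was complaining about.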