Change C++ exception usage back into a longjmp() due to GCC bugs

db4
Slava Pestov 2009-10-05 03:27:28 -05:00
parent 14840edc4b
commit 6c047127ee
9 changed files with 122 additions and 126 deletions
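Editor's note: for orientation, here is a minimal, self-contained C++ sketch (invented names, not code from this commit) of the two styles the commit trades between: signalling an out-of-space condition with a thrown exception versus with setjmp()/longjmp(). One practical difference is that longjmp() bypasses C++ unwinding entirely, so it runs no destructors in the frames it skips; that is presumably acceptable here because the frames between the allocation check and the GC driver hold only plain cells and pointers.

#include <csetjmp>
#include <cstdio>
#include <exception>

/* Style the commit removes: a C++ exception caught by the GC driver */
struct generation_full : std::exception { };

static void allot_with_exception(bool full)
{
	if(full) throw generation_full();
}

/* Style the commit restores: longjmp() back to a setjmp() in the driver */
static jmp_buf gc_unwind;

static void allot_with_longjmp(bool full)
{
	if(full) std::longjmp(gc_unwind,1);   /* rewinds to the setjmp() below */
}

int main()
{
	try { allot_with_exception(true); }
	catch(const generation_full &) { std::puts("caught exception"); }

	if(setjmp(gc_unwind))
		std::puts("returned via longjmp");
	else
		allot_with_longjmp(true);

	return 0;
}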

@@ -60,6 +60,7 @@ DLL_OBJS = $(PLAF_DLL_OBJS) \
 	vm/strings.o \
 	vm/tuples.o \
 	vm/utilities.o \
+	vm/vm.o \
 	vm/words.o \
 	vm/write_barrier.o

@@ -12,18 +12,16 @@ gc_state::gc_state(data_heap *data_, bool growing_data_heap_, cell collecting_gen
 	data(data_),
 	growing_data_heap(growing_data_heap_),
 	collecting_gen(collecting_gen_),
 	collecting_aging_again(false),
 	start_time(current_micros()) { }
 
 gc_state::~gc_state() { }
 
-/* If a generation fills up, throw this error. It is caught in garbage_collection() */
-struct generation_full_condition { };
 
 /* Given a pointer to oldspace, copy it to newspace */
 object *factor_vm::copy_untagged_object_impl(object *pointer, cell size)
 {
 	if(current_gc->newspace->here + size >= current_gc->newspace->end)
-		throw generation_full_condition();
+		longjmp(current_gc->gc_unwind,1);
 
 	object *newpointer = allot_zone(current_gc->newspace,size);
@@ -502,7 +500,6 @@ void factor_vm::begin_gc(cell requested_bytes)
 void factor_vm::end_gc()
 {
 	gc_stats *s = &stats[current_gc->collecting_gen];
 	cell gc_elapsed = (current_micros() - current_gc->start_time);
@@ -545,77 +542,70 @@ void factor_vm::garbage_collection(cell collecting_gen_, bool growing_data_heap_
 	/* Keep trying to GC higher and higher generations until we don't run out
 	of space */
-	for(;;)
-	{
-		try
-		{
-			begin_gc(requested_bytes);
-
-			/* Initialize chase pointer */
-			cell scan = current_gc->newspace->here;
-
-			/* Trace objects referenced from global environment */
-			trace_roots();
-
-			/* Trace objects referenced from stacks, unless we're doing
-			save-image-and-exit in which case stack objects are irrelevant */
-			if(trace_contexts_) trace_contexts();
-
-			/* Trace objects referenced from older generations */
-			trace_cards();
-
-			/* On minor GC, trace code heap roots if it has pointers
-			to this generation or younger. Otherwise, tracing data heap objects
-			will mark all reachable code blocks, and we free the unmarked ones
-			after. */
-			if(!current_gc->collecting_tenured_p() && current_gc->collecting_gen >= last_code_heap_scan)
-			{
-				update_code_heap_roots();
-			}
-
-			/* do some copying -- this is where most of the work is done */
-			copy_reachable_objects(scan,&current_gc->newspace->here);
-
-			/* On minor GC, update literal references in code blocks, now that all
-			data heap objects are in their final location. On a major GC,
-			free all code blocks that did not get marked during tracing. */
-			if(current_gc->collecting_tenured_p())
-				free_unmarked_code_blocks();
-			else
-				update_dirty_code_blocks();
-
-			/* GC completed without any generations filling up; finish up */
-			break;
-		}
-		catch(const generation_full_condition &c)
-		{
-			/* We come back here if a generation is full */
-
-			/* We have no older generations we can try collecting, so we
-			resort to growing the data heap */
-			if(current_gc->collecting_tenured_p())
-			{
-				current_gc->growing_data_heap = true;
-
-				/* see the comment in unmark_marked() */
-				code->unmark_marked();
-			}
-			/* we try collecting aging space twice before going on to
-			collect tenured */
-			else if(data->have_aging_p()
-				&& current_gc->collecting_gen == data->aging()
-				&& !current_gc->collecting_aging_again)
-			{
-				current_gc->collecting_aging_again = true;
-			}
-			/* Collect the next oldest generation */
-			else
-			{
-				current_gc->collecting_gen++;
-			}
-		}
-	}
+	if(setjmp(current_gc->gc_unwind))
+	{
+		/* We come back here if a generation is full */
+
+		/* We have no older generations we can try collecting, so we
+		resort to growing the data heap */
+		if(current_gc->collecting_tenured_p())
+		{
+			current_gc->growing_data_heap = true;
+
+			/* see the comment in unmark_marked() */
+			code->unmark_marked();
+		}
+		/* we try collecting aging space twice before going on to
+		collect tenured */
+		else if(data->have_aging_p()
+			&& current_gc->collecting_gen == data->aging()
+			&& !current_gc->collecting_aging_again)
+		{
+			current_gc->collecting_aging_again = true;
+		}
+		/* Collect the next oldest generation */
+		else
+		{
+			current_gc->collecting_gen++;
+		}
+	}
+
+	begin_gc(requested_bytes);
+
+	/* Initialize chase pointer */
+	cell scan = current_gc->newspace->here;
+
+	/* Trace objects referenced from global environment */
+	trace_roots();
+
+	/* Trace objects referenced from stacks, unless we're doing
+	save-image-and-exit in which case stack objects are irrelevant */
+	if(trace_contexts_) trace_contexts();
+
+	/* Trace objects referenced from older generations */
+	trace_cards();
+
+	/* On minor GC, trace code heap roots if it has pointers
+	to this generation or younger. Otherwise, tracing data heap objects
+	will mark all reachable code blocks, and we free the unmarked ones
+	after. */
+	if(!current_gc->collecting_tenured_p() && current_gc->collecting_gen >= last_code_heap_scan)
+	{
+		update_code_heap_roots();
+	}
+
+	/* do some copying -- this is where most of the work is done */
+	copy_reachable_objects(scan,&current_gc->newspace->here);
+
+	/* On minor GC, update literal references in code blocks, now that all
+	data heap objects are in their final location. On a major GC,
+	free all code blocks that did not get marked during tracing. */
+	if(current_gc->collecting_tenured_p())
+		free_unmarked_code_blocks();
+	else
+		update_dirty_code_blocks();
+
+	/* GC completed without any generations filling up; finish up */
 	end_gc();
 	delete current_gc;
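Editor's note: the rewritten control flow above has no explicit for(;;) because setjmp() supplies the loop. On the first pass it returns 0 and the collection proceeds; when a copy overflows newspace, longjmp(current_gc->gc_unwind,1) lands back at the setjmp(), which now returns nonzero, the "generation full" branch escalates to an older generation (or grows the heap), and control falls through into begin_gc() and the copying pass again. A runnable toy of that shape, with invented names:

#include <csetjmp>
#include <cstdio>

static jmp_buf unwind;
static int generation;           /* stands in for current_gc->collecting_gen; kept static so
                                    its value is well defined after a longjmp() */

static void copy_pass()
{
	if(generation < 2)           /* pretend the two youngest generations fill up */
		std::longjmp(unwind,1);  /* plays the role of copy_untagged_object_impl() */
	std::printf("collection of generation %d succeeded\n",generation);
}

int main()
{
	if(setjmp(unwind))
		generation++;            /* "generation full": collect the next oldest one */

	copy_pass();                 /* control falls through to here on every retry */
	return 0;
}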

@@ -34,6 +34,8 @@ struct gc_state {
 	/* GC start time, for benchmarking */
 	u64 start_time;
 
+	jmp_buf gc_unwind;
+
 	explicit gc_state(data_heap *data_, bool growing_data_heap_, cell collecting_gen_);
 	~gc_state();

vm/generic_arrays.hpp (mode changed: Normal file → Executable file)

@@ -4,7 +4,7 @@ namespace factor
 template<typename Array> cell array_capacity(Array *array)
 {
 #ifdef FACTOR_DEBUG
-	assert(array->h.hi_tag() == T::type_number);
+	assert(array->h.hi_tag() == Array::type_number);
 #endif
 	return array->capacity >> TAG_BITS;
 }
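Editor's note: the generic_arrays.hpp change is a real fix, not a stylistic rename. The template parameter is called Array, so T names nothing, and the broken assert only compiled because FACTOR_DEBUG (and with it the assert) is normally preprocessed away. A minimal sketch of the same failure mode, with invented types:

#include <cassert>

struct my_array
{
	enum { type_number = 7 };
	int header;
};

template<typename Array> int check_header(Array *a)
{
#ifdef MY_DEBUG
	/* The broken form referenced an undeclared name and therefore only got
	   past the compiler while MY_DEBUG was off and the line was dropped:
	   assert(a->header == T::type_number); */
	assert(a->header == Array::type_number);   /* fixed: use the parameter's actual name */
#endif
	return a->header;
}

int main()
{
	my_array a = { 7 };
	return check_header(&a) == 7 ? 0 : 1;
}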

@@ -46,11 +46,12 @@ LONG factor_vm::exception_handler(PEXCEPTION_POINTERS pe)
 	else
 		signal_callstack_top = NULL;
 
-	switch (e->ExceptionCode) {
-	case EXCEPTION_ACCESS_VIOLATION:
+	switch (e->ExceptionCode)
+	{
+	case EXCEPTION_ACCESS_VIOLATION:
 		signal_fault_addr = e->ExceptionInformation[1];
 		c->EIP = (cell)factor::memory_signal_handler_impl;
 		break;
 
 	case STATUS_FLOAT_DENORMAL_OPERAND:
 	case STATUS_FLOAT_DIVIDE_BY_ZERO:

@@ -330,6 +330,7 @@ void factor_vm::compile_all_words()
 	}
 
+	printf("done\n");
 	/* Update XTs in code heap */
 	word_updater updater(this);
 	iterate_code_heap(updater);

vm/tuples.cpp (mode changed: Normal file → Executable file; no content changes)

vm/vm.cpp (new Executable file)

@@ -0,0 +1,11 @@
+#include "master.hpp"
+
+namespace factor
+{
+
+factor_vm::factor_vm()
+{
+	memset(this,0,sizeof(factor_vm));
+}
+
+}
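Editor's note: moving the constructor out of line also quietly fixes a bug visible in the vm.hpp hunk further down. The removed inline body zeroed with memset(this,0,sizeof(this)), and sizeof(this) is the size of the pointer (4 or 8 bytes), not of the object, so "just to make sure" only cleared the first few bytes; the new body uses sizeof(factor_vm), which covers the whole struct. A small demonstration with an invented type:

#include <cstdio>
#include <cstring>

struct widget
{
	char payload[64];

	void clear_wrong() { std::memset(this,0,sizeof(this));   }  /* sizeof(this)   == sizeof(widget *) */
	void clear_right() { std::memset(this,0,sizeof(widget)); }  /* sizeof(widget) == 64               */
};

int main()
{
	widget w;
	std::memset(w.payload,0x7f,sizeof w.payload);

	w.clear_wrong();
	std::printf("byte 32 after clear_wrong: %d\n",w.payload[32]);  /* still 127: only a pointer's worth was cleared */

	w.clear_right();
	std::printf("byte 32 after clear_right: %d\n",w.payload[32]);  /* 0 */
	return 0;
}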

vm/vm.hpp (mode changed: Normal file → Executable file)

@@ -535,7 +535,6 @@ struct factor_vm
 	//code_heap
 	heap *code;
 	unordered_map<heap_block *, char *> forwarding;
-	typedef void (factor_vm::*code_heap_iterator)(code_block *compiled);
 
 	void init_code_heap(cell size);
 	bool in_code_heap_p(cell ptr);
@@ -743,58 +742,49 @@ struct factor_vm
 	void call_fault_handler(exception_type_t exception, exception_data_type_t code, MACH_EXC_STATE_TYPE *exc_state, MACH_THREAD_STATE_TYPE *thread_state, MACH_FLOAT_STATE_TYPE *float_state);
 #endif
 
-	factor_vm()
-		: profiling_p(false),
-		secure_gc(false),
-		gc_off(false),
-		fep_disabled(false),
-		full_output(false),
-		max_pic_size(0)
-	{
-		memset(this,0,sizeof(this)); // just to make sure
-	}
+	factor_vm();
 };
 
 #ifndef FACTOR_REENTRANT
 #define FACTOR_SINGLE_THREADED_TESTING
 #endif
 
 #ifdef FACTOR_SINGLE_THREADED_SINGLETON
 /* calls are dispatched using the singleton vm ptr */
 extern factor_vm *vm;
 #define PRIMITIVE_GETVM() vm
 #define PRIMITIVE_OVERFLOW_GETVM() vm
 #define VM_PTR vm
 #define ASSERTVM()
 #define SIGNAL_VM_PTR() vm
 #endif
 
 #ifdef FACTOR_SINGLE_THREADED_TESTING
 /* calls are dispatched as per multithreaded, but checked against singleton */
 extern factor_vm *vm;
 #define ASSERTVM() assert(vm==myvm)
 #define PRIMITIVE_GETVM() ((factor_vm*)myvm)
 #define PRIMITIVE_OVERFLOW_GETVM() ASSERTVM(); myvm
 #define VM_PTR myvm
 #define SIGNAL_VM_PTR() tls_vm()
 #endif
 
 #ifdef FACTOR_REENTRANT_TLS
 /* uses thread local storage to obtain vm ptr */
 #define PRIMITIVE_GETVM() tls_vm()
 #define PRIMITIVE_OVERFLOW_GETVM() tls_vm()
 #define VM_PTR tls_vm()
 #define ASSERTVM()
 #define SIGNAL_VM_PTR() tls_vm()
 #endif
 
 #ifdef FACTOR_REENTRANT
 #define PRIMITIVE_GETVM() ((factor_vm*)myvm)
 #define PRIMITIVE_OVERFLOW_GETVM() ((factor_vm*)myvm)
 #define VM_PTR myvm
 #define ASSERTVM()
 #define SIGNAL_VM_PTR() tls_vm()
 #endif
 
 extern unordered_map<THREADHANDLE, factor_vm *> thread_vms;
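Editor's note: the unchanged context above documents how primitives obtain their factor_vm pointer under the different build configurations: a global singleton, a singleton cross-checked against a passed-in pointer, or thread-local storage. A generic, stand-alone sketch of that dispatch idea (invented names, not the actual Factor macros):

#include <cstdio>

struct toy_vm { int primitive_calls; };

#ifdef TOY_SINGLE_THREADED
static toy_vm *global_vm;                /* one VM per process, like FACTOR_SINGLE_THREADED_SINGLETON */
#define GET_VM() (global_vm)
#else
static thread_local toy_vm *current_vm;  /* one VM per thread, like FACTOR_REENTRANT_TLS */
#define GET_VM() (current_vm)
#endif

static void primitive_example()
{
	toy_vm *vm = GET_VM();               /* which lookup happens is fixed at compile time */
	vm->primitive_calls++;
}

int main()
{
	toy_vm vm_instance = { 0 };
#ifdef TOY_SINGLE_THREADED
	global_vm = &vm_instance;
#else
	current_vm = &vm_instance;
#endif
	primitive_example();
	std::printf("primitive calls: %d\n",vm_instance.primitive_calls);
	return 0;
}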