VM: Refactor gc* to Factor style

Erik Charlebois 2013-05-11 22:04:08 -04:00
parent 9e2d775095
commit 34d04b9306
4 changed files with 304 additions and 385 deletions

vm/gc.cpp
@@ -1,293 +1,244 @@
#include "master.hpp"

namespace factor {

gc_event::gc_event(gc_op op_, factor_vm* parent)
    : op(op_),
      cards_scanned(0),
      decks_scanned(0),
      code_blocks_scanned(0),
      start_time(nano_count()),
      card_scan_time(0),
      code_scan_time(0),
      data_sweep_time(0),
      code_sweep_time(0),
      compaction_time(0) {
  data_heap_before = parent->data_room();
  code_heap_before = parent->code_room();
  start_time = nano_count();
}

void gc_event::started_card_scan() { temp_time = nano_count(); }

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_) {
  cards_scanned += cards_scanned_;
  decks_scanned += decks_scanned_;
  card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan() { temp_time = nano_count(); }

void gc_event::ended_code_scan(cell code_blocks_scanned_) {
  code_blocks_scanned += code_blocks_scanned_;
  code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep() { temp_time = nano_count(); }

void gc_event::ended_data_sweep() {
  data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep() { temp_time = nano_count(); }

void gc_event::ended_code_sweep() {
  code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction() { temp_time = nano_count(); }

void gc_event::ended_compaction() {
  compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm* parent) {
  data_heap_after = parent->data_room();
  code_heap_after = parent->code_room();
  total_time = (cell)(nano_count() - start_time);
}

gc_state::gc_state(gc_op op_, factor_vm* parent) : op(op_) {
  if (parent->gc_events) {
    event = new gc_event(op, parent);
    start_time = nano_count();
  } else
    event = NULL;
}

gc_state::~gc_state() {
  if (event) {
    delete event;
    event = NULL;
  }
}

void factor_vm::end_gc() {
  if (gc_events) {
    current_gc->event->ended_gc(this);
    gc_events->push_back(*current_gc->event);
  }
}

void factor_vm::start_gc_again() {
  end_gc();

  switch (current_gc->op) {
    case collect_nursery_op:
      /* Nursery collection can fail if aging does not have enough
         free space to fit all live objects from nursery. */
      current_gc->op = collect_aging_op;
      break;
    case collect_aging_op:
      /* Aging collection can fail if the aging semispace cannot fit
         all the live objects from the other aging semispace and the
         nursery. */
      current_gc->op = collect_to_tenured_op;
      break;
    default:
      /* Nothing else should fail mid-collection due to insufficient
         space in the target generation. */
      critical_error("Bad GC op", current_gc->op);
      break;
  }

  if (gc_events)
    current_gc->event = new gc_event(current_gc->op, this);
}

void factor_vm::set_current_gc_op(gc_op op) {
  current_gc->op = op;
  if (gc_events)
    current_gc->event->op = op;
}

void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p) {
  FACTOR_ASSERT(!gc_off);
  FACTOR_ASSERT(!current_gc);

  /* Important invariant: tenured space must have enough contiguous free
     space to fit the entire contents of the aging space and nursery. This is
     because when doing a full collection, objects from younger generations
     are promoted before any unreachable tenured objects are freed. */
  FACTOR_ASSERT(!data->high_fragmentation_p());

  current_gc = new gc_state(op, this);
  atomic::store(&current_gc_p, true);

  /* Keep trying to GC higher and higher generations until we don't run
     out of space in the target generation. */
  for (;;) {
    try {
      if (gc_events)
        current_gc->event->op = current_gc->op;

      switch (current_gc->op) {
        case collect_nursery_op:
          collect_nursery();
          break;
        case collect_aging_op:
          /* We end up here if the above fails. */
          collect_aging();
          if (data->high_fragmentation_p()) {
            /* Change GC op so that if we fail again, we crash. */
            set_current_gc_op(collect_full_op);
            collect_full(trace_contexts_p);
          }
          break;
        case collect_to_tenured_op:
          /* We end up here if the above fails. */
          collect_to_tenured();
          if (data->high_fragmentation_p()) {
            /* Change GC op so that if we fail again, we crash. */
            set_current_gc_op(collect_full_op);
            collect_full(trace_contexts_p);
          }
          break;
        case collect_full_op:
          collect_full(trace_contexts_p);
          break;
        case collect_compact_op:
          collect_compact(trace_contexts_p);
          break;
        case collect_growing_heap_op:
          collect_growing_heap(requested_size, trace_contexts_p);
          break;
        default:
          critical_error("Bad GC op", current_gc->op);
          break;
      }

      break;
    } catch (const must_start_gc_again&) {
      /* We come back here if the target generation is full. */
      start_gc_again();
      continue;
    }
  }

  end_gc();

  atomic::store(&current_gc_p, false);
  delete current_gc;
  current_gc = NULL;

  /* Check the invariant again, just in case. */
  FACTOR_ASSERT(!data->high_fragmentation_p());
}

/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
   uninitialized stack locations before actually calling the GC. See the
   comment in compiler.cfg.stacks.uninitialized for details. */
struct call_frame_scrubber {
  factor_vm* parent;
  context* ctx;

  explicit call_frame_scrubber(factor_vm* parent_, context* ctx_)
      : parent(parent_), ctx(ctx_) {}

  void operator()(void* frame_top, cell frame_size, code_block* owner,
                  void* addr) {
    cell return_address = owner->offset(addr);
    gc_info* info = owner->block_gc_info();

    FACTOR_ASSERT(return_address < owner->size());
    cell index = info->return_address_index(return_address);
    if (index != (cell) - 1)
      ctx->scrub_stacks(info, index);
  }
};

void factor_vm::scrub_context(context* ctx) {
  call_frame_scrubber scrubber(this, ctx);
  iterate_callstack(ctx, scrubber);
}

void factor_vm::scrub_contexts() {
  std::set<context*>::const_iterator begin = active_contexts.begin();
  std::set<context*>::const_iterator end = active_contexts.end();
  while (begin != end) {
    scrub_context(*begin);
    begin++;
  }
}

void factor_vm::primitive_minor_gc() {
  scrub_contexts();

  gc(collect_nursery_op, 0, /* requested size */
     true /* trace contexts? */);
}

void factor_vm::primitive_full_gc() {
  gc(collect_full_op, 0, /* requested size */
     true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc() {
  gc(collect_compact_op, 0, /* requested size */
     true /* trace contexts? */);
}
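The retry loop in factor_vm::gc above drives the collector's escalation policy: each op targets one generation, and a must_start_gc_again thrown mid-collection means the target was too small, so start_gc_again() promotes the op and the loop retries. A minimal standalone sketch of that pattern, with hypothetical stand-ins for the VM's ops and collectors (not VM code):

#include <cstdio>
#include <stdexcept>

/* Hypothetical stand-ins for the VM's ops and collectors. */
enum gc_op { nursery_op, aging_op, to_tenured_op, full_op };

struct must_start_gc_again : std::exception {};

static int failures = 2; /* pretend the first two target generations fill up */

void collect(gc_op op) {
  if (failures-- > 0)
    throw must_start_gc_again();
  std::printf("collected with op %d\n", (int)op);
}

gc_op escalate(gc_op op) {
  switch (op) {
    case nursery_op:
      return aging_op; /* aging couldn't fit the nursery survivors */
    case aging_op:
      return to_tenured_op; /* semispace couldn't fit both spaces */
    default:
      throw std::runtime_error("Bad GC op"); /* nothing else may fail */
  }
}

int main() {
  gc_op op = nursery_op;
  for (;;) {
    try {
      collect(op);
      break; /* success: leave the retry loop */
    } catch (const must_start_gc_again&) {
      op = escalate(op); /* target generation was full; aim higher */
    }
  }
}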
@@ -295,66 +246,57 @@ void factor_vm::primitive_compact_gc()
/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
/* Allocates memory */
object* factor_vm::allot_large_object(cell type, cell size) {
  /* If tenured space does not have enough room, collect and compact */
  cell requested_size = size + data->high_water_mark();
  if (!data->tenured->can_allot_p(requested_size)) {
    primitive_compact_gc();

    /* If it still won't fit, grow the heap */
    if (!data->tenured->can_allot_p(requested_size)) {
      gc(collect_growing_heap_op, size, /* requested size */
         true /* trace contexts? */);
    }
  }

  object* obj = data->tenured->allot(size);

  /* Allows initialization code to store old->new pointers
     without hitting the write barrier in the common case of
     a nursery allocation */
  write_barrier(obj, size);

  obj->initialize(type);
  return obj;
}

void factor_vm::primitive_enable_gc_events() {
  gc_events = new std::vector<gc_event>();
}

/* Allocates memory */
void factor_vm::primitive_disable_gc_events() {
  if (gc_events) {
    growable_array result(this);

    std::vector<gc_event>* gc_events = this->gc_events;
    this->gc_events = NULL;

    std::vector<gc_event>::const_iterator iter = gc_events->begin();
    std::vector<gc_event>::const_iterator end = gc_events->end();

    for (; iter != end; iter++) {
      gc_event event = *iter;
      byte_array* obj = byte_array_from_value(&event);
      result.add(tag<byte_array>(obj));
    }

    result.trim();
    ctx->push(result.elements.value());

    delete this->gc_events;
  } else
    ctx->push(false_object);
}

}
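allot_large_object above tries the cheapest remedy first: compact the heap, and only if the request still does not fit, grow the heap by at least the requested size. A sketch of that two-stage fallback over a toy tenured space (all names here are hypothetical):

#include <cstdio>

/* Toy model of a tenured space with a fixed amount of free room. */
struct tenured_space {
  long free_space;
  bool can_allot_p(long size) const { return size <= free_space; }
};

void compact(tenured_space& t) { t.free_space += 100; } /* defragment: reclaim some room */
void grow(tenured_space& t, long size) { t.free_space += size; } /* enlarge the heap */

void allot_large(tenured_space& t, long size) {
  if (!t.can_allot_p(size)) {
    compact(t); /* cheapest remedy first */
    if (!t.can_allot_p(size))
      grow(t, size); /* last resort: grow by at least the request */
  }
  t.free_space -= size;
  std::printf("allotted %ld, %ld left\n", size, t.free_space);
}

int main() {
  tenured_space t = { 50 };
  allot_large(t, 400); /* forces both the compact and the grow */
}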

vm/gc.hpp
@@ -1,55 +1,54 @@
namespace factor {

enum gc_op {
  collect_nursery_op,
  collect_aging_op,
  collect_to_tenured_op,
  collect_full_op,
  collect_compact_op,
  collect_growing_heap_op
};

struct gc_event {
  gc_op op;
  data_heap_room data_heap_before;
  code_heap_room code_heap_before;
  data_heap_room data_heap_after;
  code_heap_room code_heap_after;
  cell cards_scanned;
  cell decks_scanned;
  cell code_blocks_scanned;
  u64 start_time;
  cell total_time;
  cell card_scan_time;
  cell code_scan_time;
  cell data_sweep_time;
  cell code_sweep_time;
  cell compaction_time;
  u64 temp_time;

  gc_event(gc_op op_, factor_vm* parent);
  void started_card_scan();
  void ended_card_scan(cell cards_scanned_, cell decks_scanned_);
  void started_code_scan();
  void ended_code_scan(cell code_blocks_scanned_);
  void started_data_sweep();
  void ended_data_sweep();
  void started_code_sweep();
  void ended_code_sweep();
  void started_compaction();
  void ended_compaction();
  void ended_gc(factor_vm* parent);
};

struct gc_state {
  gc_op op;
  u64 start_time;
  gc_event* event;

  explicit gc_state(gc_op op_, factor_vm* parent);
  ~gc_state();

  void start_again(gc_op op_, factor_vm* parent);
};

}
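The started_ and ended_ pairs declared above all share the single temp_time scratch field: collection phases never overlap within one GC, so one timestamp suffices and each phase cost is a plain nano_count() delta. A self-contained sketch of the same bracketing scheme, using std::chrono in place of the VM's nano_count():

#include <chrono>
#include <cstdint>
#include <cstdio>

/* Minimal analogue of gc_event's timing: one scratch timestamp serves all
   started/ended pairs because GC phases never overlap in a collection. */
struct phase_timer {
  uint64_t temp_time;
  uint64_t card_scan_time;

  static uint64_t nano_count() { /* stands in for the VM's nano_count() */
    return (uint64_t)std::chrono::duration_cast<std::chrono::nanoseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
  }

  void started_card_scan() { temp_time = nano_count(); }
  void ended_card_scan() { card_scan_time = nano_count() - temp_time; }
};

int main() {
  phase_timer t;
  t.started_card_scan();
  /* ... the card scan itself would run here ... */
  t.ended_card_scan();
  std::printf("card scan took %llu ns\n",
              (unsigned long long)t.card_scan_time);
}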

vm/gc_info.cpp
@@ -1,19 +1,16 @@
#include "master.hpp"

namespace factor {

cell gc_info::return_address_index(cell return_address) {
  u32* return_address_array = return_addresses();

  for (cell i = 0; i < return_address_count; i++) {
    if (return_address == return_address_array[i])
      return i;
  }

  return (cell) - 1;
}

}
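return_address_index above resolves a return address to a callsite slot by linear scan, returning (cell) - 1, the all-ones word, as a "not found" sentinel that call_frame_scrubber tests before scrubbing. The idiom in isolation, with cell approximated as a hypothetical unsigned word type:

#include <cstdio>

typedef unsigned long cell; /* approximates the VM's unsigned word type */

/* Linear search; (cell) - 1 (all bits set) marks "absent", which is safe
   because a valid index can never reach the maximum cell value. */
cell index_of(const unsigned* xs, cell count, unsigned key) {
  for (cell i = 0; i < count; i++)
    if (xs[i] == key)
      return i;
  return (cell) - 1;
}

int main() {
  unsigned addrs[] = { 0x10, 0x24, 0x3c };
  cell idx = index_of(addrs, 3, 0x24);
  if (idx != (cell) - 1)
    std::printf("found at index %lu\n", idx); /* prints: found at index 1 */
}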

vm/gc_info.hpp
@@ -1,67 +1,48 @@
namespace factor {

struct gc_info {
  u32 scrub_d_count;
  u32 scrub_r_count;
  u32 gc_root_count;
  u32 derived_root_count;
  u32 return_address_count;

  cell callsite_bitmap_size() {
    return scrub_d_count + scrub_r_count + gc_root_count;
  }

  cell total_bitmap_size() {
    return return_address_count * callsite_bitmap_size();
  }

  cell total_bitmap_bytes() { return ((total_bitmap_size() + 7) / 8); }

  u32* return_addresses() { return (u32*)this - return_address_count; }

  u32* base_pointer_map() {
    return return_addresses() - return_address_count * derived_root_count;
  }

  u8* gc_info_bitmap() {
    return (u8*)base_pointer_map() - total_bitmap_bytes();
  }

  cell callsite_scrub_d(cell index) { return index * scrub_d_count; }

  cell callsite_scrub_r(cell index) {
    return return_address_count * scrub_d_count + index * scrub_r_count;
  }

  cell callsite_gc_roots(cell index) {
    return return_address_count * scrub_d_count +
           return_address_count * scrub_r_count + index * gc_root_count;
  }

  u32 lookup_base_pointer(cell index, cell derived_root) {
    return base_pointer_map()[index * derived_root_count + derived_root];
  }

  cell return_address_index(cell return_address);
};

}
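The accessors in gc_info work backwards from this: the struct is the fixed-size trailer of a blob appended to each code block, with the return-address array directly below it, the base-pointer map below that, and the packed bitmap at the bottom. A worked sketch of that layout arithmetic with hypothetical counts:

#include <cstdint>
#include <cstdio>

int main() {
  /* Hypothetical counts for one compiled code block. */
  uint32_t return_address_count = 3;
  uint32_t derived_root_count = 2;
  uint32_t scrub_d_count = 4, scrub_r_count = 2, gc_root_count = 5;

  /* Bits per callsite, packed total across callsites, rounded to bytes. */
  uint32_t callsite_bits = scrub_d_count + scrub_r_count + gc_root_count; /* 11 */
  uint32_t total_bits = return_address_count * callsite_bits;             /* 33 */
  uint32_t bitmap_bytes = (total_bits + 7) / 8;                           /* 5 */

  /* Byte offsets below the struct's own address ("this"): */
  uint32_t return_addrs = return_address_count * 4;                  /* u32s: 12 */
  uint32_t base_map = return_addrs +
                      return_address_count * derived_root_count * 4; /* 12+24=36 */
  uint32_t bitmap = base_map + bitmap_bytes;                         /* 36+5=41 */

  std::printf("return addresses at this-%u, base pointer map at this-%u, "
              "bitmap at this-%u\n", return_addrs, base_map, bitmap);
}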