VM: Refactor gc* to Factor style

db4
Erik Charlebois 2013-05-11 22:04:08 -04:00
parent 9e2d775095
commit 34d04b9306
4 changed files with 304 additions and 385 deletions

vm/gc.cpp

@@ -1,10 +1,9 @@
#include "master.hpp"

namespace factor {

gc_event::gc_event(gc_op op_, factor_vm* parent)
    : op(op_),
      cards_scanned(0),
      decks_scanned(0),
      code_blocks_scanned(0),
@@ -13,108 +12,77 @@ gc_event::gc_event(gc_op op_, factor_vm *parent) :
      code_scan_time(0),
      data_sweep_time(0),
      code_sweep_time(0),
      compaction_time(0) {
  data_heap_before = parent->data_room();
  code_heap_before = parent->code_room();
  start_time = nano_count();
}

void gc_event::started_card_scan() { temp_time = nano_count(); }

void gc_event::ended_card_scan(cell cards_scanned_, cell decks_scanned_) {
  cards_scanned += cards_scanned_;
  decks_scanned += decks_scanned_;
  card_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_scan() { temp_time = nano_count(); }

void gc_event::ended_code_scan(cell code_blocks_scanned_) {
  code_blocks_scanned += code_blocks_scanned_;
  code_scan_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_data_sweep() { temp_time = nano_count(); }

void gc_event::ended_data_sweep() {
  data_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_code_sweep() { temp_time = nano_count(); }

void gc_event::ended_code_sweep() {
  code_sweep_time = (cell)(nano_count() - temp_time);
}

void gc_event::started_compaction() { temp_time = nano_count(); }

void gc_event::ended_compaction() {
  compaction_time = (cell)(nano_count() - temp_time);
}

void gc_event::ended_gc(factor_vm* parent) {
  data_heap_after = parent->data_room();
  code_heap_after = parent->code_room();
  total_time = (cell)(nano_count() - start_time);
}

gc_state::gc_state(gc_op op_, factor_vm* parent) : op(op_) {
  if (parent->gc_events) {
    event = new gc_event(op, parent);
    start_time = nano_count();
  } else
    event = NULL;
}

gc_state::~gc_state() {
  if (event) {
    delete event;
    event = NULL;
  }
}

void factor_vm::end_gc() {
  if (gc_events) {
    current_gc->event->ended_gc(this);
    gc_events->push_back(*current_gc->event);
  }
}

void factor_vm::start_gc_again() {
  end_gc();

  switch (current_gc->op) {
    case collect_nursery_op:
      /* Nursery collection can fail if aging does not have enough
         free space to fit all live objects from nursery. */
@@ -129,22 +97,21 @@ void factor_vm::start_gc_again()
    default:
      /* Nothing else should fail mid-collection due to insufficient
         space in the target generation. */
      critical_error("Bad GC op", current_gc->op);
      break;
  }

  if (gc_events)
    current_gc->event = new gc_event(current_gc->op, this);
}

void factor_vm::set_current_gc_op(gc_op op) {
  current_gc->op = op;
  if (gc_events)
    current_gc->event->op = op;
}

void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p) {
  FACTOR_ASSERT(!gc_off);
  FACTOR_ASSERT(!current_gc);
@@ -154,29 +121,25 @@ void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
     are promoted before any unreachable tenured objects are freed. */
  FACTOR_ASSERT(!data->high_fragmentation_p());

  current_gc = new gc_state(op, this);
  atomic::store(&current_gc_p, true);

  /* Keep trying to GC higher and higher generations until we don't run
     out of space in the target generation. */
  for (;;) {
    try {
      if (gc_events)
        current_gc->event->op = current_gc->op;

      switch (current_gc->op) {
        case collect_nursery_op:
          collect_nursery();
          break;
        case collect_aging_op:
          /* We end up here if the above fails. */
          collect_aging();
          if (data->high_fragmentation_p()) {
            /* Change GC op so that if we fail again, we crash. */
            set_current_gc_op(collect_full_op);
            collect_full(trace_contexts_p);
          }
@@ -184,10 +147,8 @@ void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
        case collect_to_tenured_op:
          /* We end up here if the above fails. */
          collect_to_tenured();
          if (data->high_fragmentation_p()) {
            /* Change GC op so that if we fail again, we crash. */
            set_current_gc_op(collect_full_op);
            collect_full(trace_contexts_p);
          }
@@ -199,17 +160,16 @@ void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
          collect_compact(trace_contexts_p);
          break;
        case collect_growing_heap_op:
          collect_growing_heap(requested_size, trace_contexts_p);
          break;
        default:
          critical_error("Bad GC op", current_gc->op);
          break;
      }

      break;
    }
    catch (const must_start_gc_again&) {
      /* We come back here if the target generation is full. */
      start_gc_again();
      continue;
@@ -227,66 +187,57 @@ void factor_vm::gc(gc_op op, cell requested_size, bool trace_contexts_p)
}

/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
   uninitialized stack locations before actually calling the GC. See the
   comment in compiler.cfg.stacks.uninitialized for details. */
struct call_frame_scrubber {
  factor_vm* parent;
  context* ctx;

  explicit call_frame_scrubber(factor_vm* parent_, context* ctx_)
      : parent(parent_), ctx(ctx_) {}

  void operator()(void* frame_top, cell frame_size, code_block* owner,
                  void* addr) {
    cell return_address = owner->offset(addr);
    gc_info* info = owner->block_gc_info();

    FACTOR_ASSERT(return_address < owner->size());
    cell index = info->return_address_index(return_address);
    if (index != (cell) - 1)
      ctx->scrub_stacks(info, index);
  }
};

void factor_vm::scrub_context(context* ctx) {
  call_frame_scrubber scrubber(this, ctx);
  iterate_callstack(ctx, scrubber);
}

void factor_vm::scrub_contexts() {
  std::set<context*>::const_iterator begin = active_contexts.begin();
  std::set<context*>::const_iterator end = active_contexts.end();
  while (begin != end) {
    scrub_context(*begin);
    begin++;
  }
}

void factor_vm::primitive_minor_gc() {
  scrub_contexts();

  gc(collect_nursery_op, 0, /* requested size */
     true /* trace contexts? */);
}

void factor_vm::primitive_full_gc() {
  gc(collect_full_op, 0, /* requested size */
     true /* trace contexts? */);
}

void factor_vm::primitive_compact_gc() {
  gc(collect_compact_op, 0, /* requested size */
     true /* trace contexts? */);
}
@@ -295,56 +246,48 @@ void factor_vm::primitive_compact_gc()
 * fashion!
 */

/* Allocates memory */
object* factor_vm::allot_large_object(cell type, cell size) {
  /* If tenured space does not have enough room, collect and compact */
  cell requested_size = size + data->high_water_mark();
  if (!data->tenured->can_allot_p(requested_size)) {
    primitive_compact_gc();

    /* If it still won't fit, grow the heap */
    if (!data->tenured->can_allot_p(requested_size)) {
      gc(collect_growing_heap_op, size, /* requested size */
         true /* trace contexts? */);
    }
  }

  object* obj = data->tenured->allot(size);

  /* Allows initialization code to store old->new pointers
     without hitting the write barrier in the common case of
     a nursery allocation */
  write_barrier(obj, size);

  obj->initialize(type);
  return obj;
}

void factor_vm::primitive_enable_gc_events() {
  gc_events = new std::vector<gc_event>();
}

/* Allocates memory */
void factor_vm::primitive_disable_gc_events() {
  if (gc_events) {
    growable_array result(this);

    std::vector<gc_event>* gc_events = this->gc_events;
    this->gc_events = NULL;

    std::vector<gc_event>::const_iterator iter = gc_events->begin();
    std::vector<gc_event>::const_iterator end = gc_events->end();
    for (; iter != end; iter++) {
      gc_event event = *iter;
      byte_array* obj = byte_array_from_value(&event);
      result.add(tag<byte_array>(obj));
    }
@@ -352,8 +295,7 @@ void factor_vm::primitive_disable_gc_events()
    ctx->push(result.elements.value());

    delete this->gc_events;
  } else
    ctx->push(false_object);
}
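
Note (not part of the commit): the gc_event methods above follow a simple bracket pattern; each started_* call stamps temp_time and the matching ended_* call stores the elapsed nanoseconds into the corresponding field. A minimal standalone sketch of that pattern, with std::chrono standing in for the VM's nano_count():

#include <chrono>
#include <cstdint>
#include <iostream>

/* Stand-in for the VM's nano_count(): a monotonic nanosecond counter. */
static uint64_t nano_count() {
  using namespace std::chrono;
  return (uint64_t)duration_cast<nanoseconds>(
      steady_clock::now().time_since_epoch()).count();
}

/* Simplified model of gc_event's paired started/ended timing brackets. */
struct timed_event {
  uint64_t temp_time = 0;
  uint64_t card_scan_time = 0;

  void started_card_scan() { temp_time = nano_count(); }
  void ended_card_scan() { card_scan_time = nano_count() - temp_time; }
};

int main() {
  timed_event event;
  event.started_card_scan();
  /* ... the work being measured would run here ... */
  event.ended_card_scan();
  std::cout << "card scan: " << event.card_scan_time << " ns\n";
}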

vm/gc.hpp

@@ -1,5 +1,4 @@
namespace factor {

enum gc_op {
  collect_nursery_op,
@@ -28,7 +27,7 @@ struct gc_event {
  cell compaction_time;
  u64 temp_time;

  gc_event(gc_op op_, factor_vm* parent);
  void started_card_scan();
  void ended_card_scan(cell cards_scanned_, cell decks_scanned_);
  void started_code_scan();
@@ -39,17 +38,17 @@ struct gc_event {
  void ended_code_sweep();
  void started_compaction();
  void ended_compaction();
  void ended_gc(factor_vm* parent);
};

struct gc_state {
  gc_op op;
  u64 start_time;
  gc_event* event;

  explicit gc_state(gc_op op_, factor_vm* parent);
  ~gc_state();
  void start_again(gc_op op_, factor_vm* parent);
};

}

vm/gc_info.cpp

@@ -1,19 +1,16 @@
#include "master.hpp"

namespace factor {

cell gc_info::return_address_index(cell return_address) {
  u32* return_address_array = return_addresses();

  for (cell i = 0; i < return_address_count; i++) {
    if (return_address == return_address_array[i])
      return i;
  }

  return (cell) - 1;
}

}
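
Note (not part of the commit): return_address_index is a linear scan over the u32 return-address table stored just before the gc_info header, with (cell)-1 as the not-found sentinel that call_frame_scrubber in gc.cpp tests against. A small self-contained model of that lookup:

#include <cassert>
#include <cstdint>

typedef uintptr_t cell; /* stand-in for the VM's cell type */

/* Simplified model of gc_info::return_address_index: linear scan over the
   return-address table, (cell)-1 when the address is not a recorded site. */
static cell return_address_index(const uint32_t* table, cell count,
                                 cell return_address) {
  for (cell i = 0; i < count; i++) {
    if (return_address == table[i])
      return i;
  }
  return (cell)-1;
}

int main() {
  const uint32_t table[] = { 0x10, 0x24, 0x3c }; /* made-up offsets */
  assert(return_address_index(table, 3, 0x24) == 1);
  assert(return_address_index(table, 3, 0x99) == (cell)-1);
  return 0;
}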

vm/gc_info.hpp

@@ -1,5 +1,4 @@
namespace factor {

struct gc_info {
  u32 scrub_d_count;
@@ -8,56 +7,38 @@ struct gc_info {
  u32 derived_root_count;
  u32 return_address_count;

  cell callsite_bitmap_size() {
    return scrub_d_count + scrub_r_count + gc_root_count;
  }

  cell total_bitmap_size() {
    return return_address_count * callsite_bitmap_size();
  }

  cell total_bitmap_bytes() { return ((total_bitmap_size() + 7) / 8); }

  u32* return_addresses() { return (u32*)this - return_address_count; }

  u32* base_pointer_map() {
    return return_addresses() - return_address_count * derived_root_count;
  }

  u8* gc_info_bitmap() {
    return (u8*)base_pointer_map() - total_bitmap_bytes();
  }

  cell callsite_scrub_d(cell index) { return index * scrub_d_count; }

  cell callsite_scrub_r(cell index) {
    return return_address_count * scrub_d_count + index * scrub_r_count;
  }

  cell callsite_gc_roots(cell index) {
    return return_address_count * scrub_d_count +
           return_address_count * scrub_r_count + index * gc_root_count;
  }

  u32 lookup_base_pointer(cell index, cell derived_root) {
    return base_pointer_map()[index * derived_root_count + derived_root];
  }
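
Note (not part of the commit): the callsite_scrub_d / callsite_scrub_r / callsite_gc_roots offsets above treat the GC-info bitmap as three consecutive blocks, each return_address_count callsites wide; each formula just skips the blocks that come before it. A self-contained check of that arithmetic with made-up counts:

#include <cassert>
#include <cstdint>

/* Made-up example counts; the real values come from compiled code blocks. */
static const uint32_t scrub_d_count = 3;
static const uint32_t scrub_r_count = 2;
static const uint32_t gc_root_count = 4;
static const uint32_t return_address_count = 5;

/* Same index formulas as gc_info::callsite_scrub_d/_r/_gc_roots above. */
static uint64_t callsite_scrub_d(uint64_t i) { return i * scrub_d_count; }
static uint64_t callsite_scrub_r(uint64_t i) {
  return return_address_count * scrub_d_count + i * scrub_r_count;
}
static uint64_t callsite_gc_roots(uint64_t i) {
  return return_address_count * scrub_d_count +
         return_address_count * scrub_r_count + i * gc_root_count;
}

int main() {
  /* The last callsite's scrub-d bits end exactly where the first callsite's
     scrub-r bits begin, and likewise for the gc-root block. */
  assert(callsite_scrub_d(return_address_count - 1) + scrub_d_count ==
         callsite_scrub_r(0));
  assert(callsite_scrub_r(return_address_count - 1) + scrub_r_count ==
         callsite_gc_roots(0));
  /* The three blocks together add up to total_bitmap_size(). */
  assert(callsite_gc_roots(return_address_count - 1) + gc_root_count ==
         return_address_count *
             (scrub_d_count + scrub_r_count + gc_root_count));
  return 0;
}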