vm: got data heap compaction working
parent 0b65b194c9
commit 0c1e266302
@@ -26,21 +26,66 @@ struct code_block_forwarder {
 	}
 };
 
+static inline cell tuple_size_with_forwarding(mark_bits<object> *forwarding_map, object *obj)
+{
+	/* The tuple layout may or may not have been forwarded already. Tricky. */
+	object *layout_obj = (object *)UNTAG(((tuple *)obj)->layout);
+	tuple_layout *layout;
+
+	if(layout_obj < obj)
+	{
+		/* It's already been moved up; dereference through forwarding
+		map to get the size */
+		layout = (tuple_layout *)forwarding_map->forward_block(layout_obj);
+	}
+	else
+	{
+		/* It hasn't been moved up yet; dereference directly */
+		layout = (tuple_layout *)layout_obj;
+	}
+
+	return tuple_size(layout);
+}
+
+struct compaction_sizer {
+	mark_bits<object> *forwarding_map;
+
+	explicit compaction_sizer(mark_bits<object> *forwarding_map_) :
+		forwarding_map(forwarding_map_) {}
+
+	cell operator()(object *obj)
+	{
+		if(obj->free_p() || obj->h.hi_tag() != TUPLE_TYPE)
+			return obj->size();
+		else
+			return align(tuple_size_with_forwarding(forwarding_map,obj),data_alignment);
+	}
+};
+
 struct object_compaction_updater {
 	factor_vm *parent;
 	slot_visitor<object_slot_forwarder> slot_forwarder;
 	code_block_visitor<code_block_forwarder> code_forwarder;
+	mark_bits<object> *data_forwarding_map;
 
 	explicit object_compaction_updater(factor_vm *parent_,
-		slot_visitor<object_slot_forwarder> slot_forwader_,
-		code_block_visitor<code_block_forwarder> code_forwarder_) :
+		slot_visitor<object_slot_forwarder> slot_forwarder_,
+		code_block_visitor<code_block_forwarder> code_forwarder_,
+		mark_bits<object> *data_forwarding_map_) :
 		parent(parent_),
-		slot_forwarder(slot_forwader_),
-		code_forwarder(code_forwarder_) {}
+		slot_forwarder(slot_forwarder_),
+		code_forwarder(code_forwarder_),
+		data_forwarding_map(data_forwarding_map_) {}
 
 	void operator()(object *obj, cell size)
 	{
-		slot_forwarder.visit_slots(obj);
+		cell payload_start;
+		if(obj->h.hi_tag() == TUPLE_TYPE)
+			payload_start = tuple_size_with_forwarding(data_forwarding_map,obj);
+		else
+			payload_start = obj->binary_payload_start();
+
+		slot_forwarder.visit_slots(obj,payload_start);
 		code_forwarder.visit_object_code_block(obj);
 	}
 };
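The tricky case the comment in tuple_size_with_forwarding refers to is an ordering argument: the compaction pass visits tenured objects in address order, so a tuple's layout object has already been slid, and has to be looked up through the forwarding map, exactly when it sits at a lower address than the tuple being measured. A minimal standalone sketch of that check, assuming a simplified heap model; the names (simple_forwarding_map, resolve) are illustrative only and are not Factor VM APIs:

#include <cstdint>
#include <unordered_map>
#include <cassert>

// Stand-in for the old-address -> new-address mapping that the real
// forwarding map (mark_bits<object>::forward_block) answers.
struct simple_forwarding_map {
	std::unordered_map<std::uintptr_t, std::uintptr_t> moved;
	std::uintptr_t forward(std::uintptr_t old_addr) const {
		auto it = moved.find(old_addr);
		return it == moved.end() ? old_addr : it->second;
	}
};

// In a sliding compaction that scans from low to high addresses, a referent
// at a lower address than the object being scanned has already been slid,
// so its old address must be translated; a higher address is still in place.
std::uintptr_t resolve(const simple_forwarding_map &map,
                       std::uintptr_t referent, std::uintptr_t scanning) {
	return referent < scanning ? map.forward(referent) : referent;
}

int main() {
	simple_forwarding_map map;
	map.moved[0x1000] = 0x0800;                     // layout already slid down
	assert(resolve(map, 0x1000, 0x2000) == 0x0800); // lower address: forwarded
	assert(resolve(map, 0x3000, 0x2000) == 0x3000); // higher address: untouched
	return 0;
}

compaction_sizer only needs this path for tuples; every other object type (and free blocks) can report its size without reading another heap object, so it falls back to obj->size().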
@@ -49,8 +94,8 @@ struct code_block_compaction_updater {
 	factor_vm *parent;
 	slot_visitor<object_slot_forwarder> slot_forwarder;
 
-	explicit code_block_compaction_updater(factor_vm *parent_, slot_visitor<object_slot_forwarder> slot_forwader_) :
-		parent(parent_), slot_forwarder(slot_forwader_) {}
+	explicit code_block_compaction_updater(factor_vm *parent_, slot_visitor<object_slot_forwarder> slot_forwarder_) :
+		parent(parent_), slot_forwarder(slot_forwarder_) {}
 
 	void operator()(code_block *compiled, cell size)
 	{
@@ -83,13 +128,15 @@ void factor_vm::compact_full_impl(bool trace_contexts_p)
 
 	/* Slide everything in tenured space up, and update data and code heap
 	pointers inside objects. */
-	object_compaction_updater object_updater(this,slot_forwarder,code_forwarder);
-	tenured->compact(object_updater);
+	object_compaction_updater object_updater(this,slot_forwarder,code_forwarder,data_forwarding_map);
+	compaction_sizer object_sizer(data_forwarding_map);
+	tenured->compact(object_updater,object_sizer);
 
 	/* Slide everything in the code heap up, and update data and code heap
 	pointers inside code blocks. */
 	code_block_compaction_updater code_block_updater(this,slot_forwarder);
-	code->allocator->compact(code_block_updater);
+	standard_sizer<code_block> code_block_sizer;
+	code->allocator->compact(code_block_updater,code_block_sizer);
 }
 
 }
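compact_full_impl now pairs each heap with the sizer it needs: tenured space uses compaction_sizer over data_forwarding_map, while the code heap keeps measuring blocks directly through standard_sizer. The forwarding map itself is assumed to have been computed before compact() runs, per the comment on free_list_allocator::compact below. A toy model of what such a map holds, assuming live blocks are packed toward the heap start in address order; the types and names here are illustrative, not mark_bits<object>:

#include <cstdint>
#include <map>
#include <vector>

struct toy_block { std::uintptr_t addr; std::uintptr_t size; bool live; };

// Assign each live block its packed destination address, scanning in
// increasing address order; dead blocks are simply skipped over.
std::map<std::uintptr_t, std::uintptr_t>
compute_forwarding(const std::vector<toy_block> &heap, std::uintptr_t heap_start)
{
	std::map<std::uintptr_t, std::uintptr_t> forwarding;
	std::uintptr_t dest = heap_start;
	for (const toy_block &b : heap) {
		if (!b.live) continue;
		forwarding[b.addr] = dest;   // old address -> packed new address
		dest += b.size;              // next live block lands right after
	}
	return forwarding;
}

int main()
{
	std::vector<toy_block> heap = {{0x1000, 0x20, true}, {0x1020, 0x10, false}, {0x1030, 0x40, true}};
	auto fwd = compute_forwarding(heap, 0x1000);
	// First live block stays at 0x1000; the block at 0x1030 slides over the dead one to 0x1020.
	return fwd.at(0x1030) == 0x1020 ? 0 : 1;
}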
@@ -53,7 +53,8 @@ template<typename Block> struct free_list_allocator {
 	cell occupied();
 	void sweep();
 	template<typename Iterator> void sweep(Iterator &iter);
-	template<typename Iterator> void compact(Iterator &iter);
+	template<typename Iterator, typename Sizer> void compact(Iterator &iter, Sizer &sizer);
+	template<typename Iterator, typename Sizer> void iterate(Iterator &iter, Sizer &sizer);
 	template<typename Iterator> void iterate(Iterator &iter);
 };
 
@@ -358,31 +359,47 @@ void free_list_allocator<Block>::sweep(Iterator &iter)
 /* The forwarding map must be computed first by calling
 state.compute_forwarding(). */
 template<typename Block>
-template<typename Iterator>
-void free_list_allocator<Block>::compact(Iterator &iter)
+template<typename Iterator, typename Sizer>
+void free_list_allocator<Block>::compact(Iterator &iter, Sizer &sizer)
 {
 	heap_compactor<Block,Iterator> compactor(&state,first_block(),iter);
-	this->iterate(compactor);
+	this->iterate(compactor,sizer);
 
 	/* Now update the free list; there will be a single free block at
 	the end */
 	this->initial_free_list((cell)compactor.address - this->start);
 }
 
+/* During compaction we have to be careful and measure object sizes differently */
 template<typename Block>
-template<typename Iterator>
-void free_list_allocator<Block>::iterate(Iterator &iter)
+template<typename Iterator, typename Sizer>
+void free_list_allocator<Block>::iterate(Iterator &iter, Sizer &sizer)
 {
 	Block *scan = first_block();
 	Block *end = last_block();
 
 	while(scan != end)
 	{
-		cell size = scan->size();
+		cell size = sizer(scan);
 		Block *next = (Block *)((cell)scan + size);
 		if(!scan->free_p()) iter(scan,size);
 		scan = next;
 	}
 }
 
+template<typename Block> struct standard_sizer {
+	cell operator()(Block *block)
+	{
+		return block->size();
+	}
+};
+
+template<typename Block>
+template<typename Iterator>
+void free_list_allocator<Block>::iterate(Iterator &iter)
+{
+	standard_sizer<Block> sizer;
+	iterate(iter,sizer);
+}
+
 }
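The one-argument iterate() is kept as a thin wrapper so existing callers are untouched, while compaction can supply a forwarding-aware sizer instead of trusting block->size(). A standalone sketch of that overload pattern, with toy block and sizer types that are illustrative only and not the allocator's real definitions:

#include <cstddef>
#include <iostream>

struct block {
	std::size_t bytes;
	bool free;
	std::size_t size() const { return bytes; }
};

// Default measurement policy: ask the block itself.
struct standard_sizer_demo {
	std::size_t operator()(const block &b) const { return b.size(); }
};

// Generic walk over a heap of blocks: the sizer decides how each block is
// measured, the iterator sees every live block together with that size.
template<typename Iterator, typename Sizer>
void iterate(block *heap, std::size_t count, Iterator &iter, Sizer &sizer)
{
	for (std::size_t i = 0; i < count; i++) {
		std::size_t size = sizer(heap[i]);
		if (!heap[i].free) iter(heap[i], size);
	}
}

// Wrapper keeps the old single-argument call shape working.
template<typename Iterator>
void iterate(block *heap, std::size_t count, Iterator &iter)
{
	standard_sizer_demo sizer;
	iterate(heap, count, iter, sizer);
}

int main()
{
	block heap[] = {{16, false}, {32, true}, {8, false}};
	auto print = [](const block &, std::size_t size) { std::cout << size << "\n"; };
	iterate(heap, 3, print);   // prints 16 and 8, skipping the free block
}

The design choice is that measurement becomes a policy passed in by the caller rather than something the allocator assumes it can always read out of the block being scanned.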
@@ -8,6 +8,7 @@ object_start_map::object_start_map(cell size_, cell start_) :
 {
 	object_start_offsets = new card[addr_to_card(size_)];
+	object_start_offsets_end = object_start_offsets + addr_to_card(size_);
 	clear_object_start_offsets();
 }
 
 object_start_map::~object_start_map()
@@ -19,10 +19,10 @@ template<typename Visitor> struct slot_visitor {
 		*handle = RETAG(untagged,TAG(pointer));
 	}
 
-	void visit_slots(object *ptr)
+	void visit_slots(object *ptr, cell payload_start)
 	{
 		cell *slot = (cell *)ptr;
-		cell *end = (cell *)((cell)ptr + ptr->binary_payload_start());
+		cell *end = (cell *)((cell)ptr + payload_start);
 
 		if(slot != end)
 		{
@@ -31,6 +31,11 @@ template<typename Visitor> struct slot_visitor {
 		}
 	}
 
+	void visit_slots(object *ptr)
+	{
+		visit_slots(ptr,ptr->binary_payload_start());
+	}
+
 	void visit_stack_elements(segment *region, cell *top)
 	{
 		for(cell *ptr = (cell *)region->start; ptr <= top; ptr++)
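The new two-argument visit_slots exists so object_compaction_updater can pass in a payload_start it computed through the forwarding map, instead of letting visit_slots call binary_payload_start() on a tuple whose layout pointer may already be stale mid-compaction. A small sketch of a slot scan bounded by an explicit payload_start, using toy types (the cell typedef and toy_visitor here are illustrative, not the Factor VM definitions):

#include <cstdint>
#include <vector>
#include <iostream>

typedef std::uintptr_t cell;

struct toy_visitor {
	void operator()(cell *slot) { std::cout << "slot value: " << *slot << "\n"; }
};

// Visit each cell-sized slot in [obj, obj + payload_start); everything past
// payload_start is binary (non-pointer) payload and must not be scanned.
void visit_slots(cell *obj, cell payload_start, toy_visitor &visit)
{
	cell *slot = obj;
	cell *end = (cell *)((cell)obj + payload_start);
	for (; slot != end; slot++)
		visit(slot);
}

int main()
{
	// Three pointer slots followed by one binary cell that is skipped.
	std::vector<cell> obj = {1, 2, 3, 0xdeadbeef};
	toy_visitor v;
	visit_slots(obj.data(), 3 * sizeof(cell), v);   // payload_start is in bytes
}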