VM: simplified fixup_gc_workhorse::fixup_data by merging lots of functions only used once
parent 8ea1eea4e1
commit ee5fd49b9e
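This commit folds resolve_forwarding and promote_object into fixup_data and drops the one-line marked_p / set_marked_p wrappers in favour of direct calls into allocator->state. As a reading aid, here is a minimal, self-contained sketch of the copy-and-forward pattern the merged fixup_data implements; every name and type below is an illustrative stand-in, not Factor's, and the policy/marking machinery is left out.

#include <cassert>
#include <cstdio>
#include <cstring>

// Illustrative stand-ins; Factor's real object header, heap spaces and
// collection policies are considerably more involved than this.
struct toy_object {
  toy_object* forwarding; // null until the object has been copied
  int payload;
};

struct toy_space {
  toy_object storage[16];
  int used = 0;
  toy_object* allot() { return &storage[used++]; }
  bool contains(const toy_object* p) const {
    return p >= storage && p < storage + used;
  }
};

// The merged fixup in miniature: resolve the forwarding chain, and if the
// destination has not been evacuated yet, copy it into the target space and
// leave a forwarding pointer behind so later references reach the same copy.
toy_object* fixup(toy_object* obj, toy_space* target) {
  // is there another forwarding pointer?
  while (obj->forwarding)
    obj = obj->forwarding;

  // already evacuated: nothing to do (the toy analogue of !should_copy_p)
  if (target->contains(obj))
    return obj;

  // promote: allot in the target space, copy, install a forwarding pointer
  toy_object* newpointer = target->allot();
  std::memcpy(newpointer, obj, sizeof(toy_object));
  newpointer->forwarding = nullptr;
  obj->forwarding = newpointer;
  return newpointer;
}

int main() {
  toy_space from, to;
  toy_object* a = from.allot();
  a->forwarding = nullptr;
  a->payload = 42;

  toy_object* first = fixup(a, &to);
  toy_object* second = fixup(a, &to); // follows the forwarding pointer
  assert(first == second && first->payload == 42);
  std::printf("both references resolve to the same copy: %d\n", first->payload);
}

In the real code the "already evacuated" test is policy.should_copy_p / policy.visited_object, and marking is reported through the policy (full_policy::promoted_object pushes the object onto the mark stack, as the full_policy hunk further down shows).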
@@ -39,14 +39,6 @@ bool code_heap::uninitialized_p(code_block* compiled) {
   return uninitialized_blocks.count(compiled) > 0;
 }

-bool code_heap::marked_p(code_block* compiled) {
-  return allocator->state.marked_p((cell)compiled);
-}
-
-void code_heap::set_marked_p(code_block* compiled) {
-  allocator->state.set_marked_p((cell)compiled, compiled->size());
-}
-
 void code_heap::free(code_block* compiled) {
   FACTOR_ASSERT(!uninitialized_p(compiled));
   points_to_nursery.erase(compiled);
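The wrappers removed above (and their tenured_space counterparts further down) were one-line conveniences over the allocator's mark bitmap; callers now go through allocator->state directly. The size argument to set_marked_p suggests a mark covers the object's whole extent rather than a single bit. A toy bitmap along those lines, with purely illustrative names (this is not Factor's mark_bits):

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative stand-in for a mark bitmap keyed by heap address.
struct toy_mark_bits {
  std::size_t start;       // base address of the managed region
  std::size_t granularity; // bytes covered by one bit
  std::vector<bool> bits;

  toy_mark_bits(std::size_t start, std::size_t size, std::size_t granularity)
      : start(start), granularity(granularity), bits(size / granularity, false) {}

  std::size_t index(std::size_t address) const {
    return (address - start) / granularity;
  }

  bool marked_p(std::size_t address) const { return bits[index(address)]; }

  // Mark every granule the object covers, so a sweep can later treat
  // exactly the unmarked runs as free space.
  void set_marked_p(std::size_t address, std::size_t size) {
    for (std::size_t a = address; a < address + size; a += granularity)
      bits[index(a)] = true;
  }
};

int main() {
  toy_mark_bits state(0x1000, 0x100, 16); // 256-byte region, 16-byte granules
  assert(!state.marked_p(0x1020));
  state.set_marked_p(0x1020, 48);         // a 48-byte "object" spans 3 granules
  assert(state.marked_p(0x1020) && state.marked_p(0x1040));
  assert(!state.marked_p(0x1050));
}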
@@ -37,8 +37,6 @@ struct code_heap {
   void write_barrier(code_block* compiled);
   void clear_remembered_set();
   bool uninitialized_p(code_block* compiled);
-  bool marked_p(code_block* compiled);
-  void set_marked_p(code_block* compiled);
   void free(code_block* compiled);
   void flush_icache();
   void guard_safepoint();
@@ -15,18 +15,26 @@ struct gc_workhorse : no_fixup {
   gc_workhorse(factor_vm* parent, TargetGeneration* target, Policy policy)
       : parent(parent), target(target), policy(policy), code(parent->code) {}

-  object* resolve_forwarding(object* untagged) {
-    parent->check_data_pointer(untagged);
+  object* fixup_data(object* obj) {
+    FACTOR_ASSERT((parent->current_gc &&
+                   parent->current_gc->op == collect_growing_heap_op) ||
+                  parent->data->seg->in_segment_p((cell)obj));
+
+    if (!policy.should_copy_p(obj)) {
+      policy.visited_object(obj);
+      return obj;
+    }

+    object* untagged = obj;
     /* is there another forwarding pointer? */
     while (untagged->forwarding_pointer_p())
       untagged = untagged->forwarding_pointer();

-    /* we've found the destination */
+    if (!policy.should_copy_p(untagged)) {
+      policy.visited_object(untagged);
       return untagged;
     }

-  object* promote_object(object* untagged) {
     cell size = untagged->size();
     object* newpointer = target->allot(size);
     if (!newpointer)
@@ -40,29 +48,9 @@ struct gc_workhorse : no_fixup {
     return newpointer;
   }

-  object* fixup_data(object* obj) {
-    parent->check_data_pointer(obj);
-
-    if (!policy.should_copy_p(obj)) {
-      policy.visited_object(obj);
-      return obj;
-    }
-
-    object* forwarding = resolve_forwarding(obj);
-
-    if (forwarding == obj)
-      return promote_object(obj);
-    else if (policy.should_copy_p(forwarding))
-      return promote_object(forwarding);
-    else {
-      policy.visited_object(forwarding);
-      return forwarding;
-    }
-  }
-
   code_block* fixup_code(code_block* compiled) {
-    if (!code->marked_p(compiled)) {
-      code->set_marked_p(compiled);
+    if (!code->allocator->state.marked_p((cell)compiled)) {
+      code->allocator->state.set_marked_p((cell)compiled, compiled->size());
       parent->mark_stack.push_back((cell)compiled + 1);
     }

@@ -368,7 +368,7 @@ struct code_block_printer {
       reloc_size += parent->object_size(scan->relocation);
       parameter_size += parent->object_size(scan->parameters);

-      if (parent->code->marked_p(scan))
+      if (parent->code->allocator->state.marked_p((cell)scan))
        status = "marked";
      else
        status = "allocated";
@@ -1,9 +1,5 @@
 namespace factor {

-template <typename T> struct identity {
-  T operator()(T t) { return t; }
-};
-
 struct no_fixup {
   static const bool translated_code_block_map = false;

@@ -59,7 +59,7 @@ template <typename Block>
 Block* free_list_allocator<Block>::next_allocated_block_after(Block* block) {
   while ((cell)block != this->end && block->free_p()) {
     free_heap_block* free_block = (free_heap_block*)block;
-    block = (object*)((cell)free_block + free_block->size());
+    block = (Block*)((cell)free_block + free_block->size());
   }

   if ((cell)block == this->end)
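A note on the (object*) to (Block*) cast fixed just above: free_list_allocator is a template, and a member function that is never instantiated for a given Block is not fully type-checked, so the old cast presumably went unnoticed only while next_allocated_block_after was instantiated with Block = object alone. A contrived, self-contained sketch of that effect (illustrative names, not Factor's):

#include <cstdint>

// Illustrative stand-ins, not Factor's types.
struct object {};
struct code_block {};

template <typename Block> struct allocator_sketch {
  Block* next(Block* block) {
    // With an (object*) cast here the conversion back to Block* only
    // type-checks when Block is object; casting to Block* is correct for
    // every instantiation.
    return (Block*)((std::uintptr_t)block + sizeof(Block));
  }
};

int main() {
  object o;
  code_block c;
  allocator_sketch<object>().next(&o);     // fine with either cast
  allocator_sketch<code_block>().next(&c); // would not compile with (object*)
}

Casting to Block* keeps the member correct for any instantiation, including the code heap's allocator that this commit now reaches into directly.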
@@ -12,12 +12,12 @@ struct full_policy {
   }

   void promoted_object(object* obj) {
-    tenured->set_marked_p(obj);
+    tenured->state.set_marked_p((cell)obj, obj->size());
     parent->mark_stack.push_back((cell)obj);
   }

   void visited_object(object* obj) {
-    if (!tenured->marked_p(obj))
+    if (!tenured->state.marked_p((cell)obj))
       promoted_object(obj);
   }
 };
@@ -25,14 +25,6 @@ struct tenured_space : free_list_allocator<object> {
     return (cell)next_allocated_block_after(next);
   }

-  bool marked_p(object* obj) {
-    return this->state.marked_p((cell)obj);
-  }
-
-  void set_marked_p(object* obj) {
-    this->state.set_marked_p((cell)obj, obj->size());
-  }
-
   void sweep() {
     free_list_allocator<object>::sweep();
     starts.update_for_sweep(&this->state);
@@ -382,11 +382,6 @@ struct factor_vm {
     return (Type*)allot_object(Type::type_number, size);
   }

-  inline void check_data_pointer(object* pointer) {
-    FACTOR_ASSERT((current_gc && current_gc->op == collect_growing_heap_op) ||
-                  data->seg->in_segment_p((cell)pointer));
-  }
-
   // generic arrays
   template <typename Array> Array* allot_uninitialized_array(cell capacity);
   template <typename Array>