vm: code heap: move mark bits to a separate bitmap
parent 3d95494c1e
commit 69f9c80b5b

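This commit replaces the mark bit kept in each heap_block header with an external mark_bits<Block,Granularity> bitmap owned by the heap (accessed as code->state in the collector). Below is a standalone sketch, not part of the commit, of the word/bit indexing that bitmap uses; the 32-byte granularity is an assumed example value (the VM passes block_size_increment, which this diff does not show):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

/* Sketch only: map a block address to (word index, bit shift) in an external
   bitmap that keeps one mark bit per `granularity` bytes of heap, following
   the arithmetic of mark_bits::bitmap_deref introduced in this commit. */
static std::pair<std::size_t, std::size_t>
bitmap_deref(std::uintptr_t heap_start, std::size_t granularity, std::uintptr_t address)
{
    std::size_t block_number = (address - heap_start) / granularity;
    std::size_t word_index = block_number >> 5;  /* 32 mark bits per bitmap word */
    std::size_t word_shift = block_number & 31;  /* bit position within that word */
    return std::make_pair(word_index, word_shift);
}

int main()
{
    const std::uintptr_t start = 0x100000;
    const std::size_t granularity = 32; /* assumed example value, not from the diff */

    assert(bitmap_deref(start, granularity, start).first == 0);   /* first block: word 0 */
    assert(bitmap_deref(start, granularity, start).second == 0);  /* ... bit 0 */
    assert(bitmap_deref(start, granularity, start + 33 * granularity).first == 1);  /* 34th block slot: word 1 */
    assert(bitmap_deref(start, granularity, start + 33 * granularity).second == 1); /* ... bit 1 */
    return 0;
}
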
@@ -296,7 +296,7 @@ void factor_vm::dump_code_heap()
         const char *status;
         if(scan->type() == FREE_BLOCK_TYPE)
             status = "free";
-        else if(scan->marked_p())
+        else if(code->state->is_marked_p(scan))
         {
             reloc_size += object_size(((code_block *)scan)->relocation);
             literal_size += object_size(((code_block *)scan)->literals);
@@ -134,6 +134,8 @@ void factor_vm::collect_full_impl(bool trace_contexts_p)
 {
     full_collector collector(this);

+    code->state->clear_mark_bits();
+
     collector.trace_roots();
     if(trace_contexts_p)
     {
@@ -148,16 +150,6 @@ void factor_vm::collect_full_impl(bool trace_contexts_p)
     nursery.here = nursery.start;
 }

-/* In both cases, compact code heap before updating code blocks so that
-XTs are correct after */
-void factor_vm::big_code_heap_update()
-{
-    big_code_heap_updater updater(this);
-    code->free_unmarked(updater);
-    code->clear_remembered_set();
-}
-
 void factor_vm::collect_growing_heap(cell requested_bytes,
     bool trace_contexts_p,
     bool compact_code_heap_p)
@@ -168,15 +160,18 @@ void factor_vm::collect_growing_heap(cell requested_bytes,
     collect_full_impl(trace_contexts_p);
     delete old;

-    if(compact_code_heap_p) compact_code_heap(trace_contexts_p);
+    if(compact_code_heap_p)
+    {
+        compact_code_heap(trace_contexts_p);
+        big_code_heap_updater updater(this);
+        iterate_code_heap(updater);
+    }
+    else
+    {
+        big_code_heap_updater updater(this);
+        code->free_unmarked(updater);
+    }

-    big_code_heap_update();
-}
-
-void factor_vm::small_code_heap_update()
-{
-    small_code_heap_updater updater(this);
-    code->free_unmarked(updater);
     code->clear_remembered_set();
 }

@@ -190,10 +185,16 @@ void factor_vm::collect_full(bool trace_contexts_p, bool compact_code_heap_p)
     if(compact_code_heap_p)
     {
         compact_code_heap(trace_contexts_p);
-        big_code_heap_update();
+        big_code_heap_updater updater(this);
+        iterate_code_heap(updater);
     }
     else
-        small_code_heap_update();
+    {
+        small_code_heap_updater updater(this);
+        code->free_unmarked(updater);
+    }
+
+    code->clear_remembered_set();
 }

 }
@@ -54,9 +54,6 @@ void factor_vm::gc(gc_op op,
         current_gc->op = collect_full_op;
         break;
     case collect_full_op:
-        /* Since we start tracing again, any previously
-        marked code blocks must be re-marked and re-traced */
-        code->clear_mark_bits();
         current_gc->op = collect_growing_heap_op;
         break;
     default:
vm/heap.cpp (91 changed lines)
@@ -16,9 +16,18 @@ heap::heap(bool secure_gc_, cell size, bool executable_p) : secure_gc(secure_gc_
     if(size > (1L << (sizeof(cell) * 8 - 6))) fatal_error("Heap too large",size);
     seg = new segment(align_page(size),executable_p);
     if(!seg) fatal_error("Out of memory in heap allocator",size);
+    state = new mark_bits<heap_block,block_size_increment>(seg->start,size);
     clear_free_list();
 }

+heap::~heap()
+{
+    delete seg;
+    seg = NULL;
+    delete state;
+    state = NULL;
+}
+
 void heap::add_to_free_list(free_heap_block *block)
 {
     if(block->size() < free_list_count * block_size_increment)
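The constructor now allocates the bitmap alongside the segment, and the new destructor releases both. A back-of-the-envelope check of the bitmap's memory cost, mirroring bits_size = size / Granularity / 32 from the new mark_bits constructor; the 32-byte granularity stands in for block_size_increment (not shown in this diff) and a 4-byte unsigned int is assumed:

#include <cassert>
#include <cstddef>

int main()
{
    /* Illustrative arithmetic only. */
    const std::size_t heap_size = 32 * 1024 * 1024; /* e.g. a 32 MB code heap */
    const std::size_t granularity = 32;             /* assumed block granularity */
    const std::size_t bits_size = heap_size / granularity / 32;

    assert(bits_size == 32768);                              /* bitmap words */
    assert(bits_size * sizeof(unsigned int) == 128 * 1024);  /* ~128 KB per bitmap */
    return 0;
}
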
@@ -34,52 +43,15 @@ void heap::add_to_free_list(free_heap_block *block)
     }
 }

-/* Called after reading the code heap from the image file, and after code GC.
-In the former case, we must add a large free block from compiling.base + size to
-compiling.limit. */
+/* Called after reading the code heap from the image file, and after code heap
+compaction. Makes a free list consisting of one free block, at the very end. */
 void heap::build_free_list(cell size)
 {
-    heap_block *prev = NULL;
-
     clear_free_list();
-
-    size = (size + block_size_increment - 1) & ~(block_size_increment - 1);
-
-    heap_block *scan = first_block();
     free_heap_block *end = (free_heap_block *)(seg->start + size);
-
-    /* Add all free blocks to the free list */
-    while(scan && scan < (heap_block *)end)
-    {
-        if(scan->type() == FREE_BLOCK_TYPE)
-            add_to_free_list((free_heap_block *)scan);
-
-        prev = scan;
-        scan = next_block(scan);
-    }
-
-    /* If there is room at the end of the heap, add a free block. This
-    branch is only taken after loading a new image, not after code GC */
-    if((cell)(end + 1) <= seg->end)
-    {
-        end->set_marked_p(false);
-        end->set_type(FREE_BLOCK_TYPE);
-        end->set_size(seg->end - (cell)end);
-
-        /* add final free block */
-        add_to_free_list(end);
-    }
-    /* This branch is taken if the newly loaded image fits exactly, or
-    after code GC */
-    else
-    {
-        /* even if there's no room at the end of the heap for a new
-        free block, we might have to jigger it up by a few bytes in
-        case prev + prev->size */
-        if(prev) prev->set_size(seg->end - (cell)prev);
-    }
-
+    end->set_type(FREE_BLOCK_TYPE);
+    end->set_size(seg->end - (cell)end);
+    add_to_free_list(end);
 }

 void heap::assert_free_block(free_heap_block *block)
@@ -154,7 +126,6 @@ heap_block *heap::heap_allot(cell size, cell type)
     {
         block = split_free_block(block,size);
         block->set_type(type);
-        block->set_marked_p(false);
         return block;
     }
     else
@@ -170,18 +141,7 @@ void heap::heap_free(heap_block *block)

 void heap::mark_block(heap_block *block)
 {
-    block->set_marked_p(true);
-}
-
-void heap::clear_mark_bits()
-{
-    heap_block *scan = first_block();
-
-    while(scan)
-    {
-        scan->set_marked_p(false);
-        scan = next_block(scan);
-    }
+    state->set_marked_p(block,true);
 }

 /* Compute total sum of sizes of free blocks, and size of largest free block */
@@ -210,20 +170,21 @@ void heap::heap_usage(cell *used, cell *total_free, cell *max_free)
     }
 }

-/* The size of the heap, not including the last block if it's free */
+/* The size of the heap after compaction */
 cell heap::heap_size()
 {
     heap_block *scan = first_block();

-    while(next_block(scan) != NULL)
-        scan = next_block(scan);
+    while(scan)
+    {
+        if(scan->type() == FREE_BLOCK_TYPE) break;
+        else scan = next_block(scan);
+    }

-    /* this is the last block in the heap, and it is free */
-    if(scan->type() == FREE_BLOCK_TYPE)
-        return (cell)scan - seg->start;
-    /* otherwise the last block is allocated */
-    else
-        return seg->size;
+    assert(scan->type() == FREE_BLOCK_TYPE);
+    assert((cell)scan + scan->size() == seg->end);
+
+    return (cell)scan - (cell)first_block();
 }

 void heap::compact_heap()
|
@ -238,7 +199,7 @@ void heap::compact_heap()
|
||||||
{
|
{
|
||||||
heap_block *next = next_block(scan);
|
heap_block *next = next_block(scan);
|
||||||
|
|
||||||
if(scan->type() != FREE_BLOCK_TYPE && scan->marked_p())
|
if(state->is_marked_p(scan))
|
||||||
{
|
{
|
||||||
cell size = scan->size();
|
cell size = scan->size();
|
||||||
memmove(address,scan,size);
|
memmove(address,scan,size);
|
||||||
|
|
|
@@ -13,9 +13,11 @@ struct heap {
     bool secure_gc;
     segment *seg;
     heap_free_list free;
+    mark_bits<heap_block,block_size_increment> *state;
     unordered_map<heap_block *, char *> forwarding;

     explicit heap(bool secure_gc_, cell size, bool executable_p);
+    ~heap();

     inline heap_block *next_block(heap_block *block)
     {
@@ -46,7 +48,6 @@ struct heap {
     heap_block *heap_allot(cell size, cell type);
     void heap_free(heap_block *block);
     void mark_block(heap_block *block);
-    void clear_mark_bits();
     void heap_usage(cell *used, cell *total_free, cell *max_free);
     cell heap_size();
     void compact_heap();
|
@ -71,11 +72,10 @@ struct heap {
|
||||||
else
|
else
|
||||||
prev = scan;
|
prev = scan;
|
||||||
}
|
}
|
||||||
else if(scan->marked_p())
|
else if(state->is_marked_p(scan))
|
||||||
{
|
{
|
||||||
if(prev && prev->type() == FREE_BLOCK_TYPE)
|
if(prev && prev->type() == FREE_BLOCK_TYPE)
|
||||||
add_to_free_list((free_heap_block *)prev);
|
add_to_free_list((free_heap_block *)prev);
|
||||||
scan->set_marked_p(false);
|
|
||||||
prev = scan;
|
prev = scan;
|
||||||
iter(scan);
|
iter(scan);
|
||||||
}
|
}
|
||||||
|
|
vm/image.cpp (160 changed lines)
@@ -67,86 +67,6 @@ void factor_vm::load_code_heap(FILE *file, image_header *h, vm_parameters *p)
     code->build_free_list(h->code_size);
 }

-/* Save the current image to disk */
-bool factor_vm::save_image(const vm_char *filename)
-{
-    FILE* file;
-    image_header h;
-
-    file = OPEN_WRITE(filename);
-    if(file == NULL)
-    {
-        print_string("Cannot open image file: "); print_native_string(filename); nl();
-        print_string(strerror(errno)); nl();
-        return false;
-    }
-
-    h.magic = image_magic;
-    h.version = image_version;
-    h.data_relocation_base = data->tenured->start;
-    h.data_size = data->tenured->here - data->tenured->start;
-    h.code_relocation_base = code->seg->start;
-    h.code_size = code->heap_size();
-
-    h.t = T;
-    h.bignum_zero = bignum_zero;
-    h.bignum_pos_one = bignum_pos_one;
-    h.bignum_neg_one = bignum_neg_one;
-
-    for(cell i = 0; i < USER_ENV; i++)
-        h.userenv[i] = (save_env_p(i) ? userenv[i] : F);
-
-    bool ok = true;
-
-    if(fwrite(&h,sizeof(image_header),1,file) != 1) ok = false;
-    if(fwrite((void*)data->tenured->start,h.data_size,1,file) != 1) ok = false;
-    if(fwrite(code->first_block(),h.code_size,1,file) != 1) ok = false;
-    if(fclose(file)) ok = false;
-
-    if(!ok)
-    {
-        print_string("save-image failed: "); print_string(strerror(errno)); nl();
-    }
-
-    return ok;
-}
-
-void factor_vm::primitive_save_image()
-{
-    /* do a full GC to push everything into tenured space */
-    primitive_compact_gc();
-
-    gc_root<byte_array> path(dpop(),this);
-    path.untag_check(this);
-    save_image((vm_char *)(path.untagged() + 1));
-}
-
-void factor_vm::primitive_save_image_and_exit()
-{
-    /* We unbox this before doing anything else. This is the only point
-    where we might throw an error, so we have to throw an error here since
-    later steps destroy the current image. */
-    gc_root<byte_array> path(dpop(),this);
-    path.untag_check(this);
-
-    /* strip out userenv data which is set on startup anyway */
-    for(cell i = 0; i < USER_ENV; i++)
-    {
-        if(!save_env_p(i)) userenv[i] = F;
-    }
-
-    gc(collect_full_op,
-        0, /* requested size */
-        false, /* discard objects only reachable from stacks */
-        true /* compact the code heap */);
-
-    /* Save the image */
-    if(save_image((vm_char *)(path.untagged() + 1)))
-        exit(0);
-    else
-        exit(1);
-}
-
 void factor_vm::data_fixup(cell *handle, cell data_relocation_base)
 {
     if(immediate_p(*handle))
@@ -353,4 +273,84 @@ void factor_vm::load_image(vm_parameters *p)
     userenv[IMAGE_ENV] = allot_alien(F,(cell)p->image_path);
 }

+/* Save the current image to disk */
+bool factor_vm::save_image(const vm_char *filename)
+{
+    FILE* file;
+    image_header h;
+
+    file = OPEN_WRITE(filename);
+    if(file == NULL)
+    {
+        print_string("Cannot open image file: "); print_native_string(filename); nl();
+        print_string(strerror(errno)); nl();
+        return false;
+    }
+
+    h.magic = image_magic;
+    h.version = image_version;
+    h.data_relocation_base = data->tenured->start;
+    h.data_size = data->tenured->here - data->tenured->start;
+    h.code_relocation_base = code->seg->start;
+    h.code_size = code->heap_size();
+
+    h.t = T;
+    h.bignum_zero = bignum_zero;
+    h.bignum_pos_one = bignum_pos_one;
+    h.bignum_neg_one = bignum_neg_one;
+
+    for(cell i = 0; i < USER_ENV; i++)
+        h.userenv[i] = (save_env_p(i) ? userenv[i] : F);
+
+    bool ok = true;
+
+    if(fwrite(&h,sizeof(image_header),1,file) != 1) ok = false;
+    if(fwrite((void*)data->tenured->start,h.data_size,1,file) != 1) ok = false;
+    if(fwrite(code->first_block(),h.code_size,1,file) != 1) ok = false;
+    if(fclose(file)) ok = false;
+
+    if(!ok)
+    {
+        print_string("save-image failed: "); print_string(strerror(errno)); nl();
+    }
+
+    return ok;
+}
+
+void factor_vm::primitive_save_image()
+{
+    /* do a full GC to push everything into tenured space */
+    primitive_compact_gc();
+
+    gc_root<byte_array> path(dpop(),this);
+    path.untag_check(this);
+    save_image((vm_char *)(path.untagged() + 1));
+}
+
+void factor_vm::primitive_save_image_and_exit()
+{
+    /* We unbox this before doing anything else. This is the only point
+    where we might throw an error, so we have to throw an error here since
+    later steps destroy the current image. */
+    gc_root<byte_array> path(dpop(),this);
+    path.untag_check(this);
+
+    /* strip out userenv data which is set on startup anyway */
+    for(cell i = 0; i < USER_ENV; i++)
+    {
+        if(!save_env_p(i)) userenv[i] = F;
+    }
+
+    gc(collect_full_op,
+        0, /* requested size */
+        false, /* discard objects only reachable from stacks */
+        true /* compact the code heap */);
+
+    /* Save the image */
+    if(save_image((vm_char *)(path.untagged() + 1)))
+        exit(0);
+    else
+        exit(1);
+}
+
 }
@@ -201,15 +201,6 @@ struct heap_block
 {
     cell header;

-    bool marked_p() { return header & 1; }
-    void set_marked_p(bool marked)
-    {
-        if(marked)
-            header |= 1;
-        else
-            header &= ~1;
-    }
-
     cell type() { return (header >> 1) & 0x1f; }
     void set_type(cell type)
     {
@@ -0,0 +1,103 @@
+namespace factor
+{
+
+const int forwarding_granularity = 128;
+
+template<typename Block, int Granularity> struct mark_bits {
+    cell start;
+    cell size;
+    cell bits_size;
+    unsigned int *marked;
+    unsigned int *freed;
+    cell forwarding_size;
+    cell *forwarding;
+
+    void clear_mark_bits()
+    {
+        memset(marked,0,bits_size * sizeof(unsigned int));
+    }
+
+    void clear_free_bits()
+    {
+        memset(freed,0,bits_size * sizeof(unsigned int));
+    }
+
+    void clear_forwarding()
+    {
+        memset(forwarding,0,forwarding_size * sizeof(cell));
+    }
+
+    explicit mark_bits(cell start_, cell size_) :
+        start(start_),
+        size(size_),
+        bits_size(size / Granularity / 32),
+        marked(new unsigned int[bits_size]),
+        freed(new unsigned int[bits_size]),
+        forwarding_size(size / Granularity / forwarding_granularity),
+        forwarding(new cell[forwarding_size])
+    {
+        clear_mark_bits();
+        clear_free_bits();
+        clear_forwarding();
+    }
+
+    ~mark_bits()
+    {
+        delete[] marked;
+        marked = NULL;
+        delete[] freed;
+        freed = NULL;
+        delete[] forwarding;
+        forwarding = NULL;
+    }
+
+    std::pair<cell,cell> bitmap_deref(Block *address)
+    {
+        cell word_number = (((cell)address - start) / Granularity);
+        cell word_index = (word_number >> 5);
+        cell word_shift = (word_number & 31);
+
+#ifdef FACTOR_DEBUG
+        assert(word_index < bits_size);
+#endif
+
+        return std::make_pair(word_index,word_shift);
+    }
+
+    bool bitmap_elt(unsigned int *bits, Block *address)
+    {
+        std::pair<cell,cell> pair = bitmap_deref(address);
+        return (bits[pair.first] & (1 << pair.second)) != 0;
+    }
+
+    void set_bitmap_elt(unsigned int *bits, Block *address, bool flag)
+    {
+        std::pair<cell,cell> pair = bitmap_deref(address);
+        if(flag)
+            bits[pair.first] |= (1 << pair.second);
+        else
+            bits[pair.first] &= ~(1 << pair.second);
+    }
+
+    bool is_marked_p(Block *address)
+    {
+        return bitmap_elt(marked,address);
+    }
+
+    void set_marked_p(Block *address, bool marked_p)
+    {
+        set_bitmap_elt(marked,address,marked_p);
+    }
+
+    bool is_free_p(Block *address)
+    {
+        return bitmap_elt(freed,address);
+    }
+
+    void set_free_p(Block *address, bool free_p)
+    {
+        set_bitmap_elt(freed,address,free_p);
+    }
+};
+
+}
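A standalone round trip over the same word/bit layout as set_bitmap_elt, bitmap_elt, and clear_mark_bits above; this is a sketch for illustration only, with the array size and block slot number chosen arbitrarily rather than taken from the VM:

#include <cassert>
#include <cstddef>
#include <cstring>

int main()
{
    const std::size_t bits_size = 8;       /* covers 8 * 32 = 256 block slots */
    unsigned int marked[bits_size];
    std::memset(marked, 0, bits_size * sizeof(unsigned int));

    std::size_t block_number = 70;         /* some block slot inside the heap */
    std::size_t word = block_number >> 5;  /* 70 / 32 -> word 2 */
    std::size_t shift = block_number & 31; /* bit 6 of that word */

    marked[word] |= (1u << shift);                 /* set_marked_p(block,true) */
    assert((marked[word] & (1u << shift)) != 0);   /* is_marked_p(block) */

    std::memset(marked, 0, bits_size * sizeof(unsigned int)); /* clear_mark_bits() */
    assert((marked[word] & (1u << shift)) == 0);
    return 0;
}
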
@@ -78,6 +78,7 @@ namespace factor
 #include "words.hpp"
 #include "float_bits.hpp"
 #include "io.hpp"
+#include "mark_bits.hpp"
 #include "heap.hpp"
 #include "image.hpp"
 #include "alien.hpp"
@@ -253,8 +253,6 @@ struct factor_vm
     void collect_nursery();
     void collect_aging();
     void collect_to_tenured();
-    void big_code_heap_update();
-    void small_code_heap_update();
     void collect_full_impl(bool trace_contexts_p);
     void collect_growing_heap(cell requested_bytes, bool trace_contexts_p, bool compact_code_heap_p);
     void collect_full(bool trace_contexts_p, bool compact_code_heap_p);