From 1beac119d896bbd31a64779f6d7d1919188779b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Lindqvist?=
Date: Tue, 6 Jan 2015 17:58:24 +0100
Subject: [PATCH] VM: removes the template parameter from mark_bits since
 mark_bits only deals with arithmetic on memory addresses, it doesn't need to
 be specialized so it can be untemplateized

---
 vm/code_heap.cpp           |  4 ++--
 vm/compaction.cpp          | 44 +++++++++++++++++-----------------
 vm/free_list_allocator.hpp | 18 +++++++-------
 vm/full_collector.cpp      |  4 ++--
 vm/mark_bits.hpp           | 48 ++++++++++++++++++++------------------
 vm/object_start_map.cpp    |  2 +-
 vm/object_start_map.hpp    |  2 +-
 vm/tenured_space.hpp       |  8 +++++--
 8 files changed, 68 insertions(+), 62 deletions(-)
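In a nutshell, the reason the Block parameter can go away: everything
mark_bits does is arithmetic on cell-sized addresses relative to the heap
start, so callers cast object* or code_block* to cell at the call site and
pass the block size explicitly where it used to come from Block::size().
Below is a minimal illustrative sketch of the resulting untemplatized
interface. It is simplified and not part of the patch itself; the real
vm/mark_bits.hpp also keeps the forwarding table used during compaction,
and the cell / data_alignment / mark_bits_granularity definitions live in
the VM headers.

/* Illustrative sketch only, not part of the patch below. */
#include <stdint.h>

typedef uintptr_t cell;                /* the VM's unsigned machine word */
static const cell data_alignment = 16; /* one mark bit per 16-byte line */
static const cell line_bits = sizeof(cell) * 8; /* mark_bits_granularity */

struct mark_bits_sketch {
  cell start;   /* base address of the heap covered by this bitmap */
  cell* marked; /* one bit per data_alignment-sized line */

  cell block_line(cell address) { return (address - start) / data_alignment; }

  bool marked_p(cell address) {
    cell line = block_line(address);
    return (marked[line / line_bits] >> (line % line_bits)) & 1;
  }

  /* The size is passed in explicitly, e.g.
       state.set_marked_p((cell)compiled, compiled->size());
     because an untemplatized mark_bits cannot call Block::size() itself. */
  void set_marked_p(cell address, cell size) {
    for (cell line = block_line(address);
         line <= block_line(address + size - 1); line++)
      marked[line / line_bits] |= (cell)1 << (line % line_bits);
  }
};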
diff --git a/vm/code_heap.cpp b/vm/code_heap.cpp
index db45cb59f0..ef4c3d42df 100644
--- a/vm/code_heap.cpp
+++ b/vm/code_heap.cpp
@@ -40,11 +40,11 @@ bool code_heap::uninitialized_p(code_block* compiled) {
 }
 
 bool code_heap::marked_p(code_block* compiled) {
-  return allocator->state.marked_p(compiled);
+  return allocator->state.marked_p((cell)compiled);
 }
 
 void code_heap::set_marked_p(code_block* compiled) {
-  allocator->state.set_marked_p(compiled);
+  allocator->state.set_marked_p((cell)compiled, compiled->size());
 }
 
 void code_heap::clear_mark_bits() { allocator->state.clear_mark_bits(); }
diff --git a/vm/compaction.cpp b/vm/compaction.cpp
index 18eee437ce..74328dff14 100644
--- a/vm/compaction.cpp
+++ b/vm/compaction.cpp
@@ -5,13 +5,13 @@ namespace factor {
 
 struct compaction_fixup {
   static const bool translated_code_block_map = false;
-  mark_bits<object>* data_forwarding_map;
-  mark_bits<code_block>* code_forwarding_map;
+  mark_bits* data_forwarding_map;
+  mark_bits* code_forwarding_map;
   const object** data_finger;
   const code_block** code_finger;
 
-  compaction_fixup(mark_bits<object>* data_forwarding_map,
-                   mark_bits<code_block>* code_forwarding_map,
+  compaction_fixup(mark_bits* data_forwarding_map,
+                   mark_bits* code_forwarding_map,
                    const object** data_finger,
                    const code_block** code_finger)
       : data_forwarding_map(data_forwarding_map),
@@ -20,11 +20,11 @@ struct compaction_fixup {
         code_finger(code_finger) {}
 
   object* fixup_data(object* obj) {
-    return data_forwarding_map->forward_block(obj);
+    return (object*)data_forwarding_map->forward_block((cell)obj);
   }
 
   code_block* fixup_code(code_block* compiled) {
-    return code_forwarding_map->forward_block(compiled);
+    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
   }
 
   object* translate_data(const object* obj) {
@@ -42,17 +42,17 @@ struct compaction_fixup {
   }
 
   cell size(object* obj) {
-    if (data_forwarding_map->marked_p(obj))
+    if (data_forwarding_map->marked_p((cell)obj))
       return obj->size(*this);
     else
-      return data_forwarding_map->unmarked_block_size(obj);
+      return data_forwarding_map->unmarked_block_size((cell)obj);
   }
 
   cell size(code_block* compiled) {
-    if (code_forwarding_map->marked_p(compiled))
+    if (code_forwarding_map->marked_p((cell)compiled))
       return compiled->size(*this);
     else
-      return code_forwarding_map->unmarked_block_size(compiled);
+      return code_forwarding_map->unmarked_block_size((cell)compiled);
   }
 };
 
@@ -152,18 +152,18 @@ void factor_vm::update_code_roots_for_compaction() {
   std::vector<code_root*>::const_iterator iter = code_roots.begin();
   std::vector<code_root*>::const_iterator end = code_roots.end();
 
-  mark_bits<code_block>* state = &code->allocator->state;
+  mark_bits* state = &code->allocator->state;
 
   for (; iter < end; iter++) {
     code_root* root = *iter;
-    code_block* block = (code_block*)(root->value & (~data_alignment + 1));
+    cell block = root->value & (~data_alignment + 1);
     /* Offset of return address within 16-byte allocation line */
-    cell offset = root->value - (cell)block;
+    cell offset = root->value - block;
 
     if (root->valid && state->marked_p(block)) {
       block = state->forward_block(block);
-      root->value = (cell)block + offset;
+      root->value = block + offset;
     } else
       root->valid = false;
   }
 }
@@ -181,8 +181,8 @@ void factor_vm::collect_compact_impl(bool trace_contexts_p) {
     event->started_compaction();
 
   tenured_space* tenured = data->tenured;
-  mark_bits<object>* data_forwarding_map = &tenured->state;
-  mark_bits<code_block>* code_forwarding_map = &code->allocator->state;
+  mark_bits* data_forwarding_map = &tenured->state;
+  mark_bits* code_forwarding_map = &code->allocator->state;
 
   /* Figure out where blocks are going to go */
   data_forwarding_map->compute_forwarding();
@@ -237,17 +237,17 @@ void factor_vm::collect_compact_impl(bool trace_contexts_p) {
 
 struct code_compaction_fixup {
   static const bool translated_code_block_map = false;
-  mark_bits<code_block>* code_forwarding_map;
+  mark_bits* code_forwarding_map;
   const code_block** code_finger;
 
-  code_compaction_fixup(mark_bits<code_block>* code_forwarding_map,
+  code_compaction_fixup(mark_bits* code_forwarding_map,
                         const code_block** code_finger)
       : code_forwarding_map(code_forwarding_map), code_finger(code_finger) {}
 
   object* fixup_data(object* obj) { return obj; }
 
   code_block* fixup_code(code_block* compiled) {
-    return code_forwarding_map->forward_block(compiled);
+    return (code_block*)code_forwarding_map->forward_block((cell)compiled);
   }
 
   object* translate_data(const object* obj) { return fixup_data((object*)obj); }
@@ -262,10 +262,10 @@ struct code_compaction_fixup {
   cell size(object* obj) { return obj->size(); }
 
   cell size(code_block* compiled) {
-    if (code_forwarding_map->marked_p(compiled))
+    if (code_forwarding_map->marked_p((cell)compiled))
       return compiled->size(*this);
     else
-      return code_forwarding_map->unmarked_block_size(compiled);
+      return code_forwarding_map->unmarked_block_size((cell)compiled);
   }
 };
 
@@ -282,7 +282,7 @@ struct object_grow_heap_updater {
 /* Compact just the code heap, after growing the data heap */
 void factor_vm::collect_compact_code_impl(bool trace_contexts_p) {
   /* Figure out where blocks are going to go */
-  mark_bits<code_block>* code_forwarding_map = &code->allocator->state;
+  mark_bits* code_forwarding_map = &code->allocator->state;
   code_forwarding_map->compute_forwarding();
 
   const code_block* code_finger = code->allocator->first_block();
diff --git a/vm/free_list_allocator.hpp b/vm/free_list_allocator.hpp
index 13598f49eb..5d3d04b6c9 100644
--- a/vm/free_list_allocator.hpp
+++ b/vm/free_list_allocator.hpp
@@ -13,7 +13,7 @@ template <typename Block> struct free_list_allocator {
   cell start;
   cell end;
   free_list free_blocks;
-  mark_bits<Block> state;
+  mark_bits state;
 
   free_list_allocator(cell size, cell start);
   void initial_free_list(cell occupied);
@@ -44,7 +44,7 @@ free_list_allocator<Block>::free_list_allocator(cell size, cell start)
     : size(size),
       start(start),
       end(start + size),
-      state(mark_bits<Block>(size, start)) {
+      state(mark_bits(size, start)) {
   initial_free_list(0);
 }
 
@@ -128,8 +128,8 @@ template <typename Iterator>
 void free_list_allocator<Block>::sweep(Iterator& iter) {
   free_blocks.clear_free_list();
 
-  Block* start = this->first_block();
-  Block* end = this->last_block();
+  cell start = (cell)this->first_block();
+  cell end = (cell)this->last_block();
 
   while (start != end) {
     /* find next unmarked block */
@@ -143,9 +143,9 @@ void free_list_allocator<Block>::sweep(Iterator& iter) {
       free_heap_block* free_block = (free_heap_block*)start;
       free_block->make_free(size);
       free_blocks.add_to_free_list(free_block);
-      iter(start, size);
+      iter((Block*)start, size);
 
-      start = (Block*)((char*)start + size);
+      start = start + size;
     }
   }
 }
@@ -160,17 +160,17 @@ template <typename Block> void free_list_allocator<Block>::sweep() {
 }
 
 template <typename Block, typename Iterator> struct heap_compactor {
-  mark_bits<Block>* state;
+  mark_bits* state;
   char* address;
   Iterator& iter;
   const Block** finger;
 
-  heap_compactor(mark_bits<Block>* state, Block* address,
+  heap_compactor(mark_bits* state, Block* address,
                  Iterator& iter, const Block** finger)
       : state(state), address((char*)address), iter(iter), finger(finger) {}
 
   void operator()(Block* block, cell size) {
-    if (this->state->marked_p(block)) {
+    if (this->state->marked_p((cell)block)) {
       *finger = (Block*)((char*)block + size);
       memmove((Block*)address, block, size);
       iter(block, (Block*)address, size);
diff --git a/vm/full_collector.cpp b/vm/full_collector.cpp
index cffbaac710..ac8721a83e 100644
--- a/vm/full_collector.cpp
+++ b/vm/full_collector.cpp
@@ -21,11 +21,11 @@ void factor_vm::update_code_roots_for_sweep() {
   std::vector<code_root*>::const_iterator iter = code_roots.begin();
   std::vector<code_root*>::const_iterator end = code_roots.end();
 
-  mark_bits<code_block>* state = &code->allocator->state;
+  mark_bits* state = &code->allocator->state;
 
   for (; iter < end; iter++) {
     code_root* root = *iter;
-    code_block* block = (code_block*)(root->value & (~data_alignment - 1));
+    cell block = root->value & (~data_alignment - 1);
     if (root->valid && !state->marked_p(block))
       root->valid = false;
   }
diff --git a/vm/mark_bits.hpp b/vm/mark_bits.hpp
index 29eb1f8417..e9d5db88db 100644
--- a/vm/mark_bits.hpp
+++ b/vm/mark_bits.hpp
@@ -3,7 +3,7 @@ namespace factor {
 const int mark_bits_granularity = sizeof(cell) * 8;
 const int mark_bits_mask = sizeof(cell) * 8 - 1;
 
-template <typename Block> struct mark_bits {
+struct mark_bits {
   cell size;
   cell start;
   cell bits_size;
@@ -31,33 +31,33 @@ template <typename Block> struct mark_bits {
     forwarding = NULL;
   }
 
-  cell block_line(const Block* address) {
-    return (((cell)address - start) / data_alignment);
+  cell block_line(cell address) {
+    return (address - start) / data_alignment;
   }
 
-  Block* line_block(cell line) {
-    return (Block*)(line * data_alignment + start);
+  cell line_block(cell line) {
+    return line * data_alignment + start;
   }
 
-  std::pair<cell, cell> bitmap_deref(const Block* address) {
+  std::pair<cell, cell> bitmap_deref(const cell address) {
     cell line_number = block_line(address);
     cell word_index = (line_number / mark_bits_granularity);
     cell word_shift = (line_number & mark_bits_mask);
     return std::make_pair(word_index, word_shift);
   }
 
-  bool bitmap_elt(cell* bits, const Block* address) {
+  bool bitmap_elt(cell* bits, const cell address) {
     std::pair<cell, cell> position = bitmap_deref(address);
     return (bits[position.first] & ((cell)1 << position.second)) != 0;
   }
 
-  Block* next_block_after(const Block* block) {
-    return (Block*)((cell)block + block->size());
+  cell next_block_after(const cell block, const cell size) {
+    return block + size;
   }
 
-  void set_bitmap_range(cell* bits, const Block* address) {
+  void set_bitmap_range(cell* bits, const cell address, const cell size) {
     std::pair<cell, cell> start = bitmap_deref(address);
-    std::pair<cell, cell> end = bitmap_deref(next_block_after(address));
+    std::pair<cell, cell> end = bitmap_deref(next_block_after(address, size));
 
     cell start_mask = ((cell)1 << start.second) - 1;
     cell end_mask = ((cell)1 << end.second) - 1;
@@ -78,9 +78,11 @@ template <typename Block> struct mark_bits {
     }
   }
 
-  bool marked_p(const Block* address) { return bitmap_elt(marked, address); }
+  bool marked_p(const cell address) { return bitmap_elt(marked, address); }
 
-  void set_marked_p(const Block* address) { set_bitmap_range(marked, address); }
+  void set_marked_p(const cell address, const cell size) {
+    set_bitmap_range(marked, address, size);
+  }
 
   /* The eventual destination of a block after compaction is just the number
      of marked blocks before it. Live blocks must be marked on entry. */
@@ -94,22 +96,22 @@ template <typename Block> struct mark_bits {
 
   /* We have the popcount for every mark_bits_granularity entries; look up
      and compute the rest */
-  Block* forward_block(const Block* original) {
+  cell forward_block(const cell original) {
     FACTOR_ASSERT(marked_p(original));
     std::pair<cell, cell> position = bitmap_deref(original);
-    cell offset = (cell)original & (data_alignment - 1);
+    cell offset = original & (data_alignment - 1);
 
     cell approx_popcount = forwarding[position.first];
     cell mask = ((cell)1 << position.second) - 1;
 
     cell new_line_number =
         approx_popcount + popcount(marked[position.first] & mask);
-    Block* new_block = (Block*)((char*)line_block(new_line_number) + offset);
+    cell new_block = line_block(new_line_number) + offset;
     FACTOR_ASSERT(new_block <= original);
     return new_block;
   }
 
-  Block* next_unmarked_block_after(const Block* original) {
+  cell next_unmarked_block_after(const cell original) {
     std::pair<cell, cell> position = bitmap_deref(original);
     cell bit_index = position.second;
 
@@ -127,10 +129,10 @@ template <typename Block> struct mark_bits {
     }
 
     /* No unmarked blocks were found */
-    return (Block*)(this->start + this->size);
+    return this->start + this->size;
   }
 
-  Block* next_marked_block_after(const Block* original) {
+  cell next_marked_block_after(const cell original) {
     std::pair<cell, cell> position = bitmap_deref(original);
     cell bit_index = position.second;
 
@@ -147,12 +149,12 @@ template <typename Block> struct mark_bits {
     }
 
     /* No marked blocks were found */
-    return (Block*)(this->start + this->size);
+    return this->start + this->size;
   }
 
-  cell unmarked_block_size(Block* original) {
-    Block* next_marked = next_marked_block_after(original);
-    return ((char*)next_marked - (char*)original);
+  cell unmarked_block_size(cell original) {
+    cell next_marked = next_marked_block_after(original);
+    return next_marked - original;
   }
 };
 
diff --git a/vm/object_start_map.cpp b/vm/object_start_map.cpp
index 5d371e7ab7..78da247ea7 100644
--- a/vm/object_start_map.cpp
+++ b/vm/object_start_map.cpp
@@ -58,7 +58,7 @@ void object_start_map::update_card_for_sweep(cell index, uint16_t mask) {
   }
 }
 
-void object_start_map::update_for_sweep(mark_bits<object>* state) {
+void object_start_map::update_for_sweep(mark_bits* state) {
   for (cell index = 0; index < state->bits_size; index++) {
     cell mask = state->marked[index];
 #ifdef FACTOR_64
diff --git a/vm/object_start_map.hpp b/vm/object_start_map.hpp
index 9470c590fe..f916c8c041 100644
--- a/vm/object_start_map.hpp
+++ b/vm/object_start_map.hpp
@@ -15,7 +15,7 @@ struct object_start_map {
   void record_object_start_offset(object* obj);
   void clear_object_start_offsets();
   void update_card_for_sweep(cell index, uint16_t mask);
-  void update_for_sweep(mark_bits<object>* state);
+  void update_for_sweep(mark_bits* state);
 };
 
 }
diff --git a/vm/tenured_space.hpp b/vm/tenured_space.hpp
index 58dcc9ca0a..444e0a02ce 100644
--- a/vm/tenured_space.hpp
+++ b/vm/tenured_space.hpp
@@ -27,9 +27,13 @@ struct tenured_space : free_list_allocator<object> {
 
   void clear_mark_bits() { state.clear_mark_bits(); }
 
-  bool marked_p(object* obj) { return this->state.marked_p(obj); }
+  bool marked_p(object* obj) {
+    return this->state.marked_p((cell)obj);
+  }
 
-  void set_marked_p(object* obj) { this->state.set_marked_p(obj); }
+  void set_marked_p(object* obj) {
+    this->state.set_marked_p((cell)obj, obj->size());
+  }
 
   void sweep() {
     free_list_allocator<object>::sweep();