vm: faster allocation in tenured space and code heap. If small free list exhausted, allocate a 1kb block and split it up instead of searching larger free lists

Slava Pestov 2009-11-10 20:19:29 -06:00
parent e9a7191050
commit 4162ee2127
4 changed files with 44 additions and 38 deletions
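Before the diffs, a minimal sketch of the idea, assuming hypothetical names (size_class_allocator, page_size, allocate_large) that do not appear in this commit: when the free list for an exact size class is empty, carve one roughly 1kb run out of a larger free block and push the pieces onto that list, so the common allocation path is a single pop instead of a scan over larger free lists.

/* Illustrative sketch only; the real implementation is in the
   free_list hunks below. */
#include <cstddef>
#include <map>
#include <vector>

class size_class_allocator {
	static const std::size_t granularity = 16;   /* block_granularity stand-in */
	static const std::size_t class_count = 32;   /* free_list_count stand-in */
	static const std::size_t page_size = 1024;   /* the "1kb block" */

	/* One LIFO free list per small size class, plus a size-ordered
	   map of large blocks. */
	std::vector<char *> small[class_count];
	std::multimap<std::size_t, char *> large;

public:
	void add_free(char *block, std::size_t size)
	{
		if(size / granularity < class_count)
			small[size / granularity].push_back(block);
		else
			large.insert(std::make_pair(size, block));
	}

	char *allocate(std::size_t size)
	{
		if(size / granularity < class_count)
		{
			std::vector<char *> &blocks = small[size / granularity];
			if(blocks.empty())
			{
				/* Refill: round the page size up to a multiple of
				   'size', take one large block, split it into
				   same-sized pieces. */
				std::size_t run = ((page_size + size - 1) / size) * size;
				char *big = allocate_large(run);
				if(!big) return NULL;
				for(std::size_t offset = 0; offset < run; offset += size)
					blocks.push_back(big + offset);
			}
			char *block = blocks.back();   /* fast path: one pop */
			blocks.pop_back();
			return block;
		}
		return allocate_large(size);
	}

private:
	char *allocate_large(std::size_t size)
	{
		/* Smallest block >= size; a real allocator would split off
		   and refile the unused tail (cf. split_free_block below). */
		std::multimap<std::size_t, char *>::iterator iter = large.lower_bound(size);
		if(iter == large.end()) return NULL;
		char *block = iter->second;
		large.erase(iter);
		return block;
	}
};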


@@ -65,7 +65,7 @@ STRUCT: gc-event
     { data-sweep-time cell }
     { code-sweep-time cell }
     { compaction-time cell }
-    { temp-time cell } ;
+    { temp-time ulonglong } ;
 
 STRUCT: dispatch-statistics
     { megamorphic-cache-hits cell }


@@ -39,39 +39,59 @@ void free_list::add_to_free_list(free_heap_block *block)
 free_heap_block *free_list::find_free_block(cell size)
 {
 	/* Check small free lists */
-	for(cell i = size / block_granularity; i < free_list_count; i++)
+	if(size / block_granularity < free_list_count)
 	{
-		std::vector<free_heap_block *> &blocks = small_blocks[i];
-		if(blocks.size())
+		std::vector<free_heap_block *> &blocks = small_blocks[size / block_granularity];
+		if(blocks.size() == 0)
 		{
-			free_heap_block *block = blocks.back();
-			blocks.pop_back();
+			/* Round up to a multiple of 'size' */
+			cell large_block_size = ((allocation_page_size + size - 1) / size) * size;
 
-			free_block_count--;
-			free_space -= block->size();
+			/* Allocate a block this big */
+			free_heap_block *large_block = find_free_block(large_block_size);
+			if(!large_block) return NULL;
 
-			return block;
+			large_block = split_free_block(large_block,large_block_size);
+
+			/* Split it up into pieces and add each piece back to the free list */
+			for(cell offset = 0; offset < large_block_size; offset += size)
+			{
+				free_heap_block *small_block = large_block;
+				large_block = (free_heap_block *)((cell)large_block + size);
+				small_block->make_free(size);
+				add_to_free_list(small_block);
+			}
 		}
-	}
 
-	/* Check large free lists */
-	free_heap_block key;
-	key.make_free(size);
-	large_block_set::iterator iter = large_blocks.lower_bound(&key);
-	large_block_set::iterator end = large_blocks.end();
-
-	if(iter != end)
-	{
-		free_heap_block *block = *iter;
-		large_blocks.erase(iter);
+		free_heap_block *block = blocks.back();
+		blocks.pop_back();
+
 		free_block_count--;
 		free_space -= block->size();
+
 		return block;
 	}
+	else
+	{
+		/* Check large free list */
+		free_heap_block key;
+		key.make_free(size);
+		large_block_set::iterator iter = large_blocks.lower_bound(&key);
+		large_block_set::iterator end = large_blocks.end();
 
-	return NULL;
+		if(iter != end)
+		{
+			free_heap_block *block = *iter;
+			large_blocks.erase(iter);
+			free_block_count--;
+			free_space -= block->size();
+
+			return block;
+		}
+
+		return NULL;
+	}
 }
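For concreteness: with size = 48 the round-up gives large_block_size = ((1024 + 47) / 48) * 48 = 22 * 48 = 1056, so one refill pushes 22 blocks of 48 bytes onto the small free list, and the next 21 allocations of that size are each a single pop_back().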
@@ -90,22 +110,7 @@ free_heap_block *free_list::split_free_block(free_heap_block *block, cell size)
 
 bool free_list::can_allot_p(cell size)
 {
-	/* Check small free lists */
-	for(cell i = size / block_granularity; i < free_list_count; i++)
-	{
-		if(small_blocks[i].size()) return true;
-	}
-
-	/* Check large free lists */
-	large_block_set::const_iterator iter = large_blocks.begin();
-	large_block_set::const_iterator end = large_blocks.end();
-
-	for(; iter != end; iter++)
-	{
-		if((*iter)->size() >= size) return true;
-	}
-
-	return false;
+	return largest_free_block() >= std::max(size,allocation_page_size);
 }
 
 cell free_list::largest_free_block()
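The new can_allot_p follows from the refill path above: a small request now either pops from its size class or carves up a roughly page-sized block, so checking the largest free block against max(size, allocation_page_size) approximates both cases without walking any lists.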


@@ -2,6 +2,7 @@ namespace factor
 {
 
 static const cell free_list_count = 32;
+static const cell allocation_page_size = 1024;
 
 struct free_heap_block
 {


@@ -26,7 +26,7 @@ struct gc_event {
 	cell data_sweep_time;
 	cell code_sweep_time;
 	cell compaction_time;
-	cell temp_time;
+	u64 temp_time;
 
 	explicit gc_event(gc_op op_, factor_vm *parent);
 	void started_card_scan();