vm: Revert extra canary page before callstack

This reverts commit 7d5c8d6990385b94569116a23163d7e75ae21f49. We pretty much can't avoid unwinding frames without a more drastic redesign.
db4
Joe Groff 2011-10-21 11:49:34 -07:00
parent 68518e741e
commit dca0fd3487
8 changed files with 20 additions and 31 deletions

View File

@ -4,7 +4,7 @@ namespace factor
{
callback_heap::callback_heap(cell size, factor_vm *parent_) :
seg(new segment(size,true,false)),
seg(new segment(size,true)),
here(seg->start),
parent(parent_) {}

View File

@ -6,7 +6,7 @@ namespace factor
code_heap::code_heap(cell size)
{
if(size > ((u64)1 << (sizeof(cell) * 8 - 6))) fatal_error("Heap too large",size);
seg = new segment(align_page(size),true,false);
seg = new segment(align_page(size),true);
if(!seg) fatal_error("Out of memory in code_heap constructor",size);
cell start = seg->start + getpagesize() + seh_area_size;

View File

@ -9,9 +9,9 @@ context::context(cell datastack_size, cell retainstack_size, cell callstack_size
datastack(0),
retainstack(0),
callstack_save(0),
datastack_seg(new segment(datastack_size,false,false)),
retainstack_seg(new segment(retainstack_size,false,false)),
callstack_seg(new segment(callstack_size,false,true))
datastack_seg(new segment(datastack_size,false)),
retainstack_seg(new segment(retainstack_size,false)),
callstack_seg(new segment(callstack_size,false))
{
reset();
}

View File

@ -22,7 +22,7 @@ data_heap::data_heap(cell young_size_,
tenured_size = tenured_size_;
cell total_size = young_size + 2 * aging_size + tenured_size + deck_size;
seg = new segment(total_size,false,false);
seg = new segment(total_size,false);
cell cards_size = addr_to_card(total_size);
cards = new card[cards_size];

View File

@ -98,8 +98,6 @@ void factor_vm::memory_protection_error(cell addr)
general_error(ERROR_CALLSTACK_OVERFLOW,false_object,false_object);
else if(ctx->callstack_seg->overflow_p(addr))
general_error(ERROR_CALLSTACK_UNDERFLOW,false_object,false_object);
else if(ctx->callstack_seg->canary_p(addr))
fatal_error("Call stack overflow in your call stack overflow", addr);
else
general_error(ERROR_MEMORY,from_unsigned_cell(addr),false_object);
}

View File

@ -91,11 +91,11 @@ void factor_vm::move_file(const vm_char *path1, const vm_char *path2)
general_error(ERROR_IO,tag_fixnum(errno),false_object);
}
segment::segment(cell size_, bool executable_p, bool canary_page_p)
segment::segment(cell size_, bool executable_p)
{
size = size_;
cell pagesize = getpagesize();
int pagesize = getpagesize();
int prot;
if(executable_p)
@ -103,18 +103,16 @@ segment::segment(cell size_, bool executable_p, bool canary_page_p)
else
prot = (PROT_READ | PROT_WRITE);
cell startsize = canary_page_p ? 2*pagesize : pagesize;
char *array = (char *)mmap(NULL,startsize + size + pagesize,prot,MAP_ANON | MAP_PRIVATE,-1,0);
char *array = (char *)mmap(NULL,pagesize + size + pagesize,prot,MAP_ANON | MAP_PRIVATE,-1,0);
if(array == (char*)-1) out_of_memory();
if(mprotect(array,startsize,PROT_NONE) == -1)
if(mprotect(array,pagesize,PROT_NONE) == -1)
fatal_error("Cannot protect low guard page",(cell)array);
if(mprotect(array + startsize + size,pagesize,PROT_NONE) == -1)
if(mprotect(array + pagesize + size,pagesize,PROT_NONE) == -1)
fatal_error("Cannot protect high guard page",(cell)array);
start = (cell)(array + startsize);
start = (cell)(array + pagesize);
end = start + size;
}
@ -238,7 +236,7 @@ void factor_vm::unix_init_signals()
libpthread. See http://redmine.ruby-lang.org/issues/show/1239 */
#ifndef __OpenBSD__
signal_callstack_seg = new segment(callstack_size,false,false);
signal_callstack_seg = new segment(callstack_size,false);
stack_t signal_callstack;
signal_callstack.ss_sp = (char *)signal_callstack_seg->start;

View File

@ -106,27 +106,25 @@ void factor_vm::primitive_existsp()
ctx->push(tag_boolean(windows_stat(path)));
}
segment::segment(cell size_, bool executable_p, bool canary_page_p)
segment::segment(cell size_, bool executable_p)
{
size = size_;
char *mem;
DWORD ignore;
cell pagesize = getpagesize();
cell startsize = canary_page_p ? 2*pagesize : pagesize;
if((mem = (char *)VirtualAlloc(NULL, startsize + size + pagesize,
if((mem = (char *)VirtualAlloc(NULL, getpagesize() * 2 + size,
MEM_COMMIT, executable_p ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE)) == 0)
out_of_memory();
if (!VirtualProtect(mem, startsize, PAGE_NOACCESS, &ignore))
if (!VirtualProtect(mem, getpagesize(), PAGE_NOACCESS, &ignore))
fatal_error("Cannot allocate low guard page", (cell)mem);
if (!VirtualProtect(mem + size + startsize,
pagesize, PAGE_NOACCESS, &ignore))
if (!VirtualProtect(mem + size + getpagesize(),
getpagesize(), PAGE_NOACCESS, &ignore))
fatal_error("Cannot allocate high guard page", (cell)mem);
start = (cell)mem + startsize;
start = (cell)mem + getpagesize();
end = start + size;
}

View File

@ -13,14 +13,9 @@ struct segment {
cell size;
cell end;
explicit segment(cell size, bool executable_p, bool canary_page_p);
explicit segment(cell size, bool executable_p);
~segment();
bool canary_p(cell addr)
{
return (addr >= start - 2*getpagesize() && addr < start - getpagesize());
}
bool underflow_p(cell addr)
{
return (addr >= start - getpagesize() && addr < start);