vm: allocate an extra canary page before callstack
This will let us safely unguard the callstack overflow guard page as a grace page so the callstack overflow handler can do its business without discarding frames.
parent
b0b0905460
commit
c3f007ce0a
|
@ -4,7 +4,7 @@ namespace factor
|
|||
{
|
||||
|
||||
callback_heap::callback_heap(cell size, factor_vm *parent_) :
|
||||
seg(new segment(size,true)),
|
||||
seg(new segment(size,true,false)),
|
||||
here(seg->start),
|
||||
parent(parent_) {}
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ namespace factor
|
|||
code_heap::code_heap(cell size)
|
||||
{
|
||||
if(size > ((u64)1 << (sizeof(cell) * 8 - 6))) fatal_error("Heap too large",size);
|
||||
seg = new segment(align_page(size),true);
|
||||
seg = new segment(align_page(size),true,false);
|
||||
if(!seg) fatal_error("Out of memory in code_heap constructor",size);
|
||||
|
||||
cell start = seg->start + getpagesize() + seh_area_size;
|
||||
|
|
|
@ -9,9 +9,9 @@ context::context(cell datastack_size, cell retainstack_size, cell callstack_size
|
|||
datastack(0),
|
||||
retainstack(0),
|
||||
callstack_save(0),
|
||||
datastack_seg(new segment(datastack_size,false)),
|
||||
retainstack_seg(new segment(retainstack_size,false)),
|
||||
callstack_seg(new segment(callstack_size,false))
|
||||
datastack_seg(new segment(datastack_size,false,false)),
|
||||
retainstack_seg(new segment(retainstack_size,false,false)),
|
||||
callstack_seg(new segment(callstack_size,false,true))
|
||||
{
|
||||
reset();
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ data_heap::data_heap(cell young_size_,
|
|||
tenured_size = tenured_size_;
|
||||
|
||||
cell total_size = young_size + 2 * aging_size + tenured_size + deck_size;
|
||||
seg = new segment(total_size,false);
|
||||
seg = new segment(total_size,false,false);
|
||||
|
||||
cell cards_size = addr_to_card(total_size);
|
||||
cards = new card[cards_size];
|
||||
|
|
|
@ -98,6 +98,8 @@ void factor_vm::memory_protection_error(cell addr)
|
|||
general_error(ERROR_CALLSTACK_OVERFLOW,false_object,false_object);
|
||||
else if(ctx->callstack_seg->overflow_p(addr))
|
||||
general_error(ERROR_CALLSTACK_UNDERFLOW,false_object,false_object);
|
||||
else if(ctx->callstack_seg->canary_p(addr))
|
||||
fatal_error("Call stack overflow in your call stack overflow", addr);
|
||||
else
|
||||
general_error(ERROR_MEMORY,from_unsigned_cell(addr),false_object);
|
||||
}
|
||||
|
|
|
@ -91,11 +91,11 @@ void factor_vm::move_file(const vm_char *path1, const vm_char *path2)
|
|||
general_error(ERROR_IO,tag_fixnum(errno),false_object);
|
||||
}
|
||||
|
||||
segment::segment(cell size_, bool executable_p)
|
||||
segment::segment(cell size_, bool executable_p, bool canary_page_p)
|
||||
{
|
||||
size = size_;
|
||||
|
||||
int pagesize = getpagesize();
|
||||
cell pagesize = getpagesize();
|
||||
|
||||
int prot;
|
||||
if(executable_p)
|
||||
|
@ -103,16 +103,18 @@ segment::segment(cell size_, bool executable_p)
|
|||
else
|
||||
prot = (PROT_READ | PROT_WRITE);
|
||||
|
||||
char *array = (char *)mmap(NULL,pagesize + size + pagesize,prot,MAP_ANON | MAP_PRIVATE,-1,0);
|
||||
cell startsize = canary_page_p ? 2*pagesize : pagesize;
|
||||
|
||||
char *array = (char *)mmap(NULL,startsize + size + pagesize,prot,MAP_ANON | MAP_PRIVATE,-1,0);
|
||||
if(array == (char*)-1) out_of_memory();
|
||||
|
||||
if(mprotect(array,pagesize,PROT_NONE) == -1)
|
||||
if(mprotect(array,startsize,PROT_NONE) == -1)
|
||||
fatal_error("Cannot protect low guard page",(cell)array);
|
||||
|
||||
if(mprotect(array + pagesize + size,pagesize,PROT_NONE) == -1)
|
||||
if(mprotect(array + startsize + size,pagesize,PROT_NONE) == -1)
|
||||
fatal_error("Cannot protect high guard page",(cell)array);
|
||||
|
||||
start = (cell)(array + pagesize);
|
||||
start = (cell)(array + startsize);
|
||||
end = start + size;
|
||||
}
|
||||
|
||||
|
@ -236,7 +238,7 @@ void factor_vm::unix_init_signals()
|
|||
libpthread. See http://redmine.ruby-lang.org/issues/show/1239 */
|
||||
|
||||
#ifndef __OpenBSD__
|
||||
signal_callstack_seg = new segment(callstack_size,false);
|
||||
signal_callstack_seg = new segment(callstack_size,false,false);
|
||||
|
||||
stack_t signal_callstack;
|
||||
signal_callstack.ss_sp = (char *)signal_callstack_seg->start;
|
||||
|
|
|
@ -106,25 +106,27 @@ void factor_vm::primitive_existsp()
|
|||
ctx->push(tag_boolean(windows_stat(path)));
|
||||
}
|
||||
|
||||
segment::segment(cell size_, bool executable_p)
|
||||
segment::segment(cell size_, bool executable_p, bool canary_page_p)
|
||||
{
|
||||
size = size_;
|
||||
|
||||
char *mem;
|
||||
DWORD ignore;
|
||||
|
||||
if((mem = (char *)VirtualAlloc(NULL, getpagesize() * 2 + size,
|
||||
cell pagesize = getpagesize();
|
||||
cell startsize = canary_page_p ? 2*pagesize : pagesize;
|
||||
if((mem = (char *)VirtualAlloc(NULL, startsize + size + pagesize,
|
||||
MEM_COMMIT, executable_p ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE)) == 0)
|
||||
out_of_memory();
|
||||
|
||||
if (!VirtualProtect(mem, getpagesize(), PAGE_NOACCESS, &ignore))
|
||||
if (!VirtualProtect(mem, startsize, PAGE_NOACCESS, &ignore))
|
||||
fatal_error("Cannot allocate low guard page", (cell)mem);
|
||||
|
||||
if (!VirtualProtect(mem + size + getpagesize(),
|
||||
getpagesize(), PAGE_NOACCESS, &ignore))
|
||||
if (!VirtualProtect(mem + size + startsize,
|
||||
pagesize, PAGE_NOACCESS, &ignore))
|
||||
fatal_error("Cannot allocate high guard page", (cell)mem);
|
||||
|
||||
start = (cell)mem + getpagesize();
|
||||
start = (cell)mem + startsize;
|
||||
end = start + size;
|
||||
}
|
||||
|
||||
|
|
|
@ -13,9 +13,14 @@ struct segment {
|
|||
cell size;
|
||||
cell end;
|
||||
|
||||
explicit segment(cell size, bool executable_p);
|
||||
explicit segment(cell size, bool executable_p, bool canary_page_p);
|
||||
~segment();
|
||||
|
||||
bool canary_p(cell addr)
|
||||
{
|
||||
return (addr >= start - 2*getpagesize() && addr < start - getpagesize());
|
||||
}
|
||||
|
||||
bool underflow_p(cell addr)
|
||||
{
|
||||
return (addr >= start - getpagesize() && addr < start);
|
||||
|
|
Loading…
Reference in New Issue