factor/vm/data_gc.h


/* Set by the -S command line argument */
bool secure_gc;
bool in_page(CELL fault, CELL area, CELL area_size, int offset);
void *safe_malloc(size_t size);
typedef struct {
CELL start;
CELL size;
} F_BOUNDED_BLOCK;
/* set up guard pages to check for under/overflow.
size must be a multiple of the page size */
F_BOUNDED_BLOCK *alloc_bounded_block(CELL size);
void dealloc_bounded_block(F_BOUNDED_BLOCK *block);
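/* usage sketch (illustrative): callers pass a whole number of pages and then
   treat block->start ... block->start + block->size as usable memory,
   presumably flanked by inaccessible guard pages. 4096 stands in for the
   real page size here:

       F_BOUNDED_BLOCK *block = alloc_bounded_block(4 * 4096);
       ... use the block ...
       dealloc_bounded_block(block);
*/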
CELL untagged_object_size(CELL pointer);
CELL unaligned_object_size(CELL pointer);
CELL object_size(CELL pointer);
CELL binary_payload_start(CELL pointer);
void primitive_data_room(void);
void primitive_size(void);
void primitive_begin_scan(void);
void primitive_next_object(void);
void primitive_end_scan(void);
CELL data_heap_start;
CELL data_heap_end;
/* card marking write barrier. a card is a byte storing a mark flag,
and the offset (in bytes) of the first object in the card.
the mark flag is set by the write barrier when an object in the
card has a slot written to.
the offset of the first object is set by the allocator.
*/
#define CARD_MARK_MASK 0x80
#define CARD_BASE_MASK 0x7f
typedef u8 F_CARD;
F_CARD *cards;
F_CARD *cards_end;
/* A card covers 128 bytes of the data heap, i.e. 7 address bits per card.
it is important that 7 bits is sufficient to represent every byte
offset within the card */
#define CARD_SIZE 128
#define CARD_BITS 7
#define ADDR_CARD_MASK (CARD_SIZE-1)
INLINE F_CARD card_marked(F_CARD c)
{
return c & CARD_MARK_MASK;
}
INLINE void unmark_card(F_CARD *c)
{
*c &= CARD_BASE_MASK;
}
INLINE void clear_card(F_CARD *c)
{
*c = CARD_BASE_MASK; /* invalid value */
}
INLINE u8 card_base(F_CARD c)
{
return c & CARD_BASE_MASK;
}
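/* worked example of the card byte encoding: a card byte of 0x90 has the
   mark flag (0x80) set and a base of 0x10, meaning the card is dirty and
   the first object in it starts 16 bytes past the card boundary.
   clear_card() stores CARD_BASE_MASK (0x7f) as a "no object recorded yet"
   sentinel, which the first real offset recorded by allot_barrier()
   replaces */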
#define ADDR_TO_CARD(a) (F_CARD*)(((CELL)(a) >> CARD_BITS) + cards_offset)
#define CARD_TO_ADDR(c) (CELL*)(((CELL)(c) - cards_offset)<<CARD_BITS)
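/* worked example of the mapping, assuming cards_offset is maintained (see
   update_cards_offset()) so that the card for data_heap_start is cards[0],
   and that data_heap_start is CARD_SIZE-aligned:

       ADDR_TO_CARD(data_heap_start + 1000) == &cards[1000 >> CARD_BITS]
                                            == &cards[7]
       CARD_TO_ADDR(&cards[7]) == (CELL*)(data_heap_start + 7 * CARD_SIZE)
                               == (CELL*)(data_heap_start + 896)
*/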
/* this is an inefficient write barrier. compiled definitions use a more
efficient one hand-coded in assembly. the write barrier must be called
any time a store might create a pointer from an object in an older
generation to an object in a younger one */
INLINE void write_barrier(CELL address)
{
F_CARD *c = ADDR_TO_CARD(address);
*c |= CARD_MARK_MASK;
}
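/* usage sketch (hypothetical code; the real slot accessors live elsewhere
   in the VM): a store of a possibly-young pointer into an object slot is
   followed by a write barrier call on the stored-into address:

       *(CELL*)slot_address = value;
       write_barrier(slot_address);
*/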
/* we need to remember the first object allocated in the card */
INLINE void allot_barrier(CELL address)
{
F_CARD *ptr = ADDR_TO_CARD(address);
F_CARD c = *ptr;
CELL b = card_base(c);
CELL a = (address & ADDR_CARD_MASK);
*ptr = (card_marked(c) | ((b < a) ? b : a));
}
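/* for example: after clear_card() the base is the 0x7f sentinel, so the
   first object allocated in a card records its offset; later objects in
   the same card sit at higher offsets, so the (b < a) test keeps the base
   pointing at that first object, and the mark flag is preserved either
   way */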
void unmark_cards(CELL from, CELL to);
void clear_cards(CELL from, CELL to);
void collect_cards(CELL gen);
/* generational copying GC divides memory into zones */
typedef struct {
/* start of zone */
CELL base;
/* allocation pointer */
CELL here;
/* end of zone */
CELL limit;
} F_ZONE;
/* total number of generations. */
CELL gen_count;
/* the 0th generation is where new objects are allocated. */
#define NURSERY 0
/* the oldest generation */
#define TENURED (gen_count-1)
DLLEXPORT F_ZONE *generations;
/* used during garbage collection only */
F_ZONE *newspace;
#define tenured generations[TENURED]
#define nursery generations[NURSERY]
/* spare semi-space; rotates with tenured. */
F_ZONE prior;
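/* for example, with gen_count == 3 the layout is:
   generations[0] is the nursery, where new objects are allocated,
   generations[1] is the intermediate (aging) generation,
   generations[2] is tenured, the oldest generation,
   and prior is the spare semi-space that rotates with tenured during a
   full collection */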
INLINE bool in_zone(F_ZONE *z, CELL pointer)
{
return pointer >= z->base && pointer < z->limit;
}
CELL init_zone(F_ZONE *z, CELL size, CELL base);
void init_data_heap(CELL gens,
CELL young_size,
CELL aging_size,
bool secure_gc_);
/* statistics */
s64 gc_time;
CELL minor_collections;
CELL cards_scanned;
/* only meaningful during a GC */
CELL collecting_gen;
CELL collecting_gen_start;
bool collecting_code;
/* test if the pointer is in the generation being collected, or a younger one.
init_data_heap() arranges things so that the older generations come first
in memory, so we only have to check that the pointer occurs at or after
the start of the generation being collected. */
#define COLLECTING_GEN(ptr) (collecting_gen_start <= ptr)
INLINE bool should_copy(CELL untagged)
{
if(collecting_gen == TENURED)
return !in_zone(newspace,untagged);
else
return (in_zone(&prior,untagged) || COLLECTING_GEN(untagged));
}
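/* concretely: when collecting the nursery, collecting_gen_start is the
   nursery's base, so only pointers into the nursery (or into the prior
   semi-space) are copied; when collecting tenured, everything not already
   moved into newspace is copied */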
CELL copy_object(CELL pointer);
#define COPY_OBJECT(lvalue) if(should_copy(lvalue)) lvalue = copy_object(lvalue)
INLINE void copy_handle(CELL *handle)
{
COPY_OBJECT(*handle);
}
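/* note that COPY_OBJECT evaluates its argument more than once, so it
   should only be given a simple lvalue; copy_handle() is the form used
   when a root is reached through a pointer, e.g. (illustrative, some_root
   being a hypothetical CELL variable):

       copy_handle(&some_root);
*/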
/* in case a generation fills up in the middle of a gc, we jump back
up to try collecting the next generation. */
jmp_buf gc_jmp;
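/* a sketch of how this is presumably used (the real code is in data_gc.c):

       if(setjmp(gc_jmp))
       {
           ... the target generation overflowed mid-copy; widen the
           collection to an older generation and start over ...
       }
*/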
/* A heap walk allows useful things to be done, like finding all
references to an object for debugging purposes. */
CELL heap_scan_ptr;
/* GC is off during heap walking */
bool gc_off;
void garbage_collection(CELL gen, bool code_gc);
#define REGISTER_ROOT(obj) rpush(obj)
#define UNREGISTER_ROOT(obj) obj = rpop()
#define REGISTER_ARRAY(obj) rpush(tag_object(obj))
#define UNREGISTER_ARRAY(obj) obj = untag_array_fast(rpop())
#define REGISTER_STRING(obj) rpush(tag_object(obj))
#define UNREGISTER_STRING(obj) obj = untag_string_fast(rpop())
#define REGISTER_C_STRING(obj) rpush(tag_object(((F_ARRAY *)obj) - 1))
#define UNREGISTER_C_STRING(obj) obj = ((char*)(untag_array_fast(rpop()) + 1))
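/* usage sketch: any local holding a tagged pointer must be registered
   before calling anything that can allocate (and thus trigger a GC) and
   unregistered afterwards, in reverse order since the roots live on a
   stack. first and second are hypothetical locals:

       REGISTER_ROOT(first);
       REGISTER_ROOT(second);
       ... code that may allocate ...
       UNREGISTER_ROOT(second);
       UNREGISTER_ROOT(first);
*/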
INLINE void *allot_zone(F_ZONE *z, CELL a)
{
CELL h = z->here;
z->here = h + align8(a);
allot_barrier(h);
return (void*)h;
}
INLINE void *allot(CELL a)
{
if(nursery.here + a > nursery.limit)
garbage_collection(NURSERY,false);
return allot_zone(&nursery,a);
}
/*
* It is up to the caller to fill in the object's fields in a meaningful
* fashion!
*/
INLINE void* allot_object(CELL type, CELL length)
{
CELL* object = allot(length);
*object = tag_header(type);
return object;
}
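/* usage sketch with hypothetical type and field names, where the first
   cell of F_EXAMPLE is the header written by allot_object(); the caller
   fills in every remaining field before the next allocation can move
   anything:

       F_EXAMPLE *obj = allot_object(EXAMPLE_TYPE,sizeof(F_EXAMPLE));
       obj->field_a = a;
       obj->field_b = b;
*/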
void update_cards_offset(void);
CELL collect_next(CELL scan);
void primitive_data_gc(void);
void primitive_gc_time(void);