2005-05-10 22:30:58 -04:00
|
|
|
/* macros for reading/writing memory, useful when working around
|
|
|
|
C's type system */
|
|
|
|
INLINE CELL get(CELL where)
|
|
|
|
{
|
|
|
|
return *((CELL*)where);
|
|
|
|
}
|
|
|
|
|
|
|
|
INLINE void put(CELL where, CELL what)
|
|
|
|
{
|
|
|
|
*((CELL*)where) = what;
|
|
|
|
}
|
|
|
|
|
|
|
|
INLINE u16 cget(CELL where)
|
|
|
|
{
|
|
|
|
return *((u16*)where);
|
|
|
|
}
|
|
|
|
|
|
|
|
INLINE void cput(CELL where, u16 what)
|
|
|
|
{
|
|
|
|
*((u16*)where) = what;
|
|
|
|
}
|
|
|
|
|
|
|
|
INLINE BYTE bget(CELL where)
|
|
|
|
{
|
|
|
|
return *((BYTE*)where);
|
|
|
|
}
|
|
|
|
|
|
|
|
INLINE void bput(CELL where, BYTE what)
|
|
|
|
{
|
|
|
|
*((BYTE*)where) = what;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* generational copying GC divides memory into zones */
|
2004-07-16 02:26:21 -04:00
|
|
|
typedef struct {
|
2005-05-10 22:30:58 -04:00
|
|
|
/* start of zone */
|
2004-07-16 02:26:21 -04:00
|
|
|
CELL base;
|
2005-05-10 22:30:58 -04:00
|
|
|
/* allocation pointer */
|
2004-07-16 02:26:21 -04:00
|
|
|
CELL here;
|
2005-05-10 22:30:58 -04:00
|
|
|
/* only for nursery: when it gets this full, call GC */
|
2004-07-29 17:18:41 -04:00
|
|
|
CELL alarm;
|
2005-05-10 22:30:58 -04:00
|
|
|
/* end of zone */
|
2004-07-16 02:26:21 -04:00
|
|
|
CELL limit;
|
|
|
|
} ZONE;
|
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
INLINE bool in_zone(ZONE* z, CELL pointer)
|
|
|
|
{
|
|
|
|
return pointer >= z->base && pointer < z->limit;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* total number of generations. */
|
|
|
|
#define GC_GENERATIONS 3
|
|
|
|
/* the 0th generation is where new objects are allocated. */
|
|
|
|
#define NURSERY 0
|
|
|
|
/* the oldest generation */
|
|
|
|
#define TENURED (GC_GENERATIONS-1)
|
|
|
|
|
|
|
|
ZONE generations[GC_GENERATIONS];
|
2005-05-11 00:43:52 -04:00
|
|
|
ZONE *allot_zone;
|
2005-05-10 22:30:58 -04:00
|
|
|
|
2005-05-11 00:43:52 -04:00
|
|
|
#define tenured generations[TENURED]
|
2005-05-12 01:02:39 -04:00
|
|
|
#define nursery generations[NURSERY]
|
2005-05-10 22:30:58 -04:00
|
|
|
|
2005-05-11 00:43:52 -04:00
|
|
|
CELL heap_start;
|
2005-05-12 01:02:39 -04:00
|
|
|
CELL heap_end;
|
2005-05-10 22:30:58 -04:00
|
|
|
|
2005-05-11 00:43:52 -04:00
|
|
|
/* spare semi-space; rotates with tenured. */
|
2004-08-31 20:31:16 -04:00
|
|
|
ZONE prior;
|
2004-07-16 02:26:21 -04:00
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
/* card marking write barrier. a card is a byte storing a mark flag,
|
|
|
|
and the offset (in cells) of the first object in the card.
|
2004-08-29 03:20:19 -04:00
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
the mark flag is set by the write barrier when an object in the
|
|
|
|
card has a slot written to.
|
2004-07-16 02:26:21 -04:00
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
the offset of the first object is set by the allocator.
|
|
|
|
*/
|
|
|
|
#define CARD_MARK_MASK 0x80
|
|
|
|
#define CARD_BASE_MASK 0x7f
|
|
|
|
typedef u8 CARD;
|
|
|
|
CARD *cards;
|
|
|
|
CARD *cards_end;
|
2004-07-16 02:26:21 -04:00
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
/* A card is 16 bytes (128 bits), 5 address bits per card.
|
|
|
|
it is important that 7 bits is sufficient to represent every
|
|
|
|
offset within the card */
|
2005-05-12 01:02:39 -04:00
|
|
|
#define CARD_SIZE 128
|
|
|
|
#define CARD_BITS 7
|
|
|
|
#define ADDR_CARD_MASK (CARD_SIZE-1)
|
2005-05-10 22:30:58 -04:00
|
|
|
|
|
|
|
INLINE CARD card_marked(CARD c)
|
2004-07-16 02:26:21 -04:00
|
|
|
{
|
2005-05-10 22:30:58 -04:00
|
|
|
return c & CARD_MARK_MASK;
|
2004-07-16 02:26:21 -04:00
|
|
|
}
|
|
|
|
|
2005-05-12 01:02:39 -04:00
|
|
|
INLINE void unmark_card(CARD *c)
|
2004-08-16 20:42:30 -04:00
|
|
|
{
|
2005-05-12 01:02:39 -04:00
|
|
|
*c &= CARD_BASE_MASK;
|
2004-08-16 20:42:30 -04:00
|
|
|
}
|
|
|
|
|
2005-05-12 01:02:39 -04:00
|
|
|
INLINE void clear_card(CARD *c)
|
2004-07-16 02:26:21 -04:00
|
|
|
{
|
2005-05-12 01:02:39 -04:00
|
|
|
*c = CARD_BASE_MASK; /* invalid value */
|
2004-07-16 02:26:21 -04:00
|
|
|
}
|
|
|
|
|
2005-05-12 01:02:39 -04:00
|
|
|
INLINE u8 card_base(CARD c)
|
2004-07-16 02:26:21 -04:00
|
|
|
{
|
2005-05-12 01:02:39 -04:00
|
|
|
return c & CARD_BASE_MASK;
|
2004-07-16 02:26:21 -04:00
|
|
|
}
/* map a heap address to its card table entry, and back. the card
table is indexed by (address - heap_start) >> CARD_BITS */
#define ADDR_TO_CARD(a) (CARD*)((((CELL)a-heap_start)>>CARD_BITS)+(CELL)cards)
#define CARD_TO_ADDR(c) (CELL*)((((CELL)c-(CELL)cards)<<CARD_BITS)+heap_start)
2005-05-10 22:30:58 -04:00
|
|
|
|
|
|
|
/* this is an inefficient write barrier. compiled definitions use a more
|
|
|
|
efficient one hand-coded in assembly. the write barrier must be called
|
|
|
|
any time we are potentially storing a pointer from an older generation
|
|
|
|
to a younger one */
|
|
|
|
INLINE void write_barrier(CELL address)
|
2004-07-16 02:26:21 -04:00
|
|
|
{
|
2005-05-10 22:30:58 -04:00
|
|
|
CARD *c = ADDR_TO_CARD(address);
|
|
|
|
*c |= CARD_MARK_MASK;
|
2004-07-16 02:26:21 -04:00
|
|
|
}
|
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
/* we need to remember the first object allocated in the card */
|
|
|
|
INLINE void allot_barrier(CELL address)
|
2004-07-16 02:26:21 -04:00
|
|
|
{
|
2005-05-12 01:02:39 -04:00
|
|
|
CARD *ptr = ADDR_TO_CARD(address);
|
2005-05-10 22:30:58 -04:00
|
|
|
/* we need to remember the first object allocated in the
|
|
|
|
card */
|
2005-05-12 01:02:39 -04:00
|
|
|
CARD c = *ptr;
|
|
|
|
*ptr = (card_marked(c) | MIN(card_base(c),(address & ADDR_CARD_MASK)));
|
2004-07-16 02:26:21 -04:00
|
|
|
}
|
|
|
|
|
2005-05-10 22:30:58 -04:00
|
|
|
bool allot_profiling;
|
|
|
|
|
|
|
|
/* set up guard pages to check for under/overflow.
|
|
|
|
size must be a multiple of the page size */
|
|
|
|
void* alloc_guarded(CELL size);
|
|
|
|
|
|
|
|
CELL init_zone(ZONE *z, CELL size, CELL base);
|
|
|
|
void init_arena(CELL young_size, CELL aging_size);
|
|
|
|
void flip_zones();
|
|
|
|
|
|
|
|
void allot_profile_step(CELL a);
|
|
|
|
|
|
|
|
INLINE CELL align8(CELL a)
|
2004-07-23 20:35:13 -04:00
|
|
|
{
|
2005-05-10 22:30:58 -04:00
|
|
|
return ((a & 7) == 0) ? a : ((a + 8) & ~7);
|
2004-07-23 20:35:13 -04:00
|
|
|
}
|
|
|
|
|
2005-05-12 01:02:39 -04:00
|
|
|
INLINE void *allot(CELL a)
|
2004-07-23 20:35:13 -04:00
|
|
|
{
|
2005-05-11 00:43:52 -04:00
|
|
|
CELL h = allot_zone->here;
|
2005-05-10 22:30:58 -04:00
|
|
|
allot_barrier(h);
|
2005-05-11 00:43:52 -04:00
|
|
|
allot_zone->here = h + align8(a);
|
2005-05-10 22:30:58 -04:00
|
|
|
if(allot_profiling)
|
|
|
|
allot_profile_step(align8(a));
|
|
|
|
return (void*)h;
|
2004-07-23 20:35:13 -04:00
|
|
|
}
|
|
|
|
|
2004-07-16 02:26:21 -04:00
|
|
|
bool in_zone(ZONE* z, CELL pointer);
|
2004-07-24 00:54:57 -04:00
|
|
|
|
|
|
|
void primitive_room(void);
|
2004-08-29 03:20:19 -04:00
|
|
|
void primitive_allot_profiling(void);
|
2004-09-18 22:29:29 -04:00
|
|
|
void primitive_address(void);
|
2005-02-18 20:37:01 -05:00
|
|
|
void primitive_size(void);
|
|
|
|
|
|
|
|
/* A heap walk allows useful things to be done, like finding all
|
|
|
|
references to an object for debugging purposes. */
|
|
|
|
CELL heap_scan_ptr;
|
|
|
|
|
|
|
|
/* End of heap when walk was started; prevents infinite loop if
|
|
|
|
walk consing */
|
|
|
|
CELL heap_scan_end;
|
|
|
|
|
|
|
|
void primitive_begin_scan(void);
|
|
|
|
void primitive_next_object(void);
|
|
|
|
void primitive_end_scan(void);
|