vm: replace block comments /**/ with line comments //

char-rename
Alexander Iljin 2016-08-21 17:26:04 +03:00 committed by Björn Lindqvist
parent e0acf4f328
commit 0d57734dab
93 changed files with 1332 additions and 1351 deletions

View File

@ -19,11 +19,10 @@ struct aging_policy {
};
void factor_vm::collect_aging() {
/* Promote objects referenced from tenured space to tenured space, copy
everything else to the aging semi-space, and reset the nursery pointer. */
// Promote objects referenced from tenured space to tenured space, copy
// everything else to the aging semi-space, and reset the nursery pointer.
{
/* Change the op so that if we fail here, an assertion will be
raised. */
// Change the op so that if we fail here, an assertion will be raised.
current_gc->op = collect_to_tenured_op;
gc_workhorse<tenured_space, to_tenured_policy>
@ -48,7 +47,7 @@ void factor_vm::collect_aging() {
visitor.visit_mark_stack(&mark_stack);
}
{
/* If collection fails here, do a to_tenured collection. */
// If collection fails here, do a to_tenured collection.
current_gc->op = collect_aging_op;
std::swap(data->aging, data->aging_semispace);

View File

@ -2,8 +2,8 @@
namespace factor {
/* gets the address of an object representing a C pointer, with the
intention of storing the pointer across code which may potentially GC. */
// gets the address of an object representing a C pointer, with the
// intention of storing the pointer across code which may potentially GC.
char* factor_vm::pinned_alien_offset(cell obj) {
switch (TAG(obj)) {
case ALIEN_TYPE: {
@ -19,12 +19,12 @@ char* factor_vm::pinned_alien_offset(cell obj) {
return NULL;
default:
type_error(ALIEN_TYPE, obj);
return NULL; /* can't happen */
return NULL; // can't happen
}
}
/* make an alien */
/* Allocates memory */
// make an alien
// Allocates memory
cell factor_vm::allot_alien(cell delegate_, cell displacement) {
if (displacement == 0)
return delegate_;
@ -46,13 +46,13 @@ cell factor_vm::allot_alien(cell delegate_, cell displacement) {
return new_alien.value();
}
/* Allocates memory */
// Allocates memory
cell factor_vm::allot_alien(cell address) {
return allot_alien(false_object, address);
}
/* make an alien pointing at an offset of another alien */
/* Allocates memory */
// make an alien pointing at an offset of another alien
// Allocates memory
void factor_vm::primitive_displaced_alien() {
cell alien = ctx->pop();
cell displacement = to_cell(ctx->pop());
@ -69,20 +69,20 @@ void factor_vm::primitive_displaced_alien() {
}
}
/* address of an object representing a C pointer. Explicitly throw an error
if the object is a byte array, as a sanity check. */
/* Allocates memory (from_unsigned_cell can allocate) */
// address of an object representing a C pointer. Explicitly throw an error
// if the object is a byte array, as a sanity check.
// Allocates memory (from_unsigned_cell can allocate)
void factor_vm::primitive_alien_address() {
ctx->replace(from_unsigned_cell((cell)pinned_alien_offset(ctx->peek())));
}
/* pop ( alien n ) from datastack, return alien's address plus n */
// pop ( alien n ) from datastack, return alien's address plus n
void* factor_vm::alien_pointer() {
fixnum offset = to_fixnum(ctx->pop());
return alien_offset(ctx->pop()) + offset;
}
/* define words to read/write values at an alien address */
// define words to read/write values at an alien address
#define DEFINE_ALIEN_ACCESSOR(name, type, from, to) \
VM_C_API void primitive_alien_##name(factor_vm * parent) { \
parent->ctx->push(parent->from(*(type*)(parent->alien_pointer()))); \
@ -95,8 +95,8 @@ void* factor_vm::alien_pointer() {
EACH_ALIEN_PRIMITIVE(DEFINE_ALIEN_ACCESSOR)
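
The accessor definitions above use the classic X-macro pattern: one list macro names every accessor/type pair, and `DEFINE_ALIEN_ACCESSOR` is applied to the whole list so each primitive is stamped out once (the real macro, per the comment, generates a writer alongside each reader). A minimal standalone sketch of the same technique, with invented names (`EACH_RAW_ACCESSOR`, `raw_read_*`) rather than Factor's actual primitive list:

```cpp
#include <cstdint>
#include <cstdio>

// One list macro enumerates (name, C type) pairs...
#define EACH_RAW_ACCESSOR(M) \
  M(signed_1, int8_t)        \
  M(signed_4, int32_t)       \
  M(double, double)

// ...and a generator macro applied to the list stamps out one reader per type.
#define DEFINE_RAW_READER(name, type)      \
  type raw_read_##name(const void* addr) { \
    return *(const type*)addr;             \
  }

EACH_RAW_ACCESSOR(DEFINE_RAW_READER)

int main() {
  int32_t x = -42;
  double d = 2.5;
  std::printf("%d %g\n", (int)raw_read_signed_4(&x), raw_read_double(&d));
}
```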
/* open a native library and push a handle */
/* Allocates memory */
// open a native library and push a handle
// Allocates memory
void factor_vm::primitive_dlopen() {
data_root<byte_array> path(ctx->pop(), this);
check_tagged(path);
@ -106,8 +106,8 @@ void factor_vm::primitive_dlopen() {
ctx->push(library.value());
}
/* look up a symbol in a native library */
/* Allocates memory */
// look up a symbol in a native library
// Allocates memory
void factor_vm::primitive_dlsym() {
data_root<object> library(ctx->pop(), this);
data_root<byte_array> name(ctx->peek(), this);
@ -126,8 +126,8 @@ void factor_vm::primitive_dlsym() {
ctx->replace(allot_alien(ffi_dlsym(NULL, sym)));
}
/* look up a symbol in a native library */
/* Allocates memory */
// look up a symbol in a native library
// Allocates memory
void factor_vm::primitive_dlsym_raw() {
data_root<object> library(ctx->pop(), this);
data_root<byte_array> name(ctx->peek(), this);
@ -146,7 +146,7 @@ void factor_vm::primitive_dlsym_raw() {
ctx->replace(allot_alien(ffi_dlsym_raw(NULL, sym)));
}
/* close a native library handle */
// close a native library handle
void factor_vm::primitive_dlclose() {
dll* d = untag_check<dll>(ctx->pop());
if (d->handle != NULL)
@ -161,7 +161,7 @@ void factor_vm::primitive_dll_validp() {
ctx->replace(special_objects[OBJ_CANONICAL_TRUE]);
}
/* gets the address of an object representing a C pointer */
// gets the address of an object representing a C pointer
char* factor_vm::alien_offset(cell obj) {
switch (TAG(obj)) {
case BYTE_ARRAY_TYPE:
@ -172,7 +172,7 @@ char* factor_vm::alien_offset(cell obj) {
return NULL;
default:
type_error(ALIEN_TYPE, obj);
return NULL; /* can't happen */
return NULL; // can't happen
}
}

View File

@ -1,22 +1,20 @@
namespace factor {
/*
* It is up to the caller to fill in the object's fields in a meaningful
* fashion!
*/
/* Allocates memory */
// It is up to the caller to fill in the object's fields in a meaningful
// fashion!
// Allocates memory
inline object* factor_vm::allot_object(cell type, cell size) {
FACTOR_ASSERT(!current_gc);
bump_allocator *nursery = data->nursery;
/* If the object is bigger than the nursery, allocate it in tenured
space */
// If the object is bigger than the nursery, allocate it in tenured space
if (size >= nursery->size)
return allot_large_object(type, size);
/* If the object is smaller than the nursery, allocate it in the nursery,
after a GC if needed */
// If the object is smaller than the nursery, allocate it in the nursery,
// after a GC if needed
if (nursery->here + size > nursery->end)
primitive_minor_gc();
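
The allocation policy spelled out in the comments above — oversized objects bypass the nursery entirely, and a full nursery triggers a minor collection before the bump allocation is retried — can be illustrated with a minimal toy bump allocator. Everything here (`toy_nursery`, the `malloc` fallback, the `collect` stand-in) is invented for the sketch and is not the VM's code:

```cpp
#include <cstdint>
#include <cstdlib>
#include <vector>

// Toy bump allocator mirroring the decision above.
struct toy_nursery {
  std::vector<uint8_t> storage;
  size_t here = 0;  // bump pointer

  explicit toy_nursery(size_t size) : storage(size) {}
  size_t size() const { return storage.size(); }

  void collect() { here = 0; }  // stand-in for a minor GC that empties the nursery

  void* allot(size_t bytes) {
    if (bytes >= size())
      return std::malloc(bytes);      // stand-in for allot_large_object (leaked in this toy)
    if (here + bytes > size())
      collect();                      // stand-in for primitive_minor_gc
    void* p = storage.data() + here;  // bump allocation: just advance the pointer
    here += bytes;
    return p;
  }
};

int main() {
  toy_nursery nursery(1024);
  void* a = nursery.allot(100);   // fits: bump-allocated
  void* b = nursery.allot(2048);  // larger than the nursery: fallback path
  (void)a; (void)b;
}
```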

View File

@ -2,7 +2,7 @@
namespace factor {
/* Allocates memory */
// Allocates memory
array* factor_vm::allot_array(cell capacity, cell fill_) {
data_root<object> fill(fill_, this);
array* new_array = allot_uninitialized_array<array>(capacity);
@ -10,7 +10,7 @@ array* factor_vm::allot_array(cell capacity, cell fill_) {
return new_array;
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_array() {
cell fill = ctx->pop();
cell capacity = unbox_array_size();
@ -18,7 +18,7 @@ void factor_vm::primitive_array() {
ctx->push(tag<array>(new_array));
}
/* Allocates memory */
// Allocates memory
cell factor_vm::allot_array_4(cell v1_, cell v2_, cell v3_, cell v4_) {
data_root<object> v1(v1_, this);
data_root<object> v2(v2_, this);
@ -32,7 +32,7 @@ cell factor_vm::allot_array_4(cell v1_, cell v2_, cell v3_, cell v4_) {
return a.value();
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_resize_array() {
data_root<array> a(ctx->pop(), this);
check_tagged(a);
@ -40,7 +40,7 @@ void factor_vm::primitive_resize_array() {
ctx->push(tag<array>(reallot_array(a.untagged(), capacity)));
}
/* Allocates memory */
// Allocates memory
cell factor_vm::std_vector_to_array(std::vector<cell>& elements) {
cell element_count = elements.size();
@ -57,7 +57,7 @@ cell factor_vm::std_vector_to_array(std::vector<cell>& elements) {
return objects.value();
}
/* Allocates memory */
// Allocates memory
void growable_array::add(cell elt_) {
factor_vm* parent = elements.parent;
data_root<object> elt(elt_, parent);
@ -67,7 +67,7 @@ void growable_array::add(cell elt_) {
parent->set_array_nth(elements.untagged(), count++, elt.value());
}
/* Allocates memory */
// Allocates memory
void growable_array::append(array* elts_) {
factor_vm* parent = elements.parent;
data_root<array> elts(elts_, parent);
@ -82,7 +82,7 @@ void growable_array::append(array* elts_) {
array_nth(elts.untagged(), index));
}
/* Allocates memory */
// Allocates memory
void growable_array::trim() {
factor_vm* parent = elements.parent;
elements = parent->reallot_array(elements.untagged(), count);

View File

@ -18,7 +18,7 @@ struct growable_array {
cell count;
data_root<array> elements;
/* Allocates memory */
// Allocates memory
growable_array(factor_vm* parent, cell capacity = 10)
: count(0),
elements(parent->allot_array(capacity, false_object), parent) {}
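
`growable_array` (and `growable_byte_array` later in this commit) follows the familiar amortized-doubling pattern: a logical `count` tracked separately from the backing capacity, capacity doubled on overflow, and a final `trim()` back to the exact size. A rough standalone sketch of that strategy, using `std::vector` as a stand-in for the GC-managed array and an invented class name:

```cpp
#include <cstddef>
#include <vector>

// Minimal doubling-growth container in the spirit of growable_array.
template <typename T> struct toy_growable {
  size_t count = 0;
  std::vector<T> elements;

  explicit toy_growable(size_t capacity = 10) : elements(capacity) {}

  void add(const T& value) {
    if (count == elements.size())
      elements.resize(elements.empty() ? 1 : elements.size() * 2);  // amortized O(1) appends
    elements[count++] = value;
  }

  void trim() { elements.resize(count); }  // drop the unused slack at the end
};

int main() {
  toy_growable<int> xs;
  for (int i = 0; i < 100; i++)
    xs.add(i);
  xs.trim();  // backing size now equals count == 100
}
```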

View File

@ -1,59 +1,56 @@
/*
Copyright (C) 1989-94 Massachusetts Institute of Technology
Portions copyright (C) 2004-2008 Slava Pestov
// Copyright (C) 1989-94 Massachusetts Institute of Technology
// Portions copyright (C) 2004-2008 Slava Pestov
This material was developed by the Scheme project at the Massachusetts
Institute of Technology, Department of Electrical Engineering and
Computer Science. Permission to copy and modify this software, to
redistribute either the original software or a modified version, and
to use this software for any purpose is granted, subject to the
following restrictions and understandings.
// This material was developed by the Scheme project at the Massachusetts
// Institute of Technology, Department of Electrical Engineering and
// Computer Science. Permission to copy and modify this software, to
// redistribute either the original software or a modified version, and
// to use this software for any purpose is granted, subject to the
// following restrictions and understandings.
1. Any copy made of this software must include this copyright notice
in full.
// 1. Any copy made of this software must include this copyright notice
// in full.
2. Users of this software agree to make their best efforts (a) to
return to the MIT Scheme project any improvements or extensions that
they make, so that these may be included in future releases; and (b)
to inform MIT of noteworthy uses of this software.
// 2. Users of this software agree to make their best efforts (a) to
// return to the MIT Scheme project any improvements or extensions that
// they make, so that these may be included in future releases; and (b)
// to inform MIT of noteworthy uses of this software.
3. All materials developed as a consequence of the use of this
software shall duly acknowledge such use, in accordance with the usual
standards of acknowledging credit in academic research.
// 3. All materials developed as a consequence of the use of this
// software shall duly acknowledge such use, in accordance with the usual
// standards of acknowledging credit in academic research.
4. MIT has made no warrantee or representation that the operation of
this software will be error-free, and MIT is under no obligation to
provide any services, by way of maintenance, update, or otherwise.
// 4. MIT has made no warrantee or representation that the operation of
// this software will be error-free, and MIT is under no obligation to
// provide any services, by way of maintenance, update, or otherwise.
5. In conjunction with products arising from the use of this material,
there shall be no use of the name of the Massachusetts Institute of
Technology nor of any adaptation thereof in any advertising,
promotional, or sales literature without prior written consent from
MIT in each case. */
// 5. In conjunction with products arising from the use of this material,
// there shall be no use of the name of the Massachusetts Institute of
// Technology nor of any adaptation thereof in any advertising,
// promotional, or sales literature without prior written consent from
// MIT in each case.
/* Changes for Scheme 48:
* - Converted to ANSI.
* - Added bitwise operations.
* - Added s48 to the beginning of all externally visible names.
* - Cached the bignum representations of -1, 0, and 1.
*/
// Changes for Scheme 48:
// * - Converted to ANSI.
// * - Added bitwise operations.
// * - Added s48 to the beginning of all externally visible names.
// * - Cached the bignum representations of -1, 0, and 1.
/* Changes for Factor:
* - Adapt bignumint.h for Factor memory manager
* - Add more bignum <-> C type conversions
* - Remove unused functions
* - Add local variable GC root recording
* - Remove s48 prefix from function names
* - Various fixes for Win64
* - Port to C++
* - Added bignum_gcd implementation
*/
// Changes for Factor:
// * - Adapt bignumint.h for Factor memory manager
// * - Add more bignum <-> C type conversions
// * - Remove unused functions
// * - Add local variable GC root recording
// * - Remove s48 prefix from function names
// * - Various fixes for Win64
// * - Port to C++
// * - Added bignum_gcd implementation
#include "master.hpp"
namespace factor {
/* Exports */
// Exports
int factor_vm::bignum_equal_p(bignum* x, bignum* y) {
return ((BIGNUM_ZERO_P(x))
@ -79,7 +76,7 @@ enum bignum_comparison factor_vm::bignum_compare(bignum* x, bignum* y) {
: (bignum_compare_unsigned(x, y))));
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_add(bignum* x, bignum* y) {
return (
(BIGNUM_ZERO_P(x)) ? (y) : (BIGNUM_ZERO_P(y))
@ -91,7 +88,7 @@ bignum* factor_vm::bignum_add(bignum* x, bignum* y) {
: (bignum_add_unsigned(x, y, 0)))));
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_subtract(bignum* x, bignum* y) {
return ((BIGNUM_ZERO_P(x))
? ((BIGNUM_ZERO_P(y)) ? (y) : (bignum_new_sign(
@ -113,7 +110,7 @@ bignum *factor_vm::bignum_square(bignum* x_)
return bignum_multiply(x_, x_);
}
#else
/* Allocates memory */
// Allocates memory
bignum *factor_vm::bignum_square(bignum* x_)
{
data_root<bignum> x(x_, this);
@ -157,7 +154,7 @@ bignum *factor_vm::bignum_square(bignum* x_)
}
#endif
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_multiply(bignum* x, bignum* y) {
#ifndef _WIN64
@ -191,7 +188,7 @@ bignum* factor_vm::bignum_multiply(bignum* x, bignum* y) {
return (bignum_multiply_unsigned(x, y, negative_p));
}
/* Allocates memory */
// Allocates memory
void factor_vm::bignum_divide(bignum* numerator, bignum* denominator,
bignum** quotient, bignum** remainder) {
if (BIGNUM_ZERO_P(denominator)) {
@ -244,7 +241,7 @@ void factor_vm::bignum_divide(bignum* numerator, bignum* denominator,
}
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_quotient(bignum* numerator, bignum* denominator) {
if (BIGNUM_ZERO_P(denominator)) {
divide_by_zero_error();
@ -262,7 +259,7 @@ bignum* factor_vm::bignum_quotient(bignum* numerator, bignum* denominator) {
case bignum_comparison_less:
return (BIGNUM_ZERO());
case bignum_comparison_greater:
default: /* to appease gcc -Wall */
default: // to appease gcc -Wall
{
bignum* quotient;
if ((BIGNUM_LENGTH(denominator)) == 1) {
@ -285,7 +282,7 @@ bignum* factor_vm::bignum_quotient(bignum* numerator, bignum* denominator) {
}
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_remainder(bignum* numerator, bignum* denominator) {
if (BIGNUM_ZERO_P(denominator)) {
divide_by_zero_error();
@ -299,7 +296,7 @@ bignum* factor_vm::bignum_remainder(bignum* numerator, bignum* denominator) {
case bignum_comparison_less:
return numerator;
case bignum_comparison_greater:
default: /* to appease gcc -Wall */
default: // to appease gcc -Wall
{
bignum* remainder;
if ((BIGNUM_LENGTH(denominator)) == 1) {
@ -321,9 +318,9 @@ bignum* factor_vm::bignum_remainder(bignum* numerator, bignum* denominator) {
}
}
/* cell_to_bignum, fixnum_to_bignum, long_long_to_bignum, ulong_long_to_bignum
*/
/* Allocates memory */
// cell_to_bignum, fixnum_to_bignum, long_long_to_bignum, ulong_long_to_bignum
// Allocates memory
#define FOO_TO_BIGNUM(name, type, stype, utype) \
bignum* factor_vm::name##_to_bignum(type n) { \
int negative_p; \
@ -366,8 +363,8 @@ FOO_TO_BIGNUM(fixnum, fixnum, fixnum, cell)
FOO_TO_BIGNUM(long_long, int64_t, int64_t, uint64_t)
FOO_TO_BIGNUM(ulong_long, uint64_t, int64_t, uint64_t)
/* cannot allocate memory */
/* bignum_to_cell, fixnum_to_cell, long_long_to_cell, ulong_long_to_cell */
// cannot allocate memory
// bignum_to_cell, fixnum_to_cell, long_long_to_cell, ulong_long_to_cell
#define BIGNUM_TO_FOO(name, type, stype, utype) \
type bignum_to_##name(bignum* bn) { \
if (BIGNUM_ZERO_P(bn)) \
@ -405,7 +402,7 @@ cell bignum_maybe_to_fixnum(bignum* bn) {
return tag<bignum>(bn);
}
/* cannot allocate memory */
// cannot allocate memory
fixnum factor_vm::bignum_to_fixnum_strict(bignum* bn) {
if (!bignum_fits_fixnum_p(bn)) {
@ -426,7 +423,7 @@ fixnum factor_vm::bignum_to_fixnum_strict(bignum* bn) {
#define inf std::numeric_limits<double>::infinity()
/* Allocates memory */
// Allocates memory
bignum* factor_vm::double_to_bignum(double x) {
if (x == inf || x == -inf || x != x)
return (BIGNUM_ZERO());
@ -461,7 +458,7 @@ bignum* factor_vm::double_to_bignum(double x) {
#undef DTB_WRITE_DIGIT
/* Comparisons */
// Comparisons
int factor_vm::bignum_equal_p_unsigned(bignum* x, bignum* y) {
bignum_length_type length = (BIGNUM_LENGTH(x));
@ -502,9 +499,9 @@ enum bignum_comparison factor_vm::bignum_compare_unsigned(bignum* x,
return (bignum_comparison_equal);
}
/* Addition */
// Addition
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_add_unsigned(bignum* x_, bignum* y_, int negative_p) {
data_root<bignum> x(x_, this);
@ -558,9 +555,9 @@ bignum* factor_vm::bignum_add_unsigned(bignum* x_, bignum* y_, int negative_p) {
}
}
/* Subtraction */
// Subtraction
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_subtract_unsigned(bignum* x_, bignum* y_) {
data_root<bignum> x(x_, this);
@ -622,13 +619,13 @@ bignum* factor_vm::bignum_subtract_unsigned(bignum* x_, bignum* y_) {
}
}
/* Multiplication
Maximum value for product_low or product_high:
((R * R) + (R * (R - 2)) + (R - 1))
Maximum value for carry: ((R * (R - 1)) + (R - 1))
where R == BIGNUM_RADIX_ROOT */
// Multiplication
// Maximum value for product_low or product_high:
// ((R * R) + (R * (R - 2)) + (R - 1))
// Maximum value for carry: ((R * (R - 1)) + (R - 1))
// where R == BIGNUM_RADIX_ROOT
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_multiply_unsigned(bignum* x_, bignum* y_,
int negative_p) {
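
The digit bounds quoted in the Multiplication comment above can be sanity-checked with a little algebra. The quoted maximum `R*R + R*(R-2) + (R-1)` equals `(R² − 1) + (R − 1)² + (R − 1)`, which is consistent with an inner step that adds a full digit, one half-digit product, and a half-digit carry; and since `BIGNUM_DIGIT_LENGTH` reserves two bits of the machine word (see the header later in this commit), a word can hold values up to `4R² − 1`, where `R = BIGNUM_RADIX_ROOT` and `R² = BIGNUM_RADIX`. A short check, under those assumptions:

```latex
% Checking the stated maxima against the available headroom (R = BIGNUM_RADIX_ROOT).
\begin{align*}
  \text{product}_{\max} &= R\cdot R + R\,(R-2) + (R-1) = 2R^{2} - R - 1 \;<\; 2R^{2},\\
  \text{carry}_{\max}   &= R\,(R-1) + (R-1)           = R^{2} - 1      \;<\; R^{2}.
\end{align*}
% A digit word with two guard bits holds values up to 4R^2 - 1, so both
% quantities fit in a single bignum_digit_type without overflow.
```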
@ -688,7 +685,7 @@ bignum* factor_vm::bignum_multiply_unsigned(bignum* x_, bignum* y_,
}
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_multiply_unsigned_small_factor(bignum* x_,
bignum_digit_type y,
int negative_p) {
@ -740,23 +737,22 @@ void factor_vm::bignum_destructive_scale_up(bignum* bn,
(*scan++) = (HD_CONS((HD_LOW(product_high)), (HD_LOW(product_low))));
carry = (HD_HIGH(product_high));
}
/* A carry here would be an overflow, i.e. it would not fit.
Hopefully the callers allocate enough space that this will
never happen.
*/
// A carry here would be an overflow, i.e. it would not fit.
// Hopefully the callers allocate enough space that this will
// never happen.
BIGNUM_ASSERT(carry == 0);
return;
#undef product_high
}
/* Division */
// Division
/* For help understanding this algorithm, see:
Knuth, Donald E., "The Art of Computer Programming",
volume 2, "Seminumerical Algorithms"
section 4.3.1, "Multiple-Precision Arithmetic". */
// For help understanding this algorithm, see:
// Knuth, Donald E., "The Art of Computer Programming",
// volume 2, "Seminumerical Algorithms"
// section 4.3.1, "Multiple-Precision Arithmetic".
/* Allocates memory */
// Allocates memory
void factor_vm::bignum_divide_unsigned_large_denominator(
bignum* numerator_, bignum* denominator_,
bignum** quotient, bignum** remainder,
@ -843,26 +839,26 @@ void factor_vm::bignum_divide_unsigned_normalized(bignum* u, bignum* v,
bignum_digit_type* q_scan = NULL;
bignum_digit_type v1 = (v_end[-1]);
bignum_digit_type v2 = (v_end[-2]);
bignum_digit_type ph; /* high half of double-digit product */
bignum_digit_type pl; /* low half of double-digit product */
bignum_digit_type ph; // high half of double-digit product
bignum_digit_type pl; // low half of double-digit product
bignum_digit_type guess;
bignum_digit_type gh; /* high half-digit of guess */
bignum_digit_type ch; /* high half of double-digit comparand */
bignum_digit_type gh; // high half-digit of guess
bignum_digit_type ch; // high half of double-digit comparand
bignum_digit_type v2l = (HD_LOW(v2));
bignum_digit_type v2h = (HD_HIGH(v2));
bignum_digit_type cl; /* low half of double-digit comparand */
#define gl ph /* low half-digit of guess */
bignum_digit_type cl; // low half of double-digit comparand
#define gl ph // low half-digit of guess
#define uj pl
#define qj ph
bignum_digit_type gm; /* memory loc for reference parameter */
bignum_digit_type gm; // memory loc for reference parameter
if (q != BIGNUM_OUT_OF_BAND)
q_scan = ((BIGNUM_START_PTR(q)) + (BIGNUM_LENGTH(q)));
while (u_scan_limit < u_scan) {
uj = (*--u_scan);
if (uj != v1) {
/* comparand =
(((((uj * BIGNUM_RADIX) + uj1) % v1) * BIGNUM_RADIX) + uj2);
guess = (((uj * BIGNUM_RADIX) + uj1) / v1); */
// comparand =
// (((((uj * BIGNUM_RADIX) + uj1) % v1) * BIGNUM_RADIX) + uj2);
// guess = (((uj * BIGNUM_RADIX) + uj1) / v1);
cl = (u_scan[-2]);
ch = (bignum_digit_divide(uj, (u_scan[-1]), v1, (&gm)));
guess = gm;
@ -872,20 +868,20 @@ void factor_vm::bignum_divide_unsigned_normalized(bignum* u, bignum* v,
guess = (BIGNUM_RADIX - 1);
}
while (1) {
/* product = (guess * v2); */
// product = (guess * v2);
gl = (HD_LOW(guess));
gh = (HD_HIGH(guess));
pl = (v2l * gl);
ph = ((v2l * gh) + (v2h * gl) + (HD_HIGH(pl)));
pl = (HD_CONS((HD_LOW(ph)), (HD_LOW(pl))));
ph = ((v2h * gh) + (HD_HIGH(ph)));
/* if (comparand >= product) */
// if (comparand >= product)
if ((ch > ph) || ((ch == ph) && (cl >= pl)))
break;
guess -= 1;
/* comparand += (v1 << BIGNUM_DIGIT_LENGTH) */
// comparand += (v1 << BIGNUM_DIGIT_LENGTH)
ch += v1;
/* if (comparand >= (BIGNUM_RADIX * BIGNUM_RADIX)) */
// if (comparand >= (BIGNUM_RADIX * BIGNUM_RADIX))
if (ch >= BIGNUM_RADIX)
break;
}
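
The guess/comparand refinement in the loop above is the standard trial-quotient correction from the Knuth section cited earlier. In that section's notation (`b` the radix, `u_j, u_{j+1}` the leading numerator digits, `v_1, v_2` the leading denominator digits), the key bound, stated here as a summary rather than a proof, is:

```latex
% Trial quotient for one step of Algorithm D (Knuth, TAOCP vol. 2, sec. 4.3.1).
\[
  \hat{q} \;=\; \min\!\left(
      \left\lfloor \frac{u_j\, b + u_{j+1}}{v_1} \right\rfloor,\; b - 1
  \right)
\]
% With the denominator normalized so that v_1 >= floor(b/2), the true
% quotient digit q satisfies
\[
  \hat{q} - 2 \;\le\; q \;\le\; \hat{q},
\]
% which is why the loop only ever decrements guess. Testing guess * v_2
% against the two-digit comparand removes almost all remaining overestimates;
% the rare leftover case is handled by the add-back path ("guess is one too
% large") in bignum_divide_subtract further down.
```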
@ -944,8 +940,8 @@ bignum_digit_type factor_vm::bignum_divide_subtract(
#undef ph
#undef diff
}
/* Subtraction generated carry, implying guess is one too large.
Add v back in to bring it back down. */
// Subtraction generated carry, implying guess is one too large.
// Add v back in to bring it back down.
v_scan = v_start;
u_scan = u_start;
carry = 0;
@ -966,7 +962,7 @@ bignum_digit_type factor_vm::bignum_divide_subtract(
return (guess - 1);
}
/* Allocates memory */
// Allocates memory
void factor_vm::bignum_divide_unsigned_medium_denominator(
bignum* numerator_, bignum_digit_type denominator, bignum** quotient,
bignum** remainder, int q_negative_p, int r_negative_p) {
@ -976,7 +972,7 @@ void factor_vm::bignum_divide_unsigned_medium_denominator(
bignum_length_type length_n = (BIGNUM_LENGTH(numerator));
int shift = 0;
/* Because `bignum_digit_divide' requires a normalized denominator. */
// Because `bignum_digit_divide' requires a normalized denominator.
while (denominator < (BIGNUM_RADIX / 2)) {
denominator <<= 1;
shift += 1;
@ -1054,9 +1050,9 @@ void factor_vm::bignum_destructive_unnormalization(bignum* bn,
return;
}
/* This is a reduced version of the division algorithm, applied to the
case of dividing two bignum digits by one bignum digit. It is
assumed that the numerator, denominator are normalized. */
// This is a reduced version of the division algorithm, applied to the
// case of dividing two bignum digits by one bignum digit. It is
// assumed that the numerator, denominator are normalized.
#define BDD_STEP(qn, j) \
{ \
@ -1080,7 +1076,7 @@ void factor_vm::bignum_destructive_unnormalization(bignum* bn,
bignum_digit_type factor_vm::bignum_digit_divide(
bignum_digit_type uh, bignum_digit_type ul, bignum_digit_type v,
bignum_digit_type* q) /* return value */
bignum_digit_type* q) // return value
{
bignum_digit_type guess;
bignum_digit_type comparand;
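
Judging by how it is used in the comparand computation above, `bignum_digit_divide` divides a normalized two-digit value `uh*RADIX + ul` by a single digit `v`, stores the quotient through `q`, and returns the remainder, all without needing an integer type wider than a digit. With 32-bit digits the same contract can be written against native 64-bit arithmetic; this toy version only pins down what is computed, not the half-digit technique itself:

```cpp
#include <cstdint>
#include <cstdio>

// Toy model of the two-digits-by-one-digit division contract (32-bit digits).
// Assumes uh < v (normalization) so the quotient fits in a single digit.
static uint32_t toy_digit_divide(uint32_t uh, uint32_t ul, uint32_t v,
                                 uint32_t* q) {
  uint64_t u = ((uint64_t)uh << 32) | ul;  // uh * 2^32 + ul
  *q = (uint32_t)(u / v);                  // quotient through the out-parameter
  return (uint32_t)(u % v);                // remainder as the return value
}

int main() {
  uint32_t q;
  uint32_t r = toy_digit_divide(3, 7, 10, &q);
  // 3 * 2^32 + 7 = 12884901895, so q = 1288490189 and r = 5.
  std::printf("q=%u r=%u\n", q, r);
}
```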
@ -1172,7 +1168,7 @@ bignum_digit_type factor_vm::bignum_digit_divide_subtract(
#undef BDDS_MULSUB
#undef BDDS_ADD
/* Allocates memory */
// Allocates memory
void factor_vm::bignum_divide_unsigned_small_denominator(
bignum* numerator_, bignum_digit_type denominator, bignum** quotient,
bignum** remainder, int q_negative_p, int r_negative_p) {
@ -1193,9 +1189,9 @@ void factor_vm::bignum_divide_unsigned_small_denominator(
return;
}
/* Given (denominator > 1), it is fairly easy to show that
(quotient_high < BIGNUM_RADIX_ROOT), after which it is easy to see
that all digits are < BIGNUM_RADIX. */
// Given (denominator > 1), it is fairly easy to show that
// (quotient_high < BIGNUM_RADIX_ROOT), after which it is easy to see
// that all digits are < BIGNUM_RADIX.
bignum_digit_type factor_vm::bignum_destructive_scale_down(
bignum* bn, bignum_digit_type denominator) {
@ -1218,7 +1214,7 @@ bignum_digit_type factor_vm::bignum_destructive_scale_down(
#undef quotient_high
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_remainder_unsigned_small_denominator(
bignum* n, bignum_digit_type d, int negative_p) {
bignum_digit_type two_digits;
@ -1235,7 +1231,7 @@ bignum* factor_vm::bignum_remainder_unsigned_small_denominator(
return (bignum_digit_to_bignum(r, negative_p));
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_digit_to_bignum(bignum_digit_type digit,
int negative_p) {
if (digit == 0)
@ -1247,7 +1243,7 @@ bignum* factor_vm::bignum_digit_to_bignum(bignum_digit_type digit,
}
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::allot_bignum(bignum_length_type length, int negative_p) {
BIGNUM_ASSERT((length >= 0) || (length < BIGNUM_RADIX));
bignum* result = allot_uninitialized_array<bignum>(length + 1);
@ -1255,7 +1251,7 @@ bignum* factor_vm::allot_bignum(bignum_length_type length, int negative_p) {
return (result);
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::allot_bignum_zeroed(bignum_length_type length,
int negative_p) {
bignum* result = allot_bignum(length, negative_p);
@ -1266,7 +1262,7 @@ bignum* factor_vm::allot_bignum_zeroed(bignum_length_type length,
return (result);
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_shorten_length(bignum* bn,
bignum_length_type length) {
bignum_length_type current_length = (BIGNUM_LENGTH(bn));
@ -1278,7 +1274,7 @@ bignum* factor_vm::bignum_shorten_length(bignum* bn,
return (bn);
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_trim(bignum* bn) {
bignum_digit_type* start = (BIGNUM_START_PTR(bn));
bignum_digit_type* end = (start + (BIGNUM_LENGTH(bn)));
@ -1294,9 +1290,9 @@ bignum* factor_vm::bignum_trim(bignum* bn) {
return (bn);
}
/* Copying */
// Copying
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_new_sign(bignum* x_, int negative_p) {
data_root<bignum> x(x_, this);
bignum* result = allot_bignum(BIGNUM_LENGTH(x), negative_p);
@ -1304,7 +1300,7 @@ bignum* factor_vm::bignum_new_sign(bignum* x_, int negative_p) {
return result;
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_maybe_new_sign(bignum* x_, int negative_p) {
if ((BIGNUM_NEGATIVE_P(x_)) ? negative_p : (!negative_p))
return x_;
@ -1322,11 +1318,9 @@ void factor_vm::bignum_destructive_copy(bignum* source, bignum* target) {
return;
}
/*
* Added bitwise operations (and oddp).
*/
// * Added bitwise operations (and oddp).
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_bitwise_not(bignum* x_) {
int carry = 1;
@ -1378,7 +1372,7 @@ bignum* factor_vm::bignum_bitwise_not(bignum* x_) {
}
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_arithmetic_shift(bignum* arg1, fixnum n) {
if (BIGNUM_NEGATIVE_P(arg1) && n < 0)
return bignum_bitwise_not(
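
The negative-argument branch here appears to lean on the usual complement trick for a floor (arithmetic) right shift: assuming `bignum_bitwise_not` computes the two's-complement-style `~x = -x - 1` and the magnitude shift of a non-negative value is a plain floor division by a power of two, the identity being exploited is:

```latex
% Floor right shift of a negative x by k >= 0 places, via its complement:
\[
  \left\lfloor \frac{x}{2^{k}} \right\rfloor
  \;=\; -\left\lfloor \frac{-x - 1}{2^{k}} \right\rfloor - 1,
\]
% i.e. shift(x, -k) = not(shift(not(x), -k)), where not(x) = -x - 1 and the
% inner shift acts on a non-negative value.
% Example: x = -5, k = 1:  not(-5) = 4,  4 >> 1 = 2,  not(2) = -3 = floor(-5/2).
```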
@ -1391,7 +1385,7 @@ bignum* factor_vm::bignum_arithmetic_shift(bignum* arg1, fixnum n) {
#define IOR_OP 1
#define XOR_OP 2
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_bitwise_and(bignum* arg1, bignum* arg2) {
return ((BIGNUM_NEGATIVE_P(arg1)) ? (BIGNUM_NEGATIVE_P(arg2))
? bignum_negneg_bitwise_op(AND_OP, arg1, arg2)
@ -1401,7 +1395,7 @@ bignum* factor_vm::bignum_bitwise_and(bignum* arg1, bignum* arg2) {
: bignum_pospos_bitwise_op(AND_OP, arg1, arg2));
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_bitwise_ior(bignum* arg1, bignum* arg2) {
return ((BIGNUM_NEGATIVE_P(arg1)) ? (BIGNUM_NEGATIVE_P(arg2))
? bignum_negneg_bitwise_op(IOR_OP, arg1, arg2)
@ -1411,7 +1405,7 @@ bignum* factor_vm::bignum_bitwise_ior(bignum* arg1, bignum* arg2) {
: bignum_pospos_bitwise_op(IOR_OP, arg1, arg2));
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_bitwise_xor(bignum* arg1, bignum* arg2) {
return ((BIGNUM_NEGATIVE_P(arg1)) ? (BIGNUM_NEGATIVE_P(arg2))
? bignum_negneg_bitwise_op(XOR_OP, arg1, arg2)
@ -1421,9 +1415,9 @@ bignum* factor_vm::bignum_bitwise_xor(bignum* arg1, bignum* arg2) {
: bignum_pospos_bitwise_op(XOR_OP, arg1, arg2));
}
/* Allocates memory */
/* ash for the magnitude */
/* assume arg1 is a big number, n is a long */
// Allocates memory
// ash for the magnitude
// assume arg1 is a big number, n is a long
bignum* factor_vm::bignum_magnitude_ash(bignum* arg1_, fixnum n) {
data_root<bignum> arg1(arg1_, this);
@ -1484,7 +1478,7 @@ bignum* factor_vm::bignum_magnitude_ash(bignum* arg1_, fixnum n) {
return bignum_trim(result);
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_pospos_bitwise_op(int op, bignum* arg1_,
bignum* arg2_) {
data_root<bignum> arg1(arg1_, this);
@ -1519,7 +1513,7 @@ bignum* factor_vm::bignum_pospos_bitwise_op(int op, bignum* arg1_,
return bignum_trim(result);
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_posneg_bitwise_op(int op, bignum* arg1_,
bignum* arg2_) {
data_root<bignum> arg1(arg1_, this);
@ -1570,7 +1564,7 @@ bignum* factor_vm::bignum_posneg_bitwise_op(int op, bignum* arg1_,
return bignum_trim(result);
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_negneg_bitwise_op(int op, bignum* arg1_,
bignum* arg2_) {
data_root<bignum> arg1(arg1_, this);
@ -1654,7 +1648,7 @@ void factor_vm::bignum_negate_magnitude(bignum* arg) {
}
}
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_integer_length(bignum* x_) {
data_root<bignum> x(x_, this);
bignum_length_type index = ((BIGNUM_LENGTH(x)) - 1);
@ -1680,7 +1674,7 @@ bignum* factor_vm::bignum_integer_length(bignum* x_) {
return (bignum_trim(result));
}
/* Allocates memory */
// Allocates memory
int factor_vm::bignum_logbitp(int shift, bignum* arg) {
return ((BIGNUM_NEGATIVE_P(arg))
? !bignum_unsigned_logbitp(shift, bignum_bitwise_not(arg))
@ -1699,13 +1693,13 @@ int factor_vm::bignum_unsigned_logbitp(int shift, bignum* bn) {
}
#ifdef _WIN64
/* Allocates memory. */
// Allocates memory.
bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
data_root<bignum> a(a_, this);
data_root<bignum> b(b_, this);
/* Copies of a and b with that are both positive. */
// Copies of a and b with that are both positive.
data_root<bignum> ac(bignum_maybe_new_sign(a.untagged(), 0), this);
data_root<bignum> bc(bignum_maybe_new_sign(b.untagged(), 0), this);
@ -1724,7 +1718,7 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
return ac.untagged();
}
#else
/* Allocates memory */
// Allocates memory
bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
data_root<bignum> a(a_, this);
data_root<bignum> b(b_, this);
@ -1734,7 +1728,7 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
bignum_digit_type* scan_a, *scan_b, *scan_c, *scan_d;
bignum_digit_type* a_end, *b_end, *c_end;
/* clone the bignums so we can modify them in-place */
// clone the bignums so we can modify them in-place
size_a = BIGNUM_LENGTH(a);
data_root<bignum> c(allot_bignum(size_a, 0), this);
// c = allot_bignum(size_a, 0);
@ -1753,7 +1747,7 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
(*scan_d++) = (*scan_b++);
b = d;
/* Initial reduction: make sure that 0 <= b <= a. */
// Initial reduction: make sure that 0 <= b <= a.
if (bignum_compare(a.untagged(), b.untagged()) == bignum_comparison_less) {
swap(a, b);
std::swap(size_a, size_b);
@ -1768,7 +1762,7 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
? BIGNUM_REF(b, size_a - 1) << (BIGNUM_DIGIT_LENGTH - nbits)
: 0));
/* inner loop of Lehmer's algorithm; */
// inner loop of Lehmer's algorithm;
A = 1;
B = 0;
C = 0;
@ -1797,7 +1791,7 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
}
if (k == 0) {
/* no progress; do a Euclidean step */
// no progress; do a Euclidean step
if (size_b == 0) {
return bignum_trim(a.untagged());
}
@ -1833,10 +1827,9 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
continue;
}
/*
a, b = A*b - B*a, D*a - C*b if k is odd
a, b = A*a - B*b, D*b - C*a if k is even
*/
// a, b = A*b - B*a, D*a - C*b if k is odd
// a, b = A*a - B*b, D*b - C*a if k is even
scan_a = BIGNUM_START_PTR(a);
scan_b = BIGNUM_START_PTR(b);
scan_c = scan_a;
@ -1892,12 +1885,12 @@ bignum* factor_vm::bignum_gcd(bignum* a_, bignum* b_) {
BIGNUM_ASSERT(size_a >= size_b);
}
/* a fits into a fixnum, so b must too */
// a fits into a fixnum, so b must too
fixnum xx = bignum_to_fixnum(a.untagged());
fixnum yy = bignum_to_fixnum(b.untagged());
fixnum tt;
/* usual Euclidean algorithm for longs */
// usual Euclidean algorithm for longs
while (yy != 0) {
tt = yy;
yy = xx % yy;

View File

@ -1,38 +1,36 @@
namespace factor {
/*
// Copyright (C) 1989-1992 Massachusetts Institute of Technology
// Portions copyright (C) 2004-2009 Slava Pestov
Copyright (C) 1989-1992 Massachusetts Institute of Technology
Portions copyright (C) 2004-2009 Slava Pestov
// This material was developed by the Scheme project at the Massachusetts
// Institute of Technology, Department of Electrical Engineering and
// Computer Science. Permission to copy and modify this software, to
// redistribute either the original software or a modified version, and
// to use this software for any purpose is granted, subject to the
// following restrictions and understandings.
This material was developed by the Scheme project at the Massachusetts
Institute of Technology, Department of Electrical Engineering and
Computer Science. Permission to copy and modify this software, to
redistribute either the original software or a modified version, and
to use this software for any purpose is granted, subject to the
following restrictions and understandings.
// 1. Any copy made of this software must include this copyright notice
// in full.
1. Any copy made of this software must include this copyright notice
in full.
// 2. Users of this software agree to make their best efforts (a) to
// return to the MIT Scheme project any improvements or extensions that
// they make, so that these may be included in future releases; and (b)
// to inform MIT of noteworthy uses of this software.
2. Users of this software agree to make their best efforts (a) to
return to the MIT Scheme project any improvements or extensions that
they make, so that these may be included in future releases; and (b)
to inform MIT of noteworthy uses of this software.
// 3. All materials developed as a consequence of the use of this
// software shall duly acknowledge such use, in accordance with the usual
// standards of acknowledging credit in academic research.
3. All materials developed as a consequence of the use of this
software shall duly acknowledge such use, in accordance with the usual
standards of acknowledging credit in academic research.
// 4. MIT has made no warrantee or representation that the operation of
// this software will be error-free, and MIT is under no obligation to
// provide any services, by way of maintenance, update, or otherwise.
4. MIT has made no warrantee or representation that the operation of
this software will be error-free, and MIT is under no obligation to
provide any services, by way of maintenance, update, or otherwise.
5. In conjunction with products arising from the use of this material,
there shall be no use of the name of the Massachusetts Institute of
Technology nor of any adaptation thereof in any advertising,
promotional, or sales literature without prior written consent from
MIT in each case. */
// 5. In conjunction with products arising from the use of this material,
// there shall be no use of the name of the Massachusetts Institute of
// Technology nor of any adaptation thereof in any advertising,
// promotional, or sales literature without prior written consent from
// MIT in each case.
#define BIGNUM_OUT_OF_BAND ((bignum*)0)

View File

@ -1,48 +1,48 @@
/* -*-C-*-
// -*-C-*-
$Id: s48_bignumint.h,v 1.14 2005/12/21 02:36:52 spestov Exp $
// $Id: s48_bignumint.h,v 1.14 2005/12/21 02:36:52 spestov Exp $
Copyright (c) 1989-1992 Massachusetts Institute of Technology
// Copyright (c) 1989-1992 Massachusetts Institute of Technology
This material was developed by the Scheme project at the Massachusetts
Institute of Technology, Department of Electrical Engineering and
Computer Science. Permission to copy and modify this software, to
redistribute either the original software or a modified version, and
to use this software for any purpose is granted, subject to the
following restrictions and understandings.
// This material was developed by the Scheme project at the Massachusetts
// Institute of Technology, Department of Electrical Engineering and
// Computer Science. Permission to copy and modify this software, to
// redistribute either the original software or a modified version, and
// to use this software for any purpose is granted, subject to the
// following restrictions and understandings.
1. Any copy made of this software must include this copyright notice
in full.
// 1. Any copy made of this software must include this copyright notice
// in full.
2. Users of this software agree to make their best efforts (a) to
return to the MIT Scheme project any improvements or extensions that
they make, so that these may be included in future releases; and (b)
to inform MIT of noteworthy uses of this software.
// 2. Users of this software agree to make their best efforts (a) to
// return to the MIT Scheme project any improvements or extensions that
// they make, so that these may be included in future releases; and (b)
// to inform MIT of noteworthy uses of this software.
3. All materials developed as a consequence of the use of this
software shall duly acknowledge such use, in accordance with the usual
standards of acknowledging credit in academic research.
// 3. All materials developed as a consequence of the use of this
// software shall duly acknowledge such use, in accordance with the usual
// standards of acknowledging credit in academic research.
4. MIT has made no warrantee or representation that the operation of
this software will be error-free, and MIT is under no obligation to
provide any services, by way of maintenance, update, or otherwise.
// 4. MIT has made no warrantee or representation that the operation of
// this software will be error-free, and MIT is under no obligation to
// provide any services, by way of maintenance, update, or otherwise.
5. In conjunction with products arising from the use of this material,
there shall be no use of the name of the Massachusetts Institute of
Technology nor of any adaptation thereof in any advertising,
promotional, or sales literature without prior written consent from
MIT in each case. */
// 5. In conjunction with products arising from the use of this material,
// there shall be no use of the name of the Massachusetts Institute of
// Technology nor of any adaptation thereof in any advertising,
// promotional, or sales literature without prior written consent from
// MIT in each case.
namespace factor {
/* Internal Interface to Bignum Code */
// Internal Interface to Bignum Code
#undef BIGNUM_ZERO_P
#undef BIGNUM_NEGATIVE_P
/* The memory model is based on the following definitions, and on the
definition of the type `bignum_type'. The only other special
definition is `CHAR_BIT', which is defined in the Ansi C header
file "limits.h". */
// The memory model is based on the following definitions, and on the
// definition of the type `bignum_type'. The only other special
// definition is `CHAR_BIT', which is defined in the Ansi C header
// file "limits.h".
typedef fixnum bignum_digit_type;
typedef fixnum bignum_length_type;
@ -55,10 +55,10 @@ typedef int64_t bignum_twodigit_type;
#endif
#endif
/* BIGNUM_TO_POINTER casts a bignum object to a digit array pointer. */
// BIGNUM_TO_POINTER casts a bignum object to a digit array pointer.
#define BIGNUM_TO_POINTER(bignum) ((bignum_digit_type*)(bignum->data()))
/* BIGNUM_EXCEPTION is invoked to handle assertion violations. */
// BIGNUM_EXCEPTION is invoked to handle assertion violations.
#define BIGNUM_EXCEPTION abort
#define BIGNUM_DIGIT_LENGTH (((sizeof(bignum_digit_type)) * CHAR_BIT) - 2)
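
Using the `BIGNUM_DIGIT_LENGTH` definition just above, and assuming the usual s48 companions `BIGNUM_RADIX = 1 << BIGNUM_DIGIT_LENGTH` and `BIGNUM_RADIX_ROOT = 1 << (BIGNUM_DIGIT_LENGTH / 2)` (they are not visible in this hunk), the concrete values on a 64-bit build work out as below; a few `static_assert`s make the two guard bits explicit:

```cpp
#include <climits>
#include <cstdint>

// Concrete digit parameters for a 64-bit fixnum. RADIX/RADIX_ROOT mirror the
// usual s48 definitions and are assumptions here, not copied from this hunk.
typedef int64_t digit_type;

constexpr unsigned DIGIT_LENGTH = sizeof(digit_type) * CHAR_BIT - 2;  // 62
constexpr uint64_t RADIX        = uint64_t(1) << DIGIT_LENGTH;        // 2^62
constexpr uint64_t RADIX_ROOT   = uint64_t(1) << (DIGIT_LENGTH / 2);  // 2^31

static_assert(DIGIT_LENGTH == 62, "two guard bits reserved out of 64");
static_assert(RADIX_ROOT * RADIX_ROOT == RADIX,
              "a digit splits into two half-digits");
// Headroom: the full 64-bit word holds up to 4 * RADIX - 1, which is the
// slack the multiplication and division inner loops rely on.
static_assert(UINT64_MAX / 4 + 1 == RADIX, "word maximum is 4 * RADIX - 1");

int main() { return 0; }
```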
@ -79,8 +79,8 @@ typedef int64_t bignum_twodigit_type;
#define BIGNUM_REF(bignum, index) (*((BIGNUM_START_PTR(bignum)) + (index)))
/* These definitions are here to facilitate caching of the constants
0, 1, and -1. */
// These definitions are here to facilitate caching of the constants
// 0, 1, and -1.
#define BIGNUM_ZERO() untag<bignum>(special_objects[OBJ_BIGNUM_ZERO])
#define BIGNUM_ONE(neg_p) untag<bignum>( \
special_objects[neg_p ? OBJ_BIGNUM_NEG_ONE : OBJ_BIGNUM_POS_ONE])
@ -103,6 +103,6 @@ typedef int64_t bignum_twodigit_type;
BIGNUM_EXCEPTION(); \
}
#endif /* not BIGNUM_DISABLE_ASSERTION_CHECKS */
#endif // not BIGNUM_DISABLE_ASSERTION_CHECKS
}

View File

@ -1,6 +1,6 @@
namespace factor {
/* Cannot allocate */
// Cannot allocate
inline static bool to_boolean(cell value) { return value != false_object; }
}

View File

@ -1,7 +1,7 @@
namespace factor {
struct bump_allocator {
/* offset of 'here' and 'end' is hardcoded in compiler backends */
// offset of 'here' and 'end' is hardcoded in compiler backends
cell here;
cell start;
cell end;
@ -27,9 +27,9 @@ struct bump_allocator {
void flush() {
here = start;
#ifdef FACTOR_DEBUG
/* In case of bugs, there may be bogus references pointing to the
memory space after the gc has run. Filling it with a pattern
makes accesses to such shadow data fail hard. */
// In case of bugs, there may be bogus references pointing to the
// memory space after the gc has run. Filling it with a pattern
// makes accesses to such shadow data fail hard.
memset_cell((void*)start, 0xbaadbaad, size);
#endif
}

View File

@ -2,26 +2,26 @@
namespace factor {
/* Allocates memory */
// Allocates memory
byte_array* factor_vm::allot_byte_array(cell size) {
byte_array* array = allot_uninitialized_array<byte_array>(size);
memset(array + 1, 0, size);
return array;
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_byte_array() {
cell size = unbox_array_size();
ctx->push(tag<byte_array>(allot_byte_array(size)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_uninitialized_byte_array() {
cell size = unbox_array_size();
ctx->push(tag<byte_array>(allot_uninitialized_array<byte_array>(size)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_resize_byte_array() {
data_root<byte_array> array(ctx->pop(), this);
check_tagged(array);
@ -29,21 +29,21 @@ void factor_vm::primitive_resize_byte_array() {
ctx->push(tag<byte_array>(reallot_array(array.untagged(), capacity)));
}
/* Allocates memory */
// Allocates memory
void growable_byte_array::grow_bytes(cell len) {
count += len;
if (count >= array_capacity(elements.untagged()))
elements = elements.parent->reallot_array(elements.untagged(), count * 2);
}
/* Allocates memory */
// Allocates memory
void growable_byte_array::append_bytes(void* elts, cell len) {
cell old_count = count;
grow_bytes(len);
memcpy(&elements->data<uint8_t>()[old_count], elts, len);
}
/* Allocates memory */
// Allocates memory
void growable_byte_array::append_byte_array(cell byte_array_) {
data_root<byte_array> byte_array(byte_array_, elements.parent);
@ -58,7 +58,7 @@ void growable_byte_array::append_byte_array(cell byte_array_) {
count += len;
}
/* Allocates memory */
// Allocates memory
void growable_byte_array::trim() {
factor_vm* parent = elements.parent;
elements = parent->reallot_array(elements.untagged(), count);

View File

@ -4,7 +4,7 @@ struct growable_byte_array {
cell count;
data_root<byte_array> elements;
/* Allocates memory */
// Allocates memory
growable_byte_array(factor_vm* parent, cell capacity = 40)
: count(0), elements(parent->allot_byte_array(capacity), parent) {}
@ -15,7 +15,7 @@ struct growable_byte_array {
void trim();
};
/* Allocates memory */
// Allocates memory
template <typename Type>
byte_array* factor_vm::byte_array_from_value(Type* value) {
byte_array* data = allot_uninitialized_array<byte_array>(sizeof(Type));

View File

@ -49,10 +49,10 @@ void callback_heap::update(code_block* stub) {
}
code_block* callback_heap::add(cell owner, cell return_rewind) {
/* code_template is a 2-tuple where the first element contains the
relocations and the second a byte array of compiled assembly
code. The code assumes that there are four relocations on x86 and
three on ppc. */
// code_template is a 2-tuple where the first element contains the
// relocations and the second a byte array of compiled assembly
// code. The code assumes that there are four relocations on x86 and
// three on ppc.
tagged<array> code_template(parent->special_objects[CALLBACK_STUB]);
tagged<byte_array> insns(array_nth(code_template.untagged(), 1));
cell size = array_capacity(insns.untagged());
@ -71,12 +71,12 @@ code_block* callback_heap::add(cell owner, cell return_rewind) {
memcpy((void*)stub->entry_point(), insns->data<void>(), size);
/* Store VM pointer in two relocations. */
// Store VM pointer in two relocations.
store_callback_operand(stub, 0, (cell)parent);
store_callback_operand(stub, 2, (cell)parent);
/* On x86, the RET instruction takes an argument which depends on
the callback's calling convention */
// On x86, the RET instruction takes an argument which depends on
// the callback's calling convention
if (return_takes_param_p())
store_callback_operand(stub, 3, return_rewind);
@ -84,7 +84,7 @@ code_block* callback_heap::add(cell owner, cell return_rewind) {
return stub;
}
/* Allocates memory (add(), allot_alien())*/
// Allocates memory (add(), allot_alien())
void factor_vm::primitive_callback() {
cell return_rewind = to_cell(ctx->pop());
tagged<word> w(ctx->pop());
@ -101,7 +101,7 @@ void factor_vm::primitive_free_callback() {
callbacks->allocator->free(stub);
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_callback_room() {
allocator_room room = callbacks->allocator->as_allocator_room();
ctx->push(tag<byte_array>(byte_array_from_value(&room)));

View File

@ -1,27 +1,27 @@
namespace factor {
/* The callback heap is used to store the machine code that alien-callbacks
actually jump to when C code invokes them.
// The callback heap is used to store the machine code that alien-callbacks
// actually jump to when C code invokes them.
The callback heap has entries that look like code_blocks from the code heap, but
callback heap entries are allocated contiguously, never deallocated, and all
fields but the owner are set to false_object. The owner points to the callback
bottom word, whose entry point is the callback body itself, generated by the
optimizing compiler. The machine code that follows a callback stub consists of a
single CALLBACK_STUB machine code template, which performs a jump to a "far"
address (on PowerPC and x86-64, its loaded into a register first).
// The callback heap has entries that look like code_blocks from the code heap, but
// callback heap entries are allocated contiguously, never deallocated, and all
// fields but the owner are set to false_object. The owner points to the callback
// bottom word, whose entry point is the callback body itself, generated by the
// optimizing compiler. The machine code that follows a callback stub consists of a
// single CALLBACK_STUB machine code template, which performs a jump to a "far"
// address (on PowerPC and x86-64, its loaded into a register first).
GC updates the CALLBACK_STUB code if the code block of the callback bottom word
is ever moved. The callback stub itself won't move, though, and is never
deallocated. This means that the callback stub itself is a stable function
pointer that C code can hold on to until the associated Factor VM exits.
// GC updates the CALLBACK_STUB code if the code block of the callback bottom word
// is ever moved. The callback stub itself won't move, though, and is never
// deallocated. This means that the callback stub itself is a stable function
// pointer that C code can hold on to until the associated Factor VM exits.
Since callback stubs are GC roots, and are never deallocated, the associated
callback code in the code heap is also never deallocated.
// Since callback stubs are GC roots, and are never deallocated, the associated
// callback code in the code heap is also never deallocated.
The callback heap is not saved in the image. Running GC in a new session after
saving the image will deallocate any code heap entries that were only reachable
from the callback heap in the previous session when the image was saved. */
// The callback heap is not saved in the image. Running GC in a new session after
// saving the image will deallocate any code heap entries that were only reachable
// from the callback heap in the previous session when the image was saved.
struct callback_heap {
segment* seg;
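
The stability property described in the comment above — C code may hold the stub's address forever while the callback body it jumps to can be moved by the GC — boils down to one level of indirection whose outer object never moves and whose inner slot gets patched. The real heap stores machine code rather than a C++ object, so the following is only a sketch of that design choice, with invented names:

```cpp
#include <cstdio>

// Stable outer handle, movable inner target: the analogue of a callback stub
// whose CALLBACK_STUB jump target is rewritten when the GC relocates the body.
using callback_body = void (*)(void);

struct callback_stub {
  callback_body target;               // rewritten by the "GC" on relocation
  void invoke() const { target(); }   // what C code effectively calls
};

static void body_v1() { std::puts("callback body (original location)"); }
static void body_v2() { std::puts("callback body (after relocation)"); }

int main() {
  callback_stub stub{body_v1};        // the stub's address is handed out to C code
  stub.invoke();

  stub.target = body_v2;              // GC "moved" the body: only the slot is patched
  stub.invoke();                      // the same stub pointer keeps working
}
```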

View File

@ -2,21 +2,21 @@
namespace factor {
/* Allocates memory (allot) */
// Allocates memory (allot)
callstack* factor_vm::allot_callstack(cell size) {
callstack* stack = allot<callstack>(callstack_object_size(size));
stack->length = tag_fixnum(size);
return stack;
}
/* We ignore the two topmost frames, the 'callstack' primitive
frame itself, and the frame calling the 'callstack' primitive,
so that set-callstack doesn't get stuck in an infinite loop.
// We ignore the two topmost frames, the 'callstack' primitive
// frame itself, and the frame calling the 'callstack' primitive,
// so that set-callstack doesn't get stuck in an infinite loop.
This means that if 'callstack' is called in tail position, we
will have popped a necessary frame... however this word is only
called by continuation implementation, and user code shouldn't
be calling it at all, so we leave it as it is for now. */
// This means that if 'callstack' is called in tail position, we
// will have popped a necessary frame... however this word is only
// called by continuation implementation, and user code shouldn't
// be calling it at all, so we leave it as it is for now.
cell factor_vm::second_from_top_stack_frame(context* ctx) {
cell frame_top = ctx->callstack_top;
for (cell i = 0; i < 2; ++i) {
@ -28,7 +28,7 @@ cell factor_vm::second_from_top_stack_frame(context* ctx) {
return frame_top;
}
/* Allocates memory (allot_callstack) */
// Allocates memory (allot_callstack)
cell factor_vm::capture_callstack(context* ctx) {
cell top = second_from_top_stack_frame(ctx);
cell bottom = ctx->callstack_bottom;
@ -40,7 +40,7 @@ cell factor_vm::capture_callstack(context* ctx) {
return tag<callstack>(stack);
}
/* Allocates memory (capture_callstack) */
// Allocates memory (capture_callstack)
void factor_vm::primitive_callstack_for() {
context* other_ctx = (context*)pinned_alien_offset(ctx->peek());
ctx->replace(capture_callstack(other_ctx));
@ -50,10 +50,10 @@ struct stack_frame_in_array {
cell cells[3];
};
/* Allocates memory (frames.trim()), iterate_callstack_object() */
// Allocates memory (frames.trim()), iterate_callstack_object()
void factor_vm::primitive_callstack_to_array() {
data_root<callstack> callstack(ctx->peek(), this);
/* Allocates memory here. */
// Allocates memory here.
growable_array frames(this);
auto stack_frame_accumulator = [&](cell frame_top,
@ -70,7 +70,7 @@ void factor_vm::primitive_callstack_to_array() {
};
iterate_callstack_object(callstack.untagged(), stack_frame_accumulator);
/* The callstack iterator visits frames in reverse order (top to bottom) */
// The callstack iterator visits frames in reverse order (top to bottom)
std::reverse((stack_frame_in_array*)frames.elements->data(),
(stack_frame_in_array*)(frames.elements->data() +
frames.count));
@ -79,8 +79,8 @@ void factor_vm::primitive_callstack_to_array() {
ctx->replace(frames.elements.value());
}
/* Some primitives implementing a limited form of callstack mutation.
Used by the single stepper. */
// Some primitives implementing a limited form of callstack mutation.
// Used by the single stepper.
void factor_vm::primitive_innermost_stack_frame_executing() {
callstack* stack = untag_check<callstack>(ctx->peek());
void* frame = stack->top();
@ -95,7 +95,7 @@ void factor_vm::primitive_innermost_stack_frame_scan() {
ctx->replace(code->code_block_for_address(addr)->scan(this, addr));
}
/* Allocates memory (jit_compile_quotation) */
// Allocates memory (jit_compile_quotation)
void factor_vm::primitive_set_innermost_stack_frame_quotation() {
data_root<callstack> stack(ctx->pop(), this);
data_root<quotation> quot(ctx->pop(), this);
@ -112,7 +112,7 @@ void factor_vm::primitive_set_innermost_stack_frame_quotation() {
*(cell*)inner = quot->entry_point + offset;
}
/* Allocates memory (allot_alien) */
// Allocates memory (allot_alien)
void factor_vm::primitive_callstack_bounds() {
ctx->push(allot_alien(ctx->callstack_seg->start));
ctx->push(allot_alien(ctx->callstack_seg->end));

View File

@ -4,9 +4,9 @@ inline static cell callstack_object_size(cell size) {
return sizeof(callstack) + size;
}
/* This is a little tricky. The iterator may allocate memory, so we
keep the callstack in a GC root and use relative offsets */
/* Allocates memory */
// This is a little tricky. The iterator may allocate memory, so we
// keep the callstack in a GC root and use relative offsets
// Allocates memory
template <typename Iterator, typename Fixup>
inline void factor_vm::iterate_callstack_object(callstack* stack_,
Iterator& iterator,
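
The relative-offset trick described above — the iterator may allocate, which can move the callstack object, so the loop keeps the object in a GC root and re-derives addresses from an offset each step — is the same pattern as iterating a container whose backing store can move underneath the visitor. A toy version using a `std::vector` whose storage may be reallocated during the visit:

```cpp
#include <cstdio>
#include <vector>

// "Keep an offset, not a pointer": the visitor may cause the backing store to
// move (here via reserve forcing a reallocation), so the loop indexes into the
// container each iteration instead of holding a raw pointer across the call.
template <typename Visitor>
void visit_all(std::vector<int>& frames, Visitor visit) {
  for (size_t offset = 0; offset < frames.size(); offset++) {
    // Re-derive the element from the stable handle + offset; a pointer saved
    // before visit() ran could be dangling afterwards.
    visit(frames[offset]);
  }
}

int main() {
  std::vector<int> frames = {1, 2, 3};
  visit_all(frames, [&](int frame) {
    std::printf("frame %d\n", frame);
    if (frame == 1)
      frames.reserve(frames.capacity() * 4);  // may move the backing store
  });
}
```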
@ -31,7 +31,7 @@ inline void factor_vm::iterate_callstack_object(callstack* stack_,
FACTOR_ASSERT(frame_offset == frame_length);
}
/* Allocates memory */
// Allocates memory
template <typename Iterator>
inline void factor_vm::iterate_callstack_object(callstack* stack,
Iterator& iterator) {
@ -39,24 +39,24 @@ inline void factor_vm::iterate_callstack_object(callstack* stack,
iterate_callstack_object(stack, iterator, none);
}
/* Iterates the callstack from innermost to outermost
callframe. Allocates memory */
// Iterates the callstack from innermost to outermost
// callframe. Allocates memory
template <typename Iterator, typename Fixup>
void factor_vm::iterate_callstack(context* ctx, Iterator& iterator,
Fixup& fixup) {
cell top = ctx->callstack_top;
cell bottom = ctx->callstack_bottom;
/* When we are translating the code block maps, all callstacks must
be empty. */
// When we are translating the code block maps, all callstacks must
// be empty.
FACTOR_ASSERT(!Fixup::translated_code_block_map || top == bottom);
while (top < bottom) {
cell addr = *(cell*)top;
FACTOR_ASSERT(addr != 0);
/* Only the address is valid, if the code heap has been compacted,
owner might not point to a real code block. */
// Only the address is valid, if the code heap has been compacted,
// owner might not point to a real code block.
code_block* owner = code->code_block_for_address(addr);
code_block* fixed_owner = fixup.translate_code(owner);
@ -72,7 +72,7 @@ void factor_vm::iterate_callstack(context* ctx, Iterator& iterator,
FACTOR_ASSERT(top == bottom);
}
/* Allocates memory */
// Allocates memory
template <typename Iterator>
inline void factor_vm::iterate_callstack(context* ctx, Iterator& iterator) {
no_fixup none;

View File

@ -5,8 +5,8 @@ namespace factor {
static cell code_block_owner(code_block* compiled) {
cell owner = compiled->owner;
/* Cold generic word call sites point to quotations that call the
inline-cache-miss and inline-cache-miss-tail primitives. */
// Cold generic word call sites point to quotations that call the
// inline-cache-miss and inline-cache-miss-tail primitives.
if (TAG(owner) != QUOTATION_TYPE)
return owner;
@ -43,9 +43,9 @@ cell code_block::owner_quot() const {
return owner;
}
/* If the code block is an unoptimized quotation, we can calculate the
scan offset. In all other cases -1 is returned.
Allocates memory (quot_code_offset_to_scan) */
// If the code block is an unoptimized quotation, we can calculate the
// scan offset. In all other cases -1 is returned.
// Allocates memory (quot_code_offset_to_scan)
cell code_block::scan(factor_vm* vm, cell addr) const {
if (type() != code_block_unoptimized) {
return tag_fixnum(-1);
@ -79,21 +79,21 @@ cell factor_vm::compute_entry_point_pic_tail_address(cell w_) {
return compute_entry_point_pic_address(w.untagged(), w->pic_tail_def);
}
/* Relocate new code blocks completely; updating references to literals,
dlsyms, and words. For all other words in the code heap, we only need
to update references to other words, without worrying about literals
or dlsyms. */
// Relocate new code blocks completely, updating references to literals,
// dlsyms, and words. For all other words in the code heap, we only need
// to update references to other words, without worrying about literals
// or dlsyms.
void factor_vm::update_word_references(code_block* compiled,
bool reset_inline_caches) {
if (code->uninitialized_p(compiled)) {
initialize_code_block(compiled);
/* update_word_references() is always applied to every block in
the code heap. Since it resets all call sites to point to
their canonical entry point (cold entry point for non-tail calls,
standard entry point for tail calls), it means that no PICs
are referenced after this is done. So instead of polluting
the code heap with dead PICs that will be freed on the next
GC, we add them to the free list immediately. */
// update_word_references() is always applied to every block in
// the code heap. Since it resets all call sites to point to
// their canonical entry point (cold entry point for non-tail calls,
// standard entry point for tail calls), it means that no PICs
// are referenced after this is done. So instead of polluting
// the code heap with dead PICs that will be freed on the next
// GC, we add them to the free list immediately.
} else if (reset_inline_caches && compiled->pic_p()) {
code->free(compiled);
} else {
@ -134,8 +134,7 @@ void factor_vm::update_word_references(code_block* compiled,
}
}
/* Look up an external library symbol referenced by a compiled code
block */
// Look up an external library symbol referenced by a compiled code block
cell factor_vm::compute_dlsym_address(array* parameters,
cell index,
bool toc) {
@ -248,15 +247,15 @@ struct initial_code_block_visitor {
}
};
/* Perform all fixups on a code block */
// Perform all fixups on a code block
void factor_vm::initialize_code_block(code_block* compiled, cell literals) {
initial_code_block_visitor visitor(this, literals);
compiled->each_instruction_operand(visitor);
compiled->flush_icache();
/* next time we do a minor GC, we have to trace this code block, since
the newly-installed instruction operands might point to literals in
nursery or aging */
// next time we do a minor GC, we have to trace this code block, since
// the newly-installed instruction operands might point to literals in
// nursery or aging
code->write_barrier(compiled);
}
@ -267,7 +266,7 @@ void factor_vm::initialize_code_block(code_block* compiled) {
code->uninitialized_blocks.erase(iter);
}
/* Fixup labels. This is done at compile time, not image load time */
// Fixup labels. This is done at compile time, not image load time
void factor_vm::fixup_labels(array* labels, code_block* compiled) {
cell size = array_capacity(labels);
@ -284,21 +283,21 @@ void factor_vm::fixup_labels(array* labels, code_block* compiled) {
}
}
/* Might GC */
/* Allocates memory */
// Might GC
// Allocates memory
code_block* factor_vm::allot_code_block(cell size, code_block_type type) {
code_block* block = code->allocator->allot(size + sizeof(code_block));
/* If allocation failed, do a full GC and compact the code heap.
A full GC that occurs as a result of the data heap filling up does not
trigger a compaction. This setup ensures that most GCs do not compact
the code heap, but if the code fills up, it probably means it will be
fragmented after GC anyway, so its best to compact. */
// If allocation failed, do a full GC and compact the code heap.
// A full GC that occurs as a result of the data heap filling up does not
// trigger a compaction. This setup ensures that most GCs do not compact
// the code heap, but if the code fills up, it probably means it will be
// fragmented after GC anyway, so it's best to compact.
if (block == NULL) {
primitive_compact_gc();
block = code->allocator->allot(size + sizeof(code_block));
/* Insufficient room even after code GC, give up */
// Insufficient room even after code GC, give up
if (block == NULL) {
std::cout << "Code heap used: " << code->allocator->occupied_space()
<< "\n";
@ -311,8 +310,8 @@ code_block* factor_vm::allot_code_block(cell size, code_block_type type) {
return block;
}
/* Might GC */
/* Allocates memory */
// Might GC
// Allocates memory
code_block* factor_vm::add_code_block(code_block_type type, cell code_,
cell labels_, cell owner_,
cell relocation_, cell parameters_,
@ -330,7 +329,7 @@ code_block* factor_vm::add_code_block(code_block_type type, cell code_,
compiled->owner = owner.value();
/* slight space optimization */
// slight space optimization
if (relocation.type() == BYTE_ARRAY_TYPE &&
array_capacity(relocation.untagged()) == 0)
compiled->relocation = false_object;
@ -343,39 +342,38 @@ code_block* factor_vm::add_code_block(code_block_type type, cell code_,
else
compiled->parameters = parameters.value();
/* code */
// code
memcpy(compiled + 1, code.untagged() + 1, code_length);
/* fixup labels */
// fixup labels
if (to_boolean(labels.value()))
fixup_labels(labels.as<array>().untagged(), compiled);
compiled->set_stack_frame_size(frame_size_untagged);
/* Once we are ready, fill in literal and word references in this code
block's instruction operands. In most cases this is done right after this
method returns, except when compiling words with the non-optimizing
compiler at the beginning of bootstrap */
// Once we are ready, fill in literal and word references in this code
// block's instruction operands. In most cases this is done right after this
// method returns, except when compiling words with the non-optimizing
// compiler at the beginning of bootstrap
this->code->uninitialized_blocks.insert(
std::make_pair(compiled, literals.value()));
this->code->all_blocks.insert((cell)compiled);
/* next time we do a minor GC, we have to trace this code block, since
the fields of the code_block struct might point into nursery or aging */
// next time we do a minor GC, we have to trace this code block, since
// the fields of the code_block struct might point into nursery or aging
this->code->write_barrier(compiled);
return compiled;
}
/* References to undefined symbols are patched up to call this function on
image load. It finds the symbol and library, and throws an error. */
// References to undefined symbols are patched up to call this function on
// image load. It finds the symbol and library, and throws an error.
void factor_vm::undefined_symbol() {
cell frame = ctx->callstack_top;
cell return_address = *(cell*)frame;
code_block* compiled = code->code_block_for_address(return_address);
/* Find the RT_DLSYM relocation nearest to the given return
address. */
// Find the RT_DLSYM relocation nearest to the given return address.
cell symbol = false_object;
cell library = false_object;

View File

@ -1,6 +1,6 @@
namespace factor {
/* The compiled code heap is structured into blocks. */
// The compiled code heap is structured into blocks.
struct code_block {
// header format (bits indexed with least significant as zero):
// bit 0 : free?
@ -11,9 +11,9 @@ struct code_block {
// if free:
// bits 3-end: code size / 8
cell header;
cell owner; /* tagged pointer to word, quotation or f */
cell parameters; /* tagged pointer to array or f */
cell relocation; /* tagged pointer to byte-array or f */
cell owner; // tagged pointer to word, quotation or f
cell parameters; // tagged pointer to array or f
cell relocation; // tagged pointer to byte-array or f
bool free_p() const { return (header & 1) == 1; }
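// A minimal sketch of decoding the rest of the header documented above,
// assuming only the bits shown there: for a free block, bits 3 and up hold
// the code size divided by 8, so the byte size is recovered by shifting the
// low three bits away and scaling back up. The name is illustrative.
cell sketch_free_block_size() const { return (header >> 3) * 8; }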
@ -47,10 +47,10 @@ struct code_block {
cell stack_frame_size_for_address(cell addr) const {
cell natural_frame_size = stack_frame_size();
/* The first instruction in a code block is the prolog safepoint,
and a leaf procedure code block will record a frame size of zero.
If we're seeing a stack frame in either of these cases, it's a
fake "leaf frame" set up by the signal handler. */
// The first instruction in a code block is the prolog safepoint,
// and a leaf procedure code block will record a frame size of zero.
// If we're seeing a stack frame in either of these cases, it's a
// fake "leaf frame" set up by the signal handler.
if (natural_frame_size == 0 || addr == entry_point())
return LEAF_FRAME_SIZE;
return natural_frame_size;
@ -68,7 +68,7 @@ struct code_block {
cell entry_point() const { return (cell)(this + 1); }
/* GC info is stored at the end of the block */
// GC info is stored at the end of the block
gc_info* block_gc_info() const {
return (gc_info*)((uint8_t*)this + size() - sizeof(gc_info));
}

View File

@ -13,7 +13,7 @@ code_heap::code_heap(cell size) {
allocator = new free_list_allocator<code_block>(seg->end - start, start);
/* See os-windows-x86.64.cpp for seh_area usage */
// See os-windows-x86.64.cpp for seh_area usage
safepoint_page = seg->start;
seh_area = (char*)seg->start + getpagesize();
}
@ -82,10 +82,11 @@ code_block* code_heap::code_block_for_address(cell address) {
--blocki;
code_block* found_block = (code_block*)*blocki;
FACTOR_ASSERT(found_block->entry_point() <=
address /* XXX this isn't valid during fixup. should store the
size in the map
&& address - found_block->entry_point() <
found_block->size()*/);
address // XXX this isn't valid during fixup. should store the
// size in the map
// && address - found_block->entry_point() <
// found_block->size()
);
return found_block;
}
@ -97,7 +98,7 @@ cell code_heap::frame_predecessor(cell frame_top) {
return frame_top + frame_size;
}
/* Recomputes the all_blocks set of code blocks */
// Recomputes the all_blocks set of code blocks
void code_heap::initialize_all_blocks_set() {
all_blocks.clear();
auto all_blocks_set_inserter = [&](code_block* block, cell size) {
@ -109,9 +110,9 @@ void code_heap::initialize_all_blocks_set() {
#endif
}
/* Update pointers to words referenced from all code blocks.
Only needed after redefining an existing word.
If generic words were redefined, inline caches need to be reset. */
// Update pointers to words referenced from all code blocks.
// Only needed after redefining an existing word.
// If generic words were redefined, inline caches need to be reset.
void factor_vm::update_code_heap_words(bool reset_inline_caches) {
auto word_updater = [&](code_block* block, cell size) {
update_word_references(block, reset_inline_caches);
@ -119,8 +120,8 @@ void factor_vm::update_code_heap_words(bool reset_inline_caches) {
each_code_block(word_updater);
}
/* Fix up new words only.
Fast path for compilation units that only define new words. */
// Fix up new words only.
// Fast path for compilation units that only define new words.
void factor_vm::initialize_code_blocks() {
FACTOR_FOR_EACH(code->uninitialized_blocks) {
@ -129,7 +130,7 @@ void factor_vm::initialize_code_blocks() {
code->uninitialized_blocks.clear();
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_modify_code_heap() {
bool reset_inline_caches = to_boolean(ctx->pop());
bool update_existing_words = to_boolean(ctx->pop());
@ -177,7 +178,7 @@ void factor_vm::primitive_modify_code_heap() {
initialize_code_blocks();
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_code_room() {
allocator_room room = code->allocator->as_allocator_room();
ctx->push(tag<byte_array>(byte_array_from_value(&room)));
@ -190,7 +191,7 @@ void factor_vm::primitive_strip_stack_traces() {
each_code_block(stack_trace_stripper);
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_code_blocks() {
std::vector<cell> objects;
auto code_block_accumulator = [&](code_block* block, cell size) {
@ -201,12 +202,12 @@ void factor_vm::primitive_code_blocks() {
objects.push_back(tag_fixnum(block->type()));
objects.push_back(tag_fixnum(block->size()));
/* Note: the entry point is always a multiple of the heap
alignment (16 bytes). We cannot allocate while iterating
through the code heap, so it is not possible to call
from_unsigned_cell() here. It is OK, however, to add it as
if it were a fixnum, and have library code shift it to the
left by 4. */
// Note: the entry point is always a multiple of the heap
// alignment (16 bytes). We cannot allocate while iterating
// through the code heap, so it is not possible to call
// from_unsigned_cell() here. It is OK, however, to add it as
// if it were a fixnum, and have library code shift it to the
// left by 4.
cell entry_point = block->entry_point();
FACTOR_ASSERT((entry_point & (data_alignment - 1)) == 0);
FACTOR_ASSERT((entry_point & TAG_MASK) == FIXNUM_TYPE);
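// (A worked example of the trick, assuming the 16-byte alignment and zero
// fixnum tag that the asserts above check: an entry point of 0x104ce0 has
// its low four bits clear, so pushed as-is it reads as the fixnum
// 0x104ce0 >> 4 == 0x104ce, and library code recovers the address as
// 0x104ce << 4 == 0x104ce0 without allocating.)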

View File

@ -7,29 +7,29 @@ const cell seh_area_size = 0;
#endif
struct code_heap {
/* The actual memory area */
// The actual memory area
segment* seg;
/* Memory area reserved for safepoint guard page */
// Memory area reserved for safepoint guard page
cell safepoint_page;
/* Memory area reserved for SEH. Only used on Windows */
// Memory area reserved for SEH. Only used on Windows
char* seh_area;
/* Memory allocator */
// Memory allocator
free_list_allocator<code_block>* allocator;
std::set<cell> all_blocks;
/* Keys are blocks which need to be initialized by initialize_code_block().
Values are literal tables. Literal table arrays are GC roots until the
time the block is initialized, after which point they are discarded. */
// Keys are blocks which need to be initialized by initialize_code_block().
// Values are literal tables. Literal table arrays are GC roots until the
// time the block is initialized, after which point they are discarded.
std::map<code_block*, cell> uninitialized_blocks;
/* Code blocks which may reference objects in the nursery */
// Code blocks which may reference objects in the nursery
std::set<code_block*> points_to_nursery;
/* Code blocks which may reference objects in aging space or the nursery */
// Code blocks which may reference objects in aging space or the nursery
std::set<code_block*> points_to_aging;
explicit code_heap(cell size);

View File

@ -25,7 +25,7 @@ struct gc_workhorse : no_fixup {
return obj;
}
/* is there another forwarding pointer? */
// is there another forwarding pointer?
while (obj->forwarding_pointer_p()) {
object* dest = obj->forwarding_pointer();
obj = dest;

View File

@ -52,9 +52,9 @@ struct compaction_fixup {
}
};
/* After a compaction, invalidate any code heap roots which are not
marked, and also slide the valid roots up so that call sites can be updated
correctly in case an inline cache compilation triggered compaction. */
// After a compaction, invalidate any code heap roots which are not
// marked, and also slide the valid roots up so that call sites can be updated
// correctly in case an inline cache compilation triggered compaction.
void factor_vm::update_code_roots_for_compaction() {
mark_bits* state = &code->allocator->state;
@ -63,7 +63,7 @@ void factor_vm::update_code_roots_for_compaction() {
code_root* root = *iter;
cell block = root->value & (~data_alignment + 1);
/* Offset of return address within 16-byte allocation line */
// Offset of return address within 16-byte allocation line
cell offset = root->value - block;
if (root->valid && state->marked_p(block)) {
@ -74,7 +74,7 @@ void factor_vm::update_code_roots_for_compaction() {
}
}
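// A minimal sketch of the rounding used above, assuming data_alignment is a
// power of two (16 here): (~alignment + 1) is simply -alignment, so the AND
// clears the low bits and rounds a code root down to the start of its
// allocation line, while the subtraction keeps the return address's offset
// within that line. The helper name and signature are illustrative.
static void split_code_root_sketch(cell value, cell alignment,
                                   cell* block, cell* offset) {
  *block = value & (~alignment + 1);  // e.g. 0x104ce7 -> 0x104ce0
  *offset = value - *block;           //      0x104ce7 -> 7
}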
/* Compact data and code heaps */
// Compact data and code heaps
void factor_vm::collect_compact_impl() {
gc_event* event = current_gc->event;
@ -89,7 +89,7 @@ void factor_vm::collect_compact_impl() {
mark_bits* data_forwarding_map = &tenured->state;
mark_bits* code_forwarding_map = &code->allocator->state;
/* Figure out where blocks are going to go */
// Figure out where blocks are going to go
data_forwarding_map->compute_forwarding();
code_forwarding_map->compute_forwarding();
@ -103,11 +103,11 @@ void factor_vm::collect_compact_impl() {
forwarder.visit_uninitialized_code_blocks();
/* Object start offsets get recomputed by the object_compaction_updater */
// Object start offsets get recomputed by the object_compaction_updater
data->tenured->starts.clear_object_start_offsets();
/* Slide everything in tenured space up, and update data and code heap
pointers inside objects. */
// Slide everything in tenured space up, and update data and code heap
// pointers inside objects.
auto compact_object_func = [&](object* old_addr, object* new_addr, cell size) {
forwarder.visit_slots(new_addr);
forwarder.visit_object_code_block(new_addr);
@ -115,8 +115,8 @@ void factor_vm::collect_compact_impl() {
};
tenured->compact(compact_object_func, fixup, &data_finger);
/* Slide everything in the code heap up, and update data and code heap
pointers inside code blocks. */
// Slide everything in the code heap up, and update data and code heap
// pointers inside code blocks.
auto compact_code_func = [&](code_block* old_addr,
code_block* new_addr,
cell size) {
@ -132,9 +132,9 @@ void factor_vm::collect_compact_impl() {
update_code_roots_for_compaction();
/* Each callback has a relocation with a pointer to a code block in
the code heap. Since the code heap has now been compacted, those
pointers are invalid and we need to update them. */
// Each callback has a relocation with a pointer to a code block in
// the code heap. Since the code heap has now been compacted, those
// pointers are invalid and we need to update them.
auto callback_updater = [&](code_block* stub, cell size) {
callbacks->update(stub);
};
@ -151,7 +151,7 @@ void factor_vm::collect_compact() {
collect_compact_impl();
if (data->high_fragmentation_p()) {
/* Compaction did not free up enough memory. Grow the heap. */
// Compaction did not free up enough memory. Grow the heap.
set_current_gc_op(collect_growing_heap_op);
collect_growing_heap(0);
}
@ -160,7 +160,7 @@ void factor_vm::collect_compact() {
}
void factor_vm::collect_growing_heap(cell requested_size) {
/* Grow the data heap and copy all live objects to the new heap. */
// Grow the data heap and copy all live objects to the new heap.
data_heap* old = data;
set_data_heap(data->grow(&nursery, requested_size));
collect_mark_impl();

View File

@ -50,7 +50,7 @@ vm_error_type context::address_to_error(cell addr) {
return ERROR_RETAINSTACK_UNDERFLOW;
if (retainstack_seg->overflow_p(addr))
return ERROR_RETAINSTACK_OVERFLOW;
/* These are flipped because the callstack grows downwards. */
// These are flipped because the callstack grows downwards.
if (callstack_seg->underflow_p(addr))
return ERROR_CALLSTACK_OVERFLOW;
if (callstack_seg->overflow_p(addr))
@ -81,8 +81,8 @@ context::~context() {
delete callstack_seg;
}
/* called on startup */
/* Allocates memory (new_context()) */
// called on startup
// Allocates memory (new_context())
void factor_vm::init_contexts(cell datastack_size_, cell retainstack_size_,
cell callstack_size_) {
datastack_size = datastack_size_;
@ -110,12 +110,12 @@ context* factor_vm::new_context() {
return new_context;
}
/* Allocates memory */
// Allocates memory
void factor_vm::init_context(context* ctx) {
ctx->context_objects[OBJ_CONTEXT] = allot_alien((cell)ctx);
}
/* Allocates memory (init_context(), but not parent->new_context() */
// Allocates memory (init_context(), but not parent->new_context())
VM_C_API context* new_context(factor_vm* parent) {
context* new_context = parent->new_context();
parent->init_context(new_context);
@ -137,7 +137,7 @@ VM_C_API void delete_context(factor_vm* parent) {
parent->delete_context();
}
/* Allocates memory (init_context()) */
// Allocates memory (init_context())
VM_C_API void reset_context(factor_vm* parent) {
// The function is used by (start-context-and-delete) which expects
@ -153,7 +153,7 @@ VM_C_API void reset_context(factor_vm* parent) {
parent->init_context(ctx);
}
/* Allocates memory */
// Allocates memory
cell factor_vm::begin_callback(cell quot_) {
data_root<object> quot(quot_, this);
@ -166,7 +166,7 @@ cell factor_vm::begin_callback(cell quot_) {
return quot.value();
}
/* Allocates memory */
// Allocates memory
cell begin_callback(factor_vm* parent, cell quot) {
return parent->begin_callback(quot);
}
@ -199,7 +199,7 @@ void factor_vm::primitive_context_object_for() {
ctx->replace(other_ctx->context_objects[n]);
}
/* Allocates memory */
// Allocates memory
cell factor_vm::stack_to_array(cell bottom, cell top, vm_error_type error) {
fixnum depth = (fixnum)(top - bottom + sizeof(cell));
@ -211,14 +211,14 @@ cell factor_vm::stack_to_array(cell bottom, cell top, vm_error_type error) {
return tag<array>(a);
}
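// A small worked sketch of the depth arithmetic above, assuming the data and
// retain stacks grow upward and the saved top pointer addresses the last
// occupied cell, which is why one extra sizeof(cell) is included.
static cell stack_depth_in_bytes(cell bottom, cell top) {
  // e.g. bottom == 0x1000, top == 0x1018, 8-byte cells -> 32 bytes (4 cells)
  return top - bottom + sizeof(cell);
}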
/* Allocates memory */
// Allocates memory
cell factor_vm::datastack_to_array(context* ctx) {
return stack_to_array(ctx->datastack_seg->start,
ctx->datastack,
ERROR_DATASTACK_UNDERFLOW);
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_datastack_for() {
data_root<alien> alien_ctx(ctx->pop(), this);
context* other_ctx = (context*)pinned_alien_offset(alien_ctx.value());
@ -226,20 +226,20 @@ void factor_vm::primitive_datastack_for() {
ctx->push(array);
}
/* Allocates memory */
// Allocates memory
cell factor_vm::retainstack_to_array(context* ctx) {
return stack_to_array(ctx->retainstack_seg->start,
ctx->retainstack,
ERROR_RETAINSTACK_UNDERFLOW);
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_retainstack_for() {
context* other_ctx = (context*)pinned_alien_offset(ctx->peek());
ctx->replace(retainstack_to_array(other_ctx));
}
/* returns pointer to top of stack */
// returns pointer to top of stack
cell factor_vm::array_to_stack(array* array, cell bottom) {
cell depth = array_capacity(array) * sizeof(cell);
memcpy((void*)bottom, array + 1, depth);
@ -256,7 +256,7 @@ void factor_vm::primitive_set_retainstack() {
ctx->retainstack = array_to_stack(arr, ctx->retainstack_seg->start);
}
/* Used to implement call( */
// Used to implement call(
void factor_vm::primitive_check_datastack() {
fixnum out = to_fixnum(ctx->pop());
fixnum in = to_fixnum(ctx->pop());

View File

@ -1,7 +1,7 @@
namespace factor {
/* Context object count and identifiers must be kept in sync with:
core/kernel/kernel.factor */
// Context object count and identifiers must be kept in sync with:
// core/kernel/kernel.factor
static const cell context_object_count = 4;
enum context_object {
@ -11,35 +11,35 @@ enum context_object {
OBJ_IN_CALLBACK_P,
};
/* When the callstack fills up (e.g by to deep recursion), a callstack
overflow error is triggered. So before continuing executing on it
in general_error(), we chop off this many bytes to have some space
to work with. Mac OSX 64 bit needs more than 8192. See issue #1419. */
// When the callstack fills up (e.g. by too deep recursion), a callstack
// overflow error is triggered. So before continuing to execute on it
// in general_error(), we chop off this many bytes to have some space
// to work with. Mac OS X 64-bit needs more than 8192. See issue #1419.
static const cell stack_reserved = 16384;
struct context {
/* First 5 fields accessed directly by compiler. See basis/vm/vm.factor */
// First 5 fields accessed directly by compiler. See basis/vm/vm.factor
/* Factor callstack pointers */
// Factor callstack pointers
cell callstack_top;
cell callstack_bottom;
/* current datastack top pointer */
// current datastack top pointer
cell datastack;
/* current retain stack top pointer */
// current retain stack top pointer
cell retainstack;
/* C callstack pointer */
// C callstack pointer
cell callstack_save;
segment* datastack_seg;
segment* retainstack_seg;
segment* callstack_seg;
/* context-specific special objects, accessed by context-object and
set-context-object primitives */
// context-specific special objects, accessed by context-object and
// set-context-object primitives
cell context_objects[context_object_count];
context(cell ds_size, cell rs_size, cell cs_size);

View File

@ -8,20 +8,20 @@ namespace factor {
#define CALLSTACK_BOTTOM(ctx) (ctx->callstack_seg->end - 32)
/* In the instruction sequence:
// In the instruction sequence:
LOAD32 r3,...
B blah
// LOAD32 r3,...
// B blah
the offset from the immediate operand to LOAD32 to the instruction after
the branch is one instruction. */
// the offset from the immediate operand to LOAD32 to the instruction after
// the branch is one instruction.
static const fixnum xt_tail_pic_offset = 4;
inline static void check_call_site(cell return_address) {
uint32_t insn = *(uint32_t*)return_address;
/* Check that absolute bit is 0 */
// Check that absolute bit is 0
FACTOR_ASSERT((insn & 0x2) == 0x0);
/* Check that instruction is branch */
// Check that instruction is branch
FACTOR_ASSERT((insn >> 26) == 0x12);
}
@ -47,7 +47,7 @@ inline static void set_call_target(cell return_address, cell target) {
insn = ((insn & ~b_mask) | (relative_address & b_mask));
*(uint32_t*)return_address = insn;
/* Flush the cache line containing the call we just patched */
// Flush the cache line containing the call we just patched
__asm__ __volatile__("icbi 0, %0\n"
"sync\n" ::"r"(return_address)
:);
@ -76,7 +76,7 @@ inline static unsigned int fpu_status(unsigned int status) {
return r;
}
/* Defined in assembly */
// Defined in assembly
VM_C_API void flush_icache(cell start, cell len);
}

View File

@ -2,8 +2,8 @@ namespace factor {
#define FACTOR_CPU_STRING "x86.32"
/* Must match the calculation in word jit-signal-handler-prolog in
basis/bootstrap/assembler/x86.factor */
// Must match the calculation in word jit-signal-handler-prolog in
// basis/bootstrap/assembler/x86.factor
static const unsigned SIGNAL_HANDLER_STACK_FRAME_SIZE = 64;
static const unsigned JIT_FRAME_SIZE = 32;

View File

@ -2,8 +2,8 @@ namespace factor {
#define FACTOR_CPU_STRING "x86.64"
/* Must match the calculation in word jit-signal-handler-prolog in
basis/bootstrap/assembler/x86.factor */
// Must match the calculation in word jit-signal-handler-prolog in
// basis/bootstrap/assembler/x86.factor
static const unsigned SIGNAL_HANDLER_STACK_FRAME_SIZE = 192;
}

View File

@ -6,27 +6,27 @@ void factor_vm::dispatch_non_resumable_signal(cell* sp, cell* pc,
cell handler,
cell limit) {
/* Fault came from the VM or foreign code. We don't try to fix the
call stack from *sp and instead use the last saved "good value"
which we get from ctx->callstack_top. Then launch the handler
without going through the resumable subprimitive. */
// Fault came from the VM or foreign code. We don't try to fix the
// call stack from *sp and instead use the last saved "good value"
// which we get from ctx->callstack_top. Then launch the handler
// without going through the resumable subprimitive.
cell frame_top = ctx->callstack_top;
cell seg_start = ctx->callstack_seg->start;
if (frame_top < seg_start) {
/* The saved callstack pointer is outside the callstack
segment. That means that we need to carefully cut off one frame
first which hopefully should put the pointer within the
callstack's bounds. */
// The saved callstack pointer is outside the callstack
// segment. That means that we need to carefully cut off one frame
// first which hopefully should put the pointer within the
// callstack's bounds.
code_block *block = code->code_block_for_address(*pc);
cell frame_size = block->stack_frame_size_for_address(*pc);
frame_top += frame_size;
}
/* Cut the callstack down to the shallowest Factor stack
frame that leaves room for the signal handler to do its thing,
and launch the handler without going through the resumable
subprimitive. */
// Cut the callstack down to the shallowest Factor stack
// frame that leaves room for the signal handler to do its thing,
// and launch the handler without going through the resumable
// subprimitive.
FACTOR_ASSERT(seg_start <= frame_top);
while (frame_top < ctx->callstack_bottom && frame_top < limit) {
frame_top = code->frame_predecessor(frame_top);
@ -38,28 +38,28 @@ void factor_vm::dispatch_non_resumable_signal(cell* sp, cell* pc,
void factor_vm::dispatch_resumable_signal(cell* sp, cell* pc, cell handler) {
/* Fault came from Factor, and we've got a good callstack. Route the
signal handler through the resumable signal handler
subprimitive. */
// Fault came from Factor, and we've got a good callstack. Route the
// signal handler through the resumable signal handler
// subprimitive.
cell offset = *sp % 16;
signal_handler_addr = handler;
/* True stack frames are always 16-byte aligned. Leaf procedures
that don't create a stack frame will be out of alignment by
sizeof(cell) bytes. */
/* On architectures with a link register we would have to check for
leafness by matching the PC to a word. We should also use
FRAME_RETURN_ADDRESS instead of assuming the stack pointer is the
right place to put the resume address. */
// True stack frames are always 16-byte aligned. Leaf procedures
// that don't create a stack frame will be out of alignment by
// sizeof(cell) bytes.
// On architectures with a link register we would have to check for
// leafness by matching the PC to a word. We should also use
// FRAME_RETURN_ADDRESS instead of assuming the stack pointer is the
// right place to put the resume address.
cell index = 0;
cell delta = 0;
if (offset == 0) {
delta = sizeof(cell);
index = SIGNAL_HANDLER_WORD;
} else if (offset == 16 - sizeof(cell)) {
/* Make a fake frame for the leaf procedure */
// Make a fake frame for the leaf procedure
FACTOR_ASSERT(code->code_block_for_address(*pc) != NULL);
delta = LEAF_FRAME_SIZE;
index = LEAF_SIGNAL_HANDLER_WORD;
@ -85,10 +85,10 @@ void factor_vm::dispatch_signal_handler(cell* sp, cell* pc, cell handler) {
dispatch_non_resumable_signal(sp, pc, handler, cs_limit);
}
/* Poking with the stack pointer, which the above code does, means
that pointers to stack-allocated objects will become
corrupted. Therefore the root vectors needs to be cleared because
their pointers to stack variables are now garbage. */
// Poking with the stack pointer, which the above code does, means
// that pointers to stack-allocated objects will become
// corrupted. Therefore the root vectors need to be cleared because
// their pointers to stack variables are now garbage.
data_roots.clear();
code_roots.clear();
}

View File

@ -5,14 +5,14 @@ namespace factor {
inline static void flush_icache(cell start, cell len) {}
/* In the instruction sequence:
// In the instruction sequence:
MOV EBX,...
JMP blah
// MOV EBX,...
// JMP blah
the offset from the immediate operand to MOV to the instruction after
the jump is a cell for the immediate operand, 4 bytes for the JMP
destination, and one byte for the JMP opcode. */
// the offset from the immediate operand to MOV to the instruction after
// the jump is a cell for the immediate operand, 4 bytes for the JMP
// destination, and one byte for the JMP opcode.
static const fixnum xt_tail_pic_offset = 4 + 1;
static const unsigned char call_opcode = 0xe8;

View File

@ -133,13 +133,13 @@ data_heap_room factor_vm::data_room() {
return room;
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_data_room() {
data_heap_room room = data_room();
ctx->push(tag<byte_array>(byte_array_from_value(&room)));
}
/* Allocates memory */
// Allocates memory
cell factor_vm::instances(cell type) {
primitive_full_gc();
@ -152,7 +152,7 @@ cell factor_vm::instances(cell type) {
return std_vector_to_array(objects);
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_all_instances() {
ctx->push(instances(TYPE_COUNT));
}

View File

@ -9,7 +9,7 @@ struct data_heap {
segment* seg;
/* Borrowed reference to a factor_vm::nursery */
// Borrowed reference to a factor_vm::nursery
bump_allocator* nursery;
aging_space* aging;
aging_space* aging_semispace;

View File

@ -1,7 +1,7 @@
#include "master.hpp"
/* A tool to debug write barriers. Call check_data_heap() to ensure that all
cards that should be marked are actually marked. */
// A tool to debug write barriers. Call check_data_heap() to ensure that all
// cards that should be marked are actually marked.
namespace factor {
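// A minimal sketch of the invariant the checker enforces, assuming the usual
// card-marking write barrier (one card byte covering a fixed power-of-two
// span of the data heap): a slot holding a pointer into a younger generation
// must have its covering card marked, or a minor GC could miss that slot.
// The card table layout and names below are illustrative only.
static bool card_marked_for_slot(cell slot_addr, cell data_start,
                                 const unsigned char* cards, cell card_bits) {
  cell card_index = (slot_addr - data_start) >> card_bits;
  return cards[card_index] != 0;
}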

View File

@ -365,7 +365,7 @@ struct code_block_printer {
}
};
/* Dump all code blocks for debugging */
// Dump all code blocks for debugging
void factor_vm::dump_code_heap(ostream& out) {
code_block_printer printer(this, out);
code->allocator->iterate(printer);
@ -460,10 +460,10 @@ void factor_vm::factorbug() {
cin >> setw(1024) >> cmd >> setw(0);
if (!cin.good()) {
if (!seen_command) {
/* If we exit with an EOF immediately, then
dump stacks. This is useful for builder and
other cases where Factor is run with stdin
redirected to /dev/null */
// If we exit with an EOF immediately, then
// dump stacks. This is useful for builder and
// other cases where Factor is run with stdin
// redirected to /dev/null
fep_disabled = true;
print_datastack(cout);

View File

@ -4,7 +4,7 @@ extern bool factor_print_p;
#ifdef FACTOR_DEBUG
/* To chop the directory path of the __FILE__ macro. */
// To chop the directory path of the __FILE__ macro.
inline const char* abbrev_path(const char* path) {
const char* p1 = strrchr(path, '\\');
const char* p2 = strrchr(path, '/');

View File

@ -114,7 +114,7 @@ void factor_vm::primitive_reset_dispatch_stats() {
memset(&dispatch_stats, 0, sizeof(dispatch_statistics));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_dispatch_stats() {
ctx->push(tag<byte_array>(byte_array_from_value(&dispatch_stats)));
}

View File

@ -3,10 +3,10 @@
namespace factor {
void factor_vm::c_to_factor(cell quot) {
/* First time this is called, wrap the c-to-factor sub-primitive inside
of a callback stub, which saves and restores non-volatile registers
per platform ABI conventions, so that the Factor compiler can treat
all registers as volatile */
// First time this is called, wrap the c-to-factor sub-primitive inside
// of a callback stub, which saves and restores non-volatile registers
// per platform ABI conventions, so that the Factor compiler can treat
// all registers as volatile
if (!c_to_factor_func) {
tagged<word> c_to_factor_word(special_objects[C_TO_FACTOR_WORD]);
code_block* c_to_factor_block = callbacks->add(c_to_factor_word.value(), 0);

View File

@ -34,7 +34,7 @@ void critical_error(const char* msg, cell tagged) {
current_vm()->factorbug();
}
/* Allocates memory */
// Allocates memory
void factor_vm::general_error(vm_error_type error, cell arg1_, cell arg2_) {
data_root<object> arg1(arg1_, this);
@ -42,38 +42,38 @@ void factor_vm::general_error(vm_error_type error, cell arg1_, cell arg2_) {
faulting_p = true;
/* If we had an underflow or overflow, data or retain stack
pointers might be out of bounds, so fix them before allocating
anything */
// If we had an underflow or overflow, data or retain stack
// pointers might be out of bounds, so fix them before allocating
// anything
ctx->fix_stacks();
/* If error was thrown during heap scan, we re-enable the GC */
// If error was thrown during heap scan, we re-enable the GC
gc_off = false;
/* If the error handler is set, we rewind any C stack frames and
pass the error to user-space. */
// If the error handler is set, we rewind any C stack frames and
// pass the error to user-space.
if (!current_gc && to_boolean(special_objects[ERROR_HANDLER_QUOT])) {
#ifdef FACTOR_DEBUG
/* Doing a GC here triggers all kinds of funny errors */
// Doing a GC here triggers all kinds of funny errors
primitive_compact_gc();
#endif
/* Now its safe to allocate and GC */
// Now it's safe to allocate and GC
cell error_object =
allot_array_4(tag_fixnum(KERNEL_ERROR), tag_fixnum(error),
arg1.value(), arg2.value());
ctx->push(error_object);
/* Clear the data roots since arg1 and arg2's destructors won't be
called. */
// Clear the data roots since arg1 and arg2's destructors won't be
// called.
data_roots.clear();
/* The unwind-native-frames subprimitive will clear faulting_p
if it was successfully reached. */
// The unwind-native-frames subprimitive will clear faulting_p
// if it was successfully reached.
unwind_native_frames(special_objects[ERROR_HANDLER_QUOT],
ctx->callstack_top);
} /* Error was thrown in early startup before error handler is set, so just
crash. */
} // Error was thrown in early startup before the error handler is set, so just
// crash.
else {
std::cout << "You have triggered a bug in Factor. Please report.\n";
std::cout << "error: " << error << std::endl;
@ -88,19 +88,19 @@ void factor_vm::general_error(vm_error_type error, cell arg1_, cell arg2_) {
}
}
/* Allocates memory */
// Allocates memory
void factor_vm::type_error(cell type, cell tagged) {
general_error(ERROR_TYPE, tag_fixnum(type), tagged);
}
/* Allocates memory */
// Allocates memory
void factor_vm::not_implemented_error() {
general_error(ERROR_NOT_IMPLEMENTED, false_object, false_object);
}
void factor_vm::verify_memory_protection_error(cell addr) {
/* Called from the OS-specific top halves of the signal handlers to
make sure it's safe to dispatch to memory_signal_handler_impl. */
// Called from the OS-specific top halves of the signal handlers to
// make sure it's safe to dispatch to memory_signal_handler_impl.
if (fatal_erroring_p)
fa_diddly_atal_error();
if (faulting_p && !code->safepoint_p(addr))
@ -111,16 +111,16 @@ void factor_vm::verify_memory_protection_error(cell addr) {
fatal_error("Memory protection fault during gc", addr);
}
/* Allocates memory */
// Allocates memory
void factor_vm::divide_by_zero_error() {
general_error(ERROR_DIVIDE_BY_ZERO, false_object, false_object);
}
/* For testing purposes */
/* Allocates memory */
// For testing purposes
// Allocates memory
void factor_vm::primitive_unimplemented() { not_implemented_error(); }
/* Allocates memory */
// Allocates memory
void memory_signal_handler_impl() {
factor_vm* vm = current_vm();
if (vm->code->safepoint_p(vm->signal_fault_addr)) {
@ -132,13 +132,13 @@ void memory_signal_handler_impl() {
vm->general_error(type, number, false_object);
}
if (!vm->signal_resumable) {
/* In theory we should only get here if the callstack overflowed during a
safepoint */
// In theory we should only get here if the callstack overflowed during a
// safepoint
vm->general_error(ERROR_CALLSTACK_OVERFLOW, false_object, false_object);
}
}
/* Allocates memory */
// Allocates memory
void synchronous_signal_handler_impl() {
factor_vm* vm = current_vm();
vm->general_error(ERROR_SIGNAL,
@ -146,11 +146,11 @@ void synchronous_signal_handler_impl() {
false_object);
}
/* Allocates memory */
// Allocates memory
void fp_signal_handler_impl() {
factor_vm* vm = current_vm();
/* Clear pending exceptions to avoid getting stuck in a loop */
// Clear pending exceptions to avoid getting stuck in a loop
vm->set_fpu_state(vm->get_fpu_state());
vm->general_error(ERROR_FP_TRAP,

View File

@ -4,8 +4,8 @@ namespace factor {
void init_globals() { init_mvm(); }
/* Compile code in boot image so that we can execute the startup quotation */
/* Allocates memory */
// Compile code in boot image so that we can execute the startup quotation
// Allocates memory
void factor_vm::prepare_boot_image() {
std::cout << "*** Stage 2 early init... " << std::flush;
@ -38,22 +38,22 @@ void factor_vm::prepare_boot_image() {
}
void factor_vm::init_factor(vm_parameters* p) {
/* Kilobytes */
// Kilobytes
p->datastack_size = align_page(p->datastack_size << 10);
p->retainstack_size = align_page(p->retainstack_size << 10);
p->callstack_size = align_page(p->callstack_size << 10);
p->callback_size = align_page(p->callback_size << 10);
/* Megabytes */
// Megabytes
p->young_size <<= 20;
p->aging_size <<= 20;
p->tenured_size <<= 20;
p->code_size <<= 20;
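// (For example, a datastack_size parameter of 128 KiB becomes
// 128 << 10 == 131072 bytes, and a young_size parameter of 16 MiB becomes
// 16 << 20 == 16777216 bytes.)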
/* Disable GC during init as a sanity check */
// Disable GC during init as a sanity check
gc_off = true;
/* OS-specific initialization */
// OS-specific initialization
early_init();
p->executable_path = vm_executable_path();
@ -96,7 +96,7 @@ void factor_vm::init_factor(vm_parameters* p) {
special_objects[idx] = allot_alien(false_object, aliens[n][1]);
}
/* We can GC now */
// We can GC now
gc_off = false;
if (!to_boolean(special_objects[OBJ_STAGE2]))
@ -110,7 +110,7 @@ void factor_vm::init_factor(vm_parameters* p) {
}
/* Allocates memory */
// Allocates memory
void factor_vm::pass_args_to_factor(int argc, vm_char** argv) {
growable_array args(this);

View File

@ -1,7 +1,7 @@
namespace factor {
/* Some functions for converting floating point numbers to binary
representations and vice versa */
// Some functions for converting floating point numbers to binary
// representations and vice versa
union double_bits_pun {
double x;

View File

@ -32,22 +32,22 @@ void free_list::add_to_free_list(free_heap_block* block) {
}
free_heap_block* free_list::find_free_block(cell size) {
/* Check small free lists */
// Check small free lists
cell bucket = size / data_alignment;
if (bucket < free_list_count) {
std::vector<free_heap_block*>& blocks = small_blocks[bucket];
if (blocks.size() == 0) {
/* Round up to a multiple of 'size' */
// Round up to a multiple of 'size'
cell large_block_size = ((allocation_page_size + size - 1) / size) * size;
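// (A worked example, assuming allocation_page_size is 4096: a 48-byte
// request rounds up to ((4096 + 48 - 1) / 48) * 48 == 4128, the smallest
// multiple of 48 that covers at least one allocation page.)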
/* Allocate a block this big */
// Allocate a block this big
free_heap_block* large_block = find_free_block(large_block_size);
if (!large_block)
return NULL;
large_block = split_free_block(large_block, large_block_size);
/* Split it up into pieces and add each piece back to the free list */
// Split it up into pieces and add each piece back to the free list
for (cell offset = 0; offset < large_block_size; offset += size) {
free_heap_block* small_block = large_block;
large_block = (free_heap_block*)((cell)large_block + size);
@ -64,7 +64,7 @@ free_heap_block* free_list::find_free_block(cell size) {
return block;
} else {
/* Check large free list */
// Check large free list
free_heap_block key;
key.make_free(size);
large_block_set::iterator iter = large_blocks.lower_bound(&key);
@ -87,7 +87,7 @@ free_heap_block* free_list::find_free_block(cell size) {
free_heap_block* free_list::split_free_block(free_heap_block* block,
cell size) {
if (block->size() != size) {
/* split the block in two */
// split the block in two
free_heap_block* split = (free_heap_block*)((cell)block + size);
split->make_free(block->size() - size);
block->make_free(size);

View File

@ -145,11 +145,11 @@ void free_list_allocator<Block>::sweep(Iterator& iter) {
cell end = this->end;
while (start != end) {
/* find next unmarked block */
// find next unmarked block
start = state.next_unmarked_block_after(start);
if (start != end) {
/* find size */
// find size
cell size = state.unmarked_block_size(start);
FACTOR_ASSERT(size > 0);
@ -168,8 +168,8 @@ template <typename Block> void free_list_allocator<Block>::sweep() {
sweep(null_sweep);
}
/* The forwarding map must be computed first by calling
state.compute_forwarding(). */
// The forwarding map must be computed first by calling
// state.compute_forwarding().
template <typename Block>
template <typename Iterator, typename Fixup>
void free_list_allocator<Block>::compact(Iterator& iter, Fixup fixup,
@ -186,13 +186,13 @@ void free_list_allocator<Block>::compact(Iterator& iter, Fixup fixup,
};
iterate(compact_block_func, fixup);
/* Now update the free list; there will be a single free block at
the end */
// Now update the free list; there will be a single free block at
// the end
free_blocks.initial_free_list(start, end, dest_addr - start);
}
/* During compaction we have to be careful and measure object sizes
differently */
// During compaction we have to be careful and measure object sizes
// differently
template <typename Block>
template <typename Iterator, typename Fixup>
void free_list_allocator<Block>::iterate(Iterator& iter, Fixup fixup) {

View File

@ -24,10 +24,10 @@ struct full_policy {
}
};
/* After a sweep, invalidate any code heap roots which are not marked,
so that if a block makes a tail call to a generic word, and the PIC
compiler triggers a GC, and the caller block gets GCd as a result,
the PIC code won't try to overwrite the call site */
// After a sweep, invalidate any code heap roots which are not marked,
// so that if a block makes a tail call to a generic word, and the PIC
// compiler triggers a GC, and the caller block gets GCd as a result,
// the PIC code won't try to overwrite the call site
void factor_vm::update_code_roots_for_sweep() {
mark_bits* state = &code->allocator->state;
@ -85,12 +85,12 @@ void factor_vm::collect_full() {
collect_sweep_impl();
if (data->low_memory_p()) {
/* Full GC did not free up enough memory. Grow the heap. */
// Full GC did not free up enough memory. Grow the heap.
set_current_gc_op(collect_growing_heap_op);
collect_growing_heap(0);
} else if (data->high_fragmentation_p()) {
/* Enough free memory, but it is not contiguous. Perform a
compaction. */
// Enough free memory, but it is not contiguous. Perform a
// compaction.
set_current_gc_op(collect_compact_op);
collect_compact_impl();
}

View File

@ -74,19 +74,19 @@ void factor_vm::end_gc() {
void factor_vm::start_gc_again() {
switch (current_gc->op) {
case collect_nursery_op:
/* Nursery collection can fail if aging does not have enough
free space to fit all live objects from nursery. */
// Nursery collection can fail if aging does not have enough
// free space to fit all live objects from nursery.
current_gc->op = collect_aging_op;
break;
case collect_aging_op:
/* Aging collection can fail if the aging semispace cannot fit
all the live objects from the other aging semispace and the
nursery. */
// Aging collection can fail if the aging semispace cannot fit
// all the live objects from the other aging semispace and the
// nursery.
current_gc->op = collect_to_tenured_op;
break;
default:
/* Nothing else should fail mid-collection due to insufficient
space in the target generation. */
// Nothing else should fail mid-collection due to insufficient
// space in the target generation.
critical_error("in start_gc_again, bad GC op", current_gc->op);
break;
}
@ -102,10 +102,10 @@ void factor_vm::gc(gc_op op, cell requested_size) {
FACTOR_ASSERT(!gc_off);
FACTOR_ASSERT(!current_gc);
/* Important invariant: tenured space must have enough contiguous free
space to fit the entire contents of the aging space and nursery. This is
because when doing a full collection, objects from younger generations
are promoted before any unreachable tenured objects are freed. */
// Important invariant: tenured space must have enough contiguous free
// space to fit the entire contents of the aging space and nursery. This is
// because when doing a full collection, objects from younger generations
// are promoted before any unreachable tenured objects are freed.
FACTOR_ASSERT(!data->high_fragmentation_p());
current_gc = new gc_state(op, this);
@ -113,8 +113,8 @@ void factor_vm::gc(gc_op op, cell requested_size) {
ctx->callstack_seg->set_border_locked(false);
atomic::store(&current_gc_p, true);
/* Keep trying to GC higher and higher generations until we don't run
out of space in the target generation. */
// Keep trying to GC higher and higher generations until we don't run
// out of space in the target generation.
for (;;) {
try {
if (gc_events)
@ -125,19 +125,19 @@ void factor_vm::gc(gc_op op, cell requested_size) {
collect_nursery();
break;
case collect_aging_op:
/* We end up here if the above fails. */
// We end up here if the above fails.
collect_aging();
if (data->high_fragmentation_p()) {
/* Change GC op so that if we fail again, we crash. */
// Change GC op so that if we fail again, we crash.
set_current_gc_op(collect_full_op);
collect_full();
}
break;
case collect_to_tenured_op:
/* We end up here if the above fails. */
// We end up here if the above fails.
collect_to_tenured();
if (data->high_fragmentation_p()) {
/* Change GC op so that if we fail again, we crash. */
// Change GC op so that if we fail again, we crash.
set_current_gc_op(collect_full_op);
collect_full();
}
@ -159,7 +159,7 @@ void factor_vm::gc(gc_op op, cell requested_size) {
break;
}
catch (const must_start_gc_again&) {
/* We come back here if the target generation is full. */
// We come back here if the target generation is full.
start_gc_again();
continue;
}
@ -173,7 +173,7 @@ void factor_vm::gc(gc_op op, cell requested_size) {
delete current_gc;
current_gc = NULL;
/* Check the invariant again, just in case. */
// Check the invariant again, just in case.
FACTOR_ASSERT(!data->high_fragmentation_p());
}
@ -189,18 +189,17 @@ void factor_vm::primitive_compact_gc() {
gc(collect_compact_op, 0);
}
/*
* It is up to the caller to fill in the object's fields in a meaningful
* fashion!
*/
/* Allocates memory */
// It is up to the caller to fill in the object's fields in a meaningful
// fashion!
// Allocates memory
object* factor_vm::allot_large_object(cell type, cell size) {
/* If tenured space does not have enough room, collect and compact */
// If tenured space does not have enough room, collect and compact
cell requested_size = size + data->high_water_mark();
if (!data->tenured->can_allot_p(requested_size)) {
primitive_compact_gc();
/* If it still won't fit, grow the heap */
// If it still won't fit, grow the heap
if (!data->tenured->can_allot_p(requested_size)) {
gc(collect_growing_heap_op, size);
}
@ -208,9 +207,9 @@ object* factor_vm::allot_large_object(cell type, cell size) {
object* obj = data->tenured->allot(size);
/* Allows initialization code to store old->new pointers
without hitting the write barrier in the common case of
a nursery allocation */
// Allows initialization code to store old->new pointers
// without hitting the write barrier in the common case of
// a nursery allocation
write_barrier(obj, size);
obj->initialize(type);
@ -221,8 +220,8 @@ void factor_vm::primitive_enable_gc_events() {
gc_events = new std::vector<gc_event>();
}
/* Allocates memory (byte_array_from_value, result.add) */
/* XXX: Remember that growable_array has a data_root already */
// Allocates memory (byte_array_from_value, result.add)
// XXX: Remember that growable_array has a data_root already
void factor_vm::primitive_disable_gc_events() {
if (gc_events) {
growable_array result(this);

View File

@ -13,7 +13,7 @@ template <typename Array> cell array_size(Array* array) {
return array_size<Array>(array_capacity(array));
}
/* Allocates memory */
// Allocates memory
template <typename Array>
Array* factor_vm::allot_uninitialized_array(cell capacity) {
Array* array = allot<Array>(array_size<Array>(capacity));
@ -27,7 +27,7 @@ bool factor_vm::reallot_array_in_place_p(Array* array, cell capacity) {
capacity <= array_capacity(array);
}
/* Allocates memory (sometimes) */
// Allocates memory (sometimes)
template <typename Array>
Array* factor_vm::reallot_array(Array* array_, cell capacity) {
data_root<Array> array(array_, this);

View File

@ -77,7 +77,7 @@ void vm_parameters::init_from_args(int argc, vm_char** argv) {
else if (factor_arg(arg, STRING_LITERAL("-callbacks=%d"), &callback_size))
;
else if (STRNCMP(arg, STRING_LITERAL("-i="), 3) == 0) {
/* In case you specify -i more than once. */
// In case you specify -i more than once.
if (image_path) {
free((vm_char *)image_path);
}
@ -231,8 +231,8 @@ FILE* factor_vm::open_image(vm_parameters* p) {
return file;
}
/* Read an image file from disk, only done once during startup */
/* This function also initializes the data and code heaps */
// Read an image file from disk, only done once during startup
// This function also initializes the data and code heaps
void factor_vm::load_image(vm_parameters* p) {
FILE* file = open_image(p);
if (file == NULL) {
@ -257,7 +257,7 @@ void factor_vm::load_image(vm_parameters* p) {
raw_fclose(file);
/* Certain special objects in the image are known to the runtime */
// Certain special objects in the image are known to the runtime
memcpy(special_objects, h.special_objects, sizeof(special_objects));
cell data_offset = data->tenured->start - h.data_relocation_base;
@ -265,9 +265,9 @@ void factor_vm::load_image(vm_parameters* p) {
fixup_heaps(data_offset, code_offset);
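// (A worked example of the rebasing, with illustrative addresses: if the
// image was saved with data_relocation_base 0x10000000 and tenured now
// starts at 0x7f3a10000000, data_offset is 0x7f3a00000000, and fixup_heaps
// adjusts every saved data pointer p to p + data_offset.)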
}
/* Save the current image to disk. We don't throw any exceptions here
because if the 'then-die' argument is t it is not safe to do
so. Instead we signal failure by returning false. */
// Save the current image to disk. We don't throw any exceptions here
// because if the 'then-die' argument is t it is not safe to do
// so. Instead we signal failure by returning false.
bool factor_vm::save_image(const vm_char* saving_filename,
const vm_char* filename) {
image_header h;
@ -301,39 +301,39 @@ bool factor_vm::save_image(const vm_char* saving_filename,
return true;
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_save_image() {
/* We unbox this before doing anything else. This is the only point
where we might throw an error, so we have to throw an error here since
later steps destroy the current image. */
// We unbox this before doing anything else. This is the only point
// where we might throw an error, so we have to throw an error here since
// later steps destroy the current image.
bool then_die = to_boolean(ctx->pop());
byte_array* path2 = untag_check<byte_array>(ctx->pop());
byte_array* path1 = untag_check<byte_array>(ctx->pop());
/* Copy the paths to non-gc memory to avoid them hanging around in
the saved image. */
// Copy the paths to non-gc memory to avoid them hanging around in
// the saved image.
vm_char* path1_saved = safe_strdup(path1->data<vm_char>());
vm_char* path2_saved = safe_strdup(path2->data<vm_char>());
if (then_die) {
/* strip out special_objects data which is set on startup anyway */
// strip out special_objects data which is set on startup anyway
for (cell i = 0; i < special_object_count; i++)
if (!save_special_p(i))
special_objects[i] = false_object;
/* dont trace objects only reachable from context stacks so we don't
get volatile data saved in the image. */
// don't trace objects only reachable from context stacks so we don't
// get volatile data saved in the image.
active_contexts.clear();
code->uninitialized_blocks.clear();
/* I think clearing the callback heap should be fine too. */
// I think clearing the callback heap should be fine too.
callbacks->allocator->initial_free_list(0);
}
/* do a full GC to push everything remaining into tenured space */
// do a full GC to push everything remaining into tenured space
primitive_compact_gc();
/* Save the image */
// Save the image
bool ret = save_image(path1_saved, path2_saved);
if (then_die) {
exit(ret ? 0 : 1);

View File

@ -13,13 +13,13 @@ struct embedded_image_footer {
struct image_header {
cell magic;
cell version;
/* base address of data heap when image was saved */
// base address of data heap when image was saved
cell data_relocation_base;
/* size of heap */
// size of heap
cell data_size;
/* base address of code heap when image was saved */
// base address of code heap when image was saved
cell code_relocation_base;
/* size of code heap */
// size of code heap
cell code_size;
cell reserved_1;
@ -27,7 +27,7 @@ struct image_header {
cell reserved_3;
cell reserved_4;
/* Initial user environment */
// Initial user environment
cell special_objects[special_object_count];
};

View File

@ -3,20 +3,20 @@
namespace factor {
void factor_vm::deallocate_inline_cache(cell return_address) {
/* Find the call target. */
// Find the call target.
void* old_entry_point = get_call_target(return_address);
code_block* old_block = (code_block*)old_entry_point - 1;
/* Free the old PIC since we know its unreachable */
// Free the old PIC since we know it's unreachable
if (old_block->pic_p())
code->free(old_block);
}
/* Figure out what kind of type check the PIC needs based on the methods
it contains */
// Figure out what kind of type check the PIC needs based on the methods
// it contains
static cell determine_inline_cache_type(array* cache_entries) {
for (cell i = 0; i < array_capacity(cache_entries); i += 2) {
/* Is it a tuple layout? */
// Is it a tuple layout?
if (TAG(array_nth(cache_entries, i)) == ARRAY_TYPE) {
return PIC_TUPLE;
}
@ -42,26 +42,26 @@ struct inline_cache_jit : public jit {
void inline_cache_jit::emit_check_and_jump(cell ic_type, cell i,
cell klass, cell method) {
/* Class equal? */
// Class equal?
cell check_type = PIC_CHECK_TAG;
if (TAG(klass) != FIXNUM_TYPE)
check_type = PIC_CHECK_TUPLE;
/* The tag check can be skipped if it is the first one and we are
checking for the fixnum type which is 0. That is because the
AND instruction in the PIC_TAG template already sets the zero
flag. */
// The tag check can be skipped if it is the first one and we are
// checking for the fixnum type which is 0. That is because the
// AND instruction in the PIC_TAG template already sets the zero
// flag.
if (!(i == 0 && ic_type == PIC_TAG && klass == 0)) {
emit_with_literal(parent->special_objects[check_type], klass);
}
/* Yes? Jump to method */
// Yes? Jump to method
emit_with_literal(parent->special_objects[PIC_HIT], method);
}
/* index: 0 = top of stack, 1 = item underneath, etc
cache_entries: array of class/method pairs */
/* Allocates memory */
// index: 0 = top of stack, 1 = item underneath, etc
// cache_entries: array of class/method pairs
// Allocates memory
void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
cell methods_, cell cache_entries_,
bool tail_call_p) {
@ -72,15 +72,15 @@ void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
cell ic_type = determine_inline_cache_type(cache_entries.untagged());
parent->update_pic_count(ic_type);
/* Generate machine code to determine the object's class. */
// Generate machine code to determine the object's class.
emit_with_literal(parent->special_objects[PIC_LOAD],
tag_fixnum(-index * sizeof(cell)));
/* Put the tag of the object, or class of the tuple in a register. */
// Put the tag of the object, or class of the tuple in a register.
emit(parent->special_objects[ic_type]);
/* Generate machine code to check, in turn, if the class is one of the cached
entries. */
// Generate machine code to check, in turn, if the class is one of the cached
// entries.
for (cell i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
cell klass = array_nth(cache_entries.untagged(), i);
cell method = array_nth(cache_entries.untagged(), i + 1);
@ -88,15 +88,15 @@ void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
emit_check_and_jump(ic_type, i, klass, method);
}
/* If none of the above conditionals tested true, then execution "falls
through" to here. */
// If none of the above conditionals tested true, then execution "falls
// through" to here.
/* A stack frame is set up, since the inline-cache-miss sub-primitive
makes a subroutine call to the VM. */
// A stack frame is set up, since the inline-cache-miss sub-primitive
// makes a subroutine call to the VM.
emit(parent->special_objects[JIT_PROLOG]);
/* The inline-cache-miss sub-primitive call receives enough information to
reconstruct the PIC with the new entry. */
// The inline-cache-miss sub-primitive call receives enough information to
// reconstruct the PIC with the new entry.
push(generic_word.value());
push(methods.value());
push(tag_fixnum(index));
@ -104,11 +104,11 @@ void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
emit_subprimitive(
parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
true, /* tail_call_p */
true); /* stack_frame_p */
true, // tail_call_p
true); // stack_frame_p
}
/* Allocates memory */
// Allocates memory
code_block* factor_vm::compile_inline_cache(fixnum index, cell generic_word_,
cell methods_, cell cache_entries_,
bool tail_call_p) {
@ -124,7 +124,7 @@ code_block* factor_vm::compile_inline_cache(fixnum index, cell generic_word_,
return code;
}
/* Allocates memory */
// Allocates memory
cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_,
cell method_) {
data_root<array> cache_entries(cache_entries_, this);
@ -148,13 +148,13 @@ void factor_vm::update_pic_transitions(cell pic_size) {
dispatch_stats.ic_to_pic_transitions++;
}
/* The cache_entries parameter is empty (on cold call site) or has entries
(on cache miss). Called from assembly with the actual return address.
Compilation of the inline cache may trigger a GC, which may trigger a
compaction;
also, the block containing the return address may now be dead. Use a
code_root to take care of the details. */
/* Allocates memory */
// The cache_entries parameter is empty (on cold call site) or has entries
// (on cache miss). Called from assembly with the actual return address.
// Compilation of the inline cache may trigger a GC, which may trigger a
// compaction; also, the block containing the return address may now be
// dead. Use a code_root to take care of the details.
// Allocates memory
cell factor_vm::inline_cache_miss(cell return_address_) {
code_root return_address(return_address_, this);
bool tail_call_site = tail_call_site_p(return_address.value);
@ -193,11 +193,11 @@ cell factor_vm::inline_cache_miss(cell return_address_) {
->entry_point();
}
/* Install the new stub. */
// Install the new stub.
if (return_address.valid) {
/* Since each PIC is only referenced from a single call site,
if the old call target was a PIC, we can deallocate it immediately,
instead of leaving dead PICs around until the next GC. */
// Since each PIC is only referenced from a single call site,
// if the old call target was a PIC, we can deallocate it immediately,
// instead of leaving dead PICs around until the next GC.
deallocate_inline_cache(return_address.value);
set_call_target(return_address.value, xt);
@ -212,7 +212,7 @@ cell factor_vm::inline_cache_miss(cell return_address_) {
return xt;
}
/* Allocates memory */
// Allocates memory
VM_C_API cell inline_cache_miss(cell return_address, factor_vm* parent) {
return parent->inline_cache_miss(return_address);
}
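
The stub that emit_inline_cache generates behaves roughly like the following
C++ simulation of the dispatch path described in the comments above; it is a
sketch only, and pic_lookup, klass_id and method_fn are illustrative names
rather than VM types:

#include <cstdint>
#include <utility>
#include <vector>

using klass_id = uint64_t;
using method_fn = void (*)();

void miss_stub() { /* would compile a larger PIC and retry the dispatch */ }

method_fn pic_lookup(klass_id receiver_klass,
                     const std::vector<std::pair<klass_id, method_fn>>& cache) {
  for (const auto& entry : cache)       // the emitted code unrolls this loop
    if (entry.first == receiver_klass)  // emit_check_and_jump: "Class equal?"
      return entry.second;              // PIC_HIT: jump to the cached method
  return miss_stub;                     // fall through to the miss sub-primitive
}
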

View File

@ -9,7 +9,7 @@ instruction_operand::instruction_operand(relocation_entry rel,
index(index),
pointer(compiled->entry_point() + rel.offset()) {}
/* Load a 32-bit value from a PowerPC LIS/ORI sequence */
// Load a 32-bit value from a PowerPC LIS/ORI sequence
fixnum instruction_operand::load_value_2_2() {
uint32_t* ptr = (uint32_t*)pointer;
cell hi = (ptr[-2] & 0xffff);
@ -17,7 +17,7 @@ fixnum instruction_operand::load_value_2_2() {
return hi << 16 | lo;
}
/* Load a 64-bit value from a PowerPC LIS/ORI/SLDI/ORIS/ORI sequence */
// Load a 64-bit value from a PowerPC LIS/ORI/SLDI/ORIS/ORI sequence
fixnum instruction_operand::load_value_2_2_2_2() {
uint32_t* ptr = (uint32_t*)pointer;
uint64_t hhi = (ptr[-5] & 0xffff);
@ -28,7 +28,7 @@ fixnum instruction_operand::load_value_2_2_2_2() {
return (cell)val;
}
/* Load a value from a bitfield of a PowerPC instruction */
// Load a value from a bitfield of a PowerPC instruction
fixnum instruction_operand::load_value_masked(cell mask, cell bits,
cell shift) {
int32_t* ptr = (int32_t*)(pointer - sizeof(uint32_t));
@ -77,14 +77,14 @@ code_block* instruction_operand::load_code_block() {
return ((code_block*)load_value(pointer) - 1);
}
/* Store a 32-bit value into a PowerPC LIS/ORI sequence */
// Store a 32-bit value into a PowerPC LIS/ORI sequence
void instruction_operand::store_value_2_2(fixnum value) {
uint32_t* ptr = (uint32_t*)pointer;
ptr[-2] = ((ptr[-2] & ~0xffff) | ((value >> 16) & 0xffff));
ptr[-1] = ((ptr[-1] & ~0xffff) | (value & 0xffff));
}
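
The LIS/ORI accessors above just split a 32-bit value into two 16-bit
immediates and reassemble it; a standalone sketch of that arithmetic, using an
arbitrary test value rather than real PowerPC instruction words:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t value = 0xDEADBEEF;
  uint32_t hi = (value >> 16) & 0xffff;  // stored in the LIS immediate
  uint32_t lo = value & 0xffff;          // stored in the ORI immediate
  assert(((hi << 16) | lo) == value);    // load_value_2_2 reverses the split
  return 0;
}
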
/* Store a 64-bit value into a PowerPC LIS/ORI/SLDI/ORIS/ORI sequence */
// Store a 64-bit value into a PowerPC LIS/ORI/SLDI/ORIS/ORI sequence
void instruction_operand::store_value_2_2_2_2(fixnum value) {
uint64_t val = value;
uint32_t* ptr = (uint32_t*)pointer;
@ -94,7 +94,7 @@ void instruction_operand::store_value_2_2_2_2(fixnum value) {
ptr[-1] = ((ptr[-1] & ~0xffff) | ((val >> 0) & 0xffff));
}
/* Store a value into a bitfield of a PowerPC instruction */
// Store a value into a bitfield of a PowerPC instruction
void instruction_operand::store_value_masked(fixnum value, cell mask,
cell shift) {
uint32_t* ptr = (uint32_t*)(pointer - sizeof(uint32_t));

View File

@ -1,66 +1,66 @@
namespace factor {
enum relocation_type {
/* arg is a literal table index, holding a pair (symbol/dll) */
// arg is a literal table index, holding a pair (symbol/dll)
RT_DLSYM,
/* a word or quotation's general entry point */
// a word or quotation's general entry point
RT_ENTRY_POINT,
/* a word's PIC entry point */
// a word's PIC entry point
RT_ENTRY_POINT_PIC,
/* a word's tail-call PIC entry point */
// a word's tail-call PIC entry point
RT_ENTRY_POINT_PIC_TAIL,
/* current offset */
// current offset
RT_HERE,
/* current code block */
// current code block
RT_THIS,
/* data heap literal */
// data heap literal
RT_LITERAL,
/* untagged fixnum literal */
// untagged fixnum literal
RT_UNTAGGED,
/* address of megamorphic_cache_hits var */
// address of megamorphic_cache_hits var
RT_MEGAMORPHIC_CACHE_HITS,
/* address of vm object */
// address of vm object
RT_VM,
/* value of vm->cards_offset */
// value of vm->cards_offset
RT_CARDS_OFFSET,
/* value of vm->decks_offset */
// value of vm->decks_offset
RT_DECKS_OFFSET,
RT_UNUSED,
/* arg is a literal table index, holding a pair (symbol/dll) */
// arg is a literal table index, holding a pair (symbol/dll)
RT_DLSYM_TOC,
/* address of inline_cache_miss function. This is a separate
relocation to reduce compile time and size for PICs. */
// address of inline_cache_miss function. This is a separate
// relocation to reduce compile time and size for PICs.
RT_INLINE_CACHE_MISS,
/* address of safepoint page in code heap */
// address of safepoint page in code heap
RT_SAFEPOINT
};
enum relocation_class {
/* absolute address in a pointer-width location */
// absolute address in a pointer-width location
RC_ABSOLUTE_CELL,
/* absolute address in a 4 byte location */
// absolute address in a 4 byte location
RC_ABSOLUTE,
/* relative address in a 4 byte location */
// relative address in a 4 byte location
RC_RELATIVE,
/* absolute address in a PowerPC LIS/ORI sequence */
// absolute address in a PowerPC LIS/ORI sequence
RC_ABSOLUTE_PPC_2_2,
/* absolute address in a PowerPC LWZ instruction */
// absolute address in a PowerPC LWZ instruction
RC_ABSOLUTE_PPC_2,
/* relative address in a PowerPC LWZ/STW/BC instruction */
// relative address in a PowerPC LWZ/STW/BC instruction
RC_RELATIVE_PPC_2_PC,
/* relative address in a PowerPC B/BL instruction */
// relative address in a PowerPC B/BL instruction
RC_RELATIVE_PPC_3_PC,
/* relative address in an ARM B/BL instruction */
// relative address in an ARM B/BL instruction
RC_RELATIVE_ARM_3,
/* pointer to address in an ARM LDR/STR instruction */
// pointer to address in an ARM LDR/STR instruction
RC_INDIRECT_ARM,
/* pointer to address in an ARM LDR/STR instruction offset by 8 bytes */
// pointer to address in an ARM LDR/STR instruction offset by 8 bytes
RC_INDIRECT_ARM_PC,
/* absolute address in a 2 byte location */
// absolute address in a 2 byte location
RC_ABSOLUTE_2,
/* absolute address in a 1 byte location */
// absolute address in a 1 byte location
RC_ABSOLUTE_1,
/* absolute address in a PowerPC LIS/ORI/SLDI/ORIS/ORI sequence */
// absolute address in a PowerPC LIS/ORI/SLDI/ORIS/ORI sequence
RC_ABSOLUTE_PPC_2_2_2_2,
};
@ -70,7 +70,7 @@ static const cell rel_relative_ppc_3_mask = 0x03fffffc;
static const cell rel_indirect_arm_mask = 0x00000fff;
static const cell rel_relative_arm_3_mask = 0x00ffffff;
/* code relocation table consists of a table of entries for each fixup */
// code relocation table consists of a table of entries for each fixup
struct relocation_entry {
uint32_t value;
@ -113,7 +113,7 @@ struct relocation_entry {
return 0;
default:
critical_error("Bad rel type in number_of_parameters()", type());
return -1; /* Can't happen */
return -1; // Can't happen
}
}
};

View File

@ -2,16 +2,16 @@
namespace factor {
/* Simple wrappers for ANSI C I/O functions, used for bootstrapping.
// Simple wrappers for ANSI C I/O functions, used for bootstrapping.
Note the ugly loop logic in almost every function; we have to handle EINTR
and restart the operation if the system call was interrupted. Naive
applications don't do this, but then they quickly fail if one enables
itimer()s or other signals.
// Note the ugly loop logic in almost every function; we have to handle EINTR
// and restart the operation if the system call was interrupted. Naive
// applications don't do this, but then they quickly fail if one enables
// itimer()s or other signals.
The Factor library provides platform-specific code for Unix and Windows
with many more capabilities so these words are not usually used in
normal operation. */
// The Factor library provides platform-specific code for Unix and Windows
// with many more capabilities so these words are not usually used in
// normal operation.
size_t raw_fread(void* ptr, size_t size, size_t nitems, FILE* stream) {
FACTOR_ASSERT(nitems > 0);
@ -48,7 +48,7 @@ void factor_vm::init_c_io() {
special_objects[OBJ_STDERR] = allot_alien(false_object, (cell)stderr);
}
/* Allocates memory */
// Allocates memory
void factor_vm::io_error_if_not_EINTR() {
if (errno == EINTR)
return;
@ -186,7 +186,7 @@ void factor_vm::primitive_fgetc() {
ctx->replace(tag_fixnum(c));
}
/* Allocates memory (from_unsigned_cell())*/
// Allocates memory (from_unsigned_cell())
void factor_vm::primitive_fread() {
FILE* file = pop_file_handle();
void* buf = (void*)alien_offset(ctx->pop());
@ -244,9 +244,9 @@ void factor_vm::primitive_fclose() {
io_error_if_not_EINTR();
}
/* This function is used by FFI I/O. Accessing the errno global directly is
not portable, since on some libc's errno is not a global but a funky macro that
reads thread-local storage. */
// This function is used by FFI I/O. Accessing the errno global directly is
// not portable, since on some libc's errno is not a global but a funky macro that
// reads thread-local storage.
VM_C_API int err_no() { return errno; }
VM_C_API void set_err_no(int err) { errno = err; }
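
The EINTR handling described at the top of this file follows the usual retry
idiom; a minimal standalone sketch of that pattern around the POSIX read(2)
call (the helper name is illustrative, not a VM function):

#include <cerrno>
#include <unistd.h>

ssize_t read_retrying(int fd, void* buf, size_t len) {
  for (;;) {
    ssize_t n = read(fd, buf, len);
    if (n >= 0)
      return n;      // success (possibly a short read)
    if (errno != EINTR)
      return -1;     // genuine error: report it to the caller
    // interrupted by a signal before any data was read: just retry
  }
}
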

View File

@ -1,10 +1,10 @@
namespace factor {
/* Safe IO functions that does not throw Factor errors. */
// Safe IO functions that do not throw Factor errors.
int raw_fclose(FILE* stream);
size_t raw_fread(void* ptr, size_t size, size_t nitems, FILE* stream);
/* Platform specific primitives */
// Platform specific primitives
VM_C_API int err_no();
VM_C_API void set_err_no(int err);

View File

@ -2,13 +2,13 @@
namespace factor {
/* Simple code generator used by:
- quotation compiler (quotations.cpp),
- megamorphic caches (dispatch.cpp),
- polymorphic inline caches (inline_cache.cpp) */
// Simple code generator used by:
// - quotation compiler (quotations.cpp),
// - megamorphic caches (dispatch.cpp),
// - polymorphic inline caches (inline_cache.cpp)
/* Allocates memory (`code` and `relocation` initializers create
growable_byte_array) */
// Allocates memory (`code` and `relocation` initializers create
// growable_byte_array)
jit::jit(code_block_type type, cell owner, factor_vm* vm)
: type(type),
owner(owner, vm),
@ -31,7 +31,7 @@ jit::~jit() {
(void)old_count;
}
/* Allocates memory */
// Allocates memory
void jit::emit_relocation(cell relocation_template_) {
data_root<byte_array> relocation_template(relocation_template_, parent);
cell capacity =
@ -45,7 +45,7 @@ void jit::emit_relocation(cell relocation_template_) {
}
}
/* Allocates memory */
// Allocates memory
void jit::emit(cell code_template_) {
data_root<array> code_template(code_template_, parent);
@ -69,7 +69,7 @@ void jit::emit(cell code_template_) {
code.append_byte_array(insns.value());
}
/* Allocates memory */
// Allocates memory
void jit::emit_with_literal(cell code_template_, cell argument_) {
data_root<array> code_template(code_template_, parent);
data_root<object> argument(argument_, parent);
@ -77,7 +77,7 @@ void jit::emit_with_literal(cell code_template_, cell argument_) {
emit(code_template.value());
}
/* Allocates memory */
// Allocates memory
void jit::emit_with_parameter(cell code_template_, cell argument_) {
data_root<array> code_template(code_template_, parent);
data_root<object> argument(argument_, parent);
@ -85,7 +85,7 @@ void jit::emit_with_parameter(cell code_template_, cell argument_) {
emit(code_template.value());
}
/* Allocates memory */
// Allocates memory
bool jit::emit_subprimitive(cell word_, bool tail_call_p, bool stack_frame_p) {
data_root<word> word(word_, parent);
data_root<array> code_template(word->subprimitive, parent);
@ -105,18 +105,18 @@ bool jit::emit_subprimitive(cell word_, bool tail_call_p, bool stack_frame_p) {
return false;
}
/* Facility to convert compiled code offsets to quotation offsets.
Call jit_compute_offset() with the compiled code offset, then emit
code, and at the end jit->position is the quotation position. */
// Facility to convert compiled code offsets to quotation offsets.
// Call jit_compute_offset() with the compiled code offset, then emit
// code, and at the end jit->position is the quotation position.
void jit::compute_position(cell offset_) {
computing_offset_p = true;
position = 0;
offset = offset_;
}
/* Allocates memory (trim(), add_code_block) */
// Allocates memory (trim(), add_code_block)
code_block* jit::to_code_block(cell frame_size) {
/* Emit dummy GC info */
// Emit dummy GC info
code.grow_bytes(alignment_for(code.count + 4, data_alignment));
uint32_t dummy_gc_info = 0;
code.append_bytes(&dummy_gc_info, sizeof(uint32_t));
@ -127,7 +127,7 @@ code_block* jit::to_code_block(cell frame_size) {
literals.trim();
return parent->add_code_block(
type, code.elements.value(), false_object, /* no labels */
type, code.elements.value(), false_object, // no labels
owner.value(), relocation.elements.value(), parameters.elements.value(),
literals.elements.value(), frame_size);
}

View File

@ -20,17 +20,17 @@ struct jit {
void emit_relocation(cell relocation_template);
void emit(cell code_template);
/* Allocates memory */
// Allocates memory
void parameter(cell parameter) { parameters.add(parameter); }
/* Allocates memory */
// Allocates memory
void emit_with_parameter(cell code_template_, cell parameter_);
/* Allocates memory */
// Allocates memory
void literal(cell literal) { literals.add(literal); }
/* Allocates memory */
// Allocates memory
void emit_with_literal(cell code_template_, cell literal_);
/* Allocates memory */
// Allocates memory
void push(cell literal) {
emit_with_literal(parent->special_objects[JIT_PUSH_LITERAL], literal);
}
@ -39,8 +39,8 @@ struct jit {
fixnum get_position() {
if (computing_offset_p) {
/* If this is still on, emit() didn't clear it,
so the offset was out of bounds */
// If this is still on, emit() didn't clear it,
// so the offset was out of bounds
return -1;
}
return position;

View File

@ -9,7 +9,7 @@ inline static cell alignment_for(cell a, cell b) { return align(a, b) - a; }
static const cell data_alignment = 16;
/* Must match leaf-stack-frame-size in core/layouts/layouts.factor */
// Must match leaf-stack-frame-size in core/layouts/layouts.factor
#define LEAF_FRAME_SIZE 16
#define WORD_SIZE (signed)(sizeof(cell) * 8)
@ -20,7 +20,7 @@ static const cell data_alignment = 16;
#define UNTAG(x) ((cell)(x) & ~TAG_MASK)
#define RETAG(x, tag) (UNTAG(x) | (tag))
/*** Tags ***/
// *** Tags ***
#define FIXNUM_TYPE 0
#define F_TYPE 1
#define ARRAY_TYPE 2
@ -80,7 +80,7 @@ enum code_block_type {
code_block_pic
};
/* Constants used when floating-point trap exceptions are thrown */
// Constants used when floating-point trap exceptions are thrown
enum {
FP_TRAP_INVALID_OPERATION = 1 << 0,
FP_TRAP_OVERFLOW = 1 << 1,
@ -89,11 +89,11 @@ enum {
FP_TRAP_INEXACT = 1 << 4,
};
/* What Factor calls 'f' */
// What Factor calls 'f'
static const cell false_object = F_TYPE;
inline static bool immediate_p(cell obj) {
/* We assume that fixnums have tag 0 and false_object has tag 1 */
// We assume that fixnums have tag 0 and false_object has tag 1
return TAG(obj) <= F_TYPE;
}
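
The immediate_p check relies on fixnums and f having the two lowest tag values;
a tiny standalone illustration of that tag arithmetic (the 4-bit tag width here
is assumed for the example, not taken from this header):

#include <cassert>

int main() {
  const unsigned TAG_BITS = 4, TAG_MASK = (1u << TAG_BITS) - 1;
  const unsigned FIXNUM_TYPE = 0, F_TYPE = 1;
  unsigned long a_fixnum = (42ul << TAG_BITS) | FIXNUM_TYPE;
  unsigned long false_obj = F_TYPE;
  assert((a_fixnum & TAG_MASK) <= F_TYPE);   // immediate
  assert((false_obj & TAG_MASK) <= F_TYPE);  // immediate
  return 0;
}
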
@ -131,8 +131,8 @@ struct object {
template <typename Iterator> void each_slot(Iterator& iter);
/* Only valid for objects in tenured space; must cast to free_heap_block
to do anything with it if its free */
// Only valid for objects in tenured space; must cast to free_heap_block
// to do anything with it if its free
bool free_p() const { return (header & 1) == 1; }
cell type() const { return (header >> 2) & TAG_MASK; }
@ -152,32 +152,32 @@ struct object {
void forward_to(object* pointer) { header = ((cell)pointer | 2); }
};
/* Assembly code makes assumptions about the layout of this struct */
// Assembly code makes assumptions about the layout of this struct
struct array : public object {
static const cell type_number = ARRAY_TYPE;
static const cell element_size = sizeof(cell);
/* tagged */
// tagged
cell capacity;
cell* data() const { return (cell*)(this + 1); }
};
/* These are really just arrays, but certain elements have special
significance */
// These are really just arrays, but certain elements have special
// significance
struct tuple_layout : public array {
NO_TYPE_CHECK;
/* tagged */
// tagged
cell klass;
/* tagged fixnum */
// tagged fixnum
cell size;
/* tagged fixnum */
// tagged fixnum
cell echelon;
};
struct bignum : public object {
static const cell type_number = BIGNUM_TYPE;
static const cell element_size = sizeof(cell);
/* tagged */
// tagged
cell capacity;
cell* data() const { return (cell*)(this + 1); }
@ -186,7 +186,7 @@ struct bignum : public object {
struct byte_array : public object {
static const cell type_number = BYTE_ARRAY_TYPE;
static const cell element_size = 1;
/* tagged */
// tagged
cell capacity;
#ifndef FACTOR_64
@ -199,14 +199,14 @@ struct byte_array : public object {
}
};
/* Assembly code makes assumptions about the layout of this struct */
// Assembly code makes assumptions about the layout of this struct
struct string : public object {
static const cell type_number = STRING_TYPE;
/* tagged num of chars */
// tagged num of chars
cell length;
/* tagged */
// tagged
cell aux;
/* tagged */
// tagged
cell hashcode;
uint8_t* data() const { return (uint8_t*)(this + 1); }
@ -214,46 +214,46 @@ struct string : public object {
struct code_block;
/* Assembly code makes assumptions about the layout of this struct:
basis/bootstrap/images/images.factor
basis/compiler/constants/constants.factor
core/bootstrap/primitives.factor
*/
// Assembly code makes assumptions about the layout of this struct:
// basis/bootstrap/images/images.factor
// basis/compiler/constants/constants.factor
// core/bootstrap/primitives.factor
struct word : public object {
static const cell type_number = WORD_TYPE;
/* TAGGED hashcode */
// TAGGED hashcode
cell hashcode;
/* TAGGED word name */
// TAGGED word name
cell name;
/* TAGGED word vocabulary */
// TAGGED word vocabulary
cell vocabulary;
/* TAGGED definition */
// TAGGED definition
cell def;
/* TAGGED property assoc for library code */
// TAGGED property assoc for library code
cell props;
/* TAGGED alternative entry point for direct non-tail calls. Used for inline
* caching */
// TAGGED alternative entry point for direct non-tail calls. Used for inline
// caching
cell pic_def;
/* TAGGED alternative entry point for direct tail calls. Used for inline
* caching */
// TAGGED alternative entry point for direct tail calls. Used for inline
// caching
cell pic_tail_def;
/* TAGGED machine code for sub-primitive */
// TAGGED machine code for sub-primitive
cell subprimitive;
/* UNTAGGED entry point: jump here to execute word */
// UNTAGGED entry point: jump here to execute word
cell entry_point;
/* UNTAGGED compiled code block */
// UNTAGGED compiled code block
/* defined in code_blocks.hpp */
// defined in code_blocks.hpp
code_block* code() const;
};
/* Assembly code makes assumptions about the layout of this struct */
// Assembly code makes assumptions about the layout of this struct
struct wrapper : public object {
static const cell type_number = WRAPPER_TYPE;
cell object;
};
/* Assembly code makes assumptions about the layout of this struct */
// Assembly code makes assumptions about the layout of this struct
struct boxed_float : object {
static const cell type_number = FLOAT_TYPE;
@ -264,36 +264,36 @@ struct boxed_float : object {
double n;
};
/* Assembly code makes assumptions about the layout of this struct:
basis/bootstrap/images/images.factor
basis/compiler/constants/constants.factor
core/bootstrap/primitives.factor
*/
// Assembly code makes assumptions about the layout of this struct:
// basis/bootstrap/images/images.factor
// basis/compiler/constants/constants.factor
// core/bootstrap/primitives.factor
struct quotation : public object {
static const cell type_number = QUOTATION_TYPE;
/* tagged */
// tagged
cell array;
/* tagged */
// tagged
cell cached_effect;
/* tagged */
// tagged
cell cache_counter;
/* UNTAGGED entry point; jump here to call quotation */
// UNTAGGED entry point; jump here to call quotation
cell entry_point;
/* defined in code_blocks.hpp */
// defined in code_blocks.hpp
code_block* code() const;
};
/* Assembly code makes assumptions about the layout of this struct */
// Assembly code makes assumptions about the layout of this struct
struct alien : public object {
static const cell type_number = ALIEN_TYPE;
/* tagged */
// tagged
cell base;
/* tagged */
// tagged
cell expired;
/* untagged */
// untagged
cell displacement;
/* untagged */
// untagged
cell address;
void update_address() {
@ -306,15 +306,15 @@ struct alien : public object {
struct dll : public object {
static const cell type_number = DLL_TYPE;
/* tagged byte array holding a C string */
// tagged byte array holding a C string
cell path;
/* OS-specific handle */
// OS-specific handle
void* handle;
};
struct callstack : public object {
static const cell type_number = CALLSTACK_TYPE;
/* tagged */
// tagged
cell length;
cell frame_top_at(cell offset) const {
@ -329,7 +329,7 @@ struct callstack : public object {
struct tuple : public object {
static const cell type_number = TUPLE_TYPE;
/* tagged layout */
// tagged layout
cell layout;
cell* data() const { return (cell*)(this + 1); }

View File

@ -1,33 +1,33 @@
/* Fault handler information. MacOSX version.
Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
// Fault handler information. MacOSX version.
// Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
// Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
2005-03-10:
// Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
// 2005-03-10:
http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
// http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
Modified for Factor by Slava Pestov */
// Modified for Factor by Slava Pestov
#include "master.hpp"
namespace factor {
/* The exception port on which our thread listens. */
// The exception port on which our thread listens.
mach_port_t our_exception_port;
/* The following sources were used as a *reference* for this exception handling
code:
// The following sources were used as a *reference* for this exception handling
// code:
1. Apple's mach/xnu documentation
2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
omnigroup's macosx-dev list.
http://www.wodeveloper.com/omniLists/macosx-dev/2000/June/msg00137.html */
// 1. Apple's mach/xnu documentation
// 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
// omnigroup's macosx-dev list.
// http://www.wodeveloper.com/omniLists/macosx-dev/2000/June/msg00137.html
/* Modify a suspended thread's thread_state so that when the thread resumes
executing, the call frame of the current C primitive (if any) is rewound, and
the appropriate Factor error is thrown from the top-most Factor frame. */
// Modify a suspended thread's thread_state so that when the thread resumes
// executing, the call frame of the current C primitive (if any) is rewound, and
// the appropriate Factor error is thrown from the top-most Factor frame.
void factor_vm::call_fault_handler(exception_type_t exception,
exception_data_type_t code,
MACH_EXC_STATE_TYPE* exc_state,
@ -72,37 +72,37 @@ static void call_fault_handler(mach_port_t thread, exception_type_t exception,
MACH_EXC_STATE_TYPE* exc_state,
MACH_THREAD_STATE_TYPE* thread_state,
MACH_FLOAT_STATE_TYPE* float_state) {
/* Look up the VM instance involved */
// Look up the VM instance involved
THREADHANDLE thread_id = pthread_from_mach_thread_np(thread);
FACTOR_ASSERT(thread_id);
std::map<THREADHANDLE, factor_vm*>::const_iterator vm =
thread_vms.find(thread_id);
/* Handle the exception */
// Handle the exception
if (vm != thread_vms.end())
vm->second->call_fault_handler(exception, code, exc_state, thread_state,
float_state);
}
/* Handle an exception by invoking the user's fault handler and/or forwarding
the duty to the previously installed handlers. */
// Handle an exception by invoking the user's fault handler and/or forwarding
// the duty to the previously installed handlers.
extern "C" kern_return_t catch_exception_raise(
mach_port_t exception_port, mach_port_t thread, mach_port_t task,
exception_type_t exception, exception_data_t code,
mach_msg_type_number_t code_count) {
/* 10.6 likes to report exceptions from child processes too. Ignore those */
// 10.6 likes to report exceptions from child processes too. Ignore those
if (task != mach_task_self())
return KERN_FAILURE;
/* Get fault information and the faulting thread's register contents..
See http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/thread_get_state.html. */
// Get fault information and the faulting thread's register contents.
// See http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/thread_get_state.html.
MACH_EXC_STATE_TYPE exc_state;
mach_msg_type_number_t exc_state_count = MACH_EXC_STATE_COUNT;
if (thread_get_state(thread, MACH_EXC_STATE_FLAVOR, (natural_t*)&exc_state,
&exc_state_count) !=
KERN_SUCCESS) {
/* The thread is supposed to be suspended while the exception
handler is called. This shouldn't fail. */
// The thread is supposed to be suspended while the exception
// handler is called. This shouldn't fail.
return KERN_FAILURE;
}
@ -111,8 +111,8 @@ extern "C" kern_return_t catch_exception_raise(
if (thread_get_state(thread, MACH_THREAD_STATE_FLAVOR,
(natural_t*)&thread_state, &thread_state_count) !=
KERN_SUCCESS) {
/* The thread is supposed to be suspended while the exception
handler is called. This shouldn't fail. */
// The thread is supposed to be suspended while the exception
// handler is called. This shouldn't fail.
return KERN_FAILURE;
}
@ -121,18 +121,18 @@ extern "C" kern_return_t catch_exception_raise(
if (thread_get_state(thread, MACH_FLOAT_STATE_FLAVOR,
(natural_t*)&float_state, &float_state_count) !=
KERN_SUCCESS) {
/* The thread is supposed to be suspended while the exception
handler is called. This shouldn't fail. */
// The thread is supposed to be suspended while the exception
// handler is called. This shouldn't fail.
return KERN_FAILURE;
}
/* Modify registers so to have the thread resume executing the
fault handler */
// Modify registers so to have the thread resume executing the
// fault handler
call_fault_handler(thread, exception, code[0], &exc_state, &thread_state,
&float_state);
/* Set the faulting thread's register contents..
See http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/thread_set_state.html. */
// Set the faulting thread's register contents.
// See http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/thread_set_state.html.
if (thread_set_state(thread, MACH_FLOAT_STATE_FLAVOR,
(natural_t*)&float_state, float_state_count) !=
KERN_SUCCESS) {
@ -148,19 +148,19 @@ extern "C" kern_return_t catch_exception_raise(
return KERN_SUCCESS;
}
/* The main function of the thread listening for exceptions. */
// The main function of the thread listening for exceptions.
static void* mach_exception_thread(void* arg) {
for (;;) {
/* These two structures contain some private kernel data. We don't need
to access any of it so we don't bother defining a proper struct. The
correct definitions are in the xnu source code. */
/* Buffer for a message to be received. */
// These two structures contain some private kernel data. We don't need
// to access any of it so we don't bother defining a proper struct. The
// correct definitions are in the xnu source code.
// Buffer for a message to be received.
struct {
mach_msg_header_t head;
mach_msg_body_t msgh_body;
char data[1024];
} msg;
/* Buffer for a reply message. */
// Buffer for a reply message.
struct {
mach_msg_header_t head;
char data[1024];
@ -168,7 +168,7 @@ static void* mach_exception_thread(void* arg) {
mach_msg_return_t retval;
/* Wait for a message on the exception port. */
// Wait for a message on the exception port.
retval =
mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE, 0, sizeof(msg),
our_exception_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
@ -176,11 +176,11 @@ static void* mach_exception_thread(void* arg) {
abort();
}
/* Handle the message: Call exc_server, which will call
catch_exception_raise and produce a reply message. */
// Handle the message: Call exc_server, which will call
// catch_exception_raise and produce a reply message.
exc_server(&msg.head, &reply.head);
/* Send the reply. */
// Send the reply.
if (mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL) !=
MACH_MSG_SUCCESS) {
@ -190,38 +190,38 @@ static void* mach_exception_thread(void* arg) {
return NULL; // quiet warning
}
/* Initialize the Mach exception handler thread. */
// Initialize the Mach exception handler thread.
void mach_initialize() {
mach_port_t self;
exception_mask_t mask;
self = mach_task_self();
/* Allocate a port on which the thread shall listen for exceptions. */
// Allocate a port on which the thread shall listen for exceptions.
if (mach_port_allocate(self, MACH_PORT_RIGHT_RECEIVE, &our_exception_port) !=
KERN_SUCCESS)
fatal_error("mach_port_allocate() failed", 0);
/* See
* http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/mach_port_insert_right.html.
*/
// See
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/mach_port_insert_right.html.
if (mach_port_insert_right(self, our_exception_port, our_exception_port,
MACH_MSG_TYPE_MAKE_SEND) !=
KERN_SUCCESS)
fatal_error("mach_port_insert_right() failed", 0);
/* The exceptions we want to catch. */
// The exceptions we want to catch.
mask = EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC;
/* Create the thread listening on the exception port. */
// Create the thread listening on the exception port.
start_thread(mach_exception_thread, NULL);
/* Replace the exception port info for these exceptions with our own.
Note that we replace the exception port for the entire task, not only
for a particular thread. This has the effect that when our exception
port gets the message, the thread specific exception port has already
been asked, and we don't need to bother about it. See
http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/task_set_exception_ports.html. */
// Replace the exception port info for these exceptions with our own.
// Note that we replace the exception port for the entire task, not only
// for a particular thread. This has the effect that when our exception
// port gets the message, the thread specific exception port has already
// been asked, and we don't need to bother about it. See
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/task_set_exception_ports.html.
if (task_set_exception_ports(self, mask, our_exception_port,
EXCEPTION_DEFAULT, MACHINE_THREAD_STATE) !=
KERN_SUCCESS)

View File

@ -1,13 +1,13 @@
/* Fault handler information. MacOSX version.
Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
// Fault handler information. MacOSX version.
// Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
// Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
2005-03-10:
// Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
// 2005-03-10:
http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
// http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
Modified for Factor by Slava Pestov */
// Modified for Factor by Slava Pestov
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
@ -20,28 +20,28 @@ Modified for Factor by Slava Pestov */
#include <mach/task.h>
#include <pthread.h>
/* This is not defined in any header, although documented. */
// This is not defined in any header, although documented.
/* http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/exc_server.html says:
The exc_server function is the MIG generated server handling function
to handle messages from the kernel relating to the occurrence of an
exception in a thread. Such messages are delivered to the exception port
set via thread_set_exception_ports or task_set_exception_ports. When an
exception occurs in a thread, the thread sends an exception message to its
exception port, blocking in the kernel waiting for the receipt of a reply.
The exc_server function performs all necessary argument handling for this
kernel message and calls catch_exception_raise, catch_exception_raise_state
or catch_exception_raise_state_identity, which should handle the exception.
If the called routine returns KERN_SUCCESS, a reply message will be sent,
allowing the thread to continue from the point of the exception; otherwise,
no reply message is sent and the called routine must have dealt with the
exception thread directly. */
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/exc_server.html says:
// The exc_server function is the MIG generated server handling function
// to handle messages from the kernel relating to the occurrence of an
// exception in a thread. Such messages are delivered to the exception port
// set via thread_set_exception_ports or task_set_exception_ports. When an
// exception occurs in a thread, the thread sends an exception message to its
// exception port, blocking in the kernel waiting for the receipt of a reply.
// The exc_server function performs all necessary argument handling for this
// kernel message and calls catch_exception_raise, catch_exception_raise_state
// or catch_exception_raise_state_identity, which should handle the exception.
// If the called routine returns KERN_SUCCESS, a reply message will be sent,
// allowing the thread to continue from the point of the exception; otherwise,
// no reply message is sent and the called routine must have dealt with the
// exception thread directly.
extern "C" boolean_t exc_server(mach_msg_header_t* request_msg,
mach_msg_header_t* reply_msg);
/* http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/catch_exception_raise.html
These functions are defined in this file, and called by exc_server.
FIXME: What needs to be done when this code is put into a shared library? */
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/catch_exception_raise.html
// These functions are defined in this file, and called by exc_server.
// FIXME: What needs to be done when this code is put into a shared library?
extern "C" kern_return_t catch_exception_raise(
mach_port_t exception_port, mach_port_t thread, mach_port_t task,
exception_type_t exception, exception_data_t code,

View File

@ -84,8 +84,8 @@ struct mark_bits {
set_bitmap_range(marked, address, size);
}
/* The eventual destination of a block after compaction is just the number
of marked blocks before it. Live blocks must be marked on entry. */
// The eventual destination of a block after compaction is just the number
// of marked blocks before it. Live blocks must be marked on entry.
void compute_forwarding() {
cell accum = 0;
for (cell index = 0; index < bits_size; index++) {
@ -94,8 +94,8 @@ struct mark_bits {
}
}
/* We have the popcount for every mark_bits_granularity entries; look
up and compute the rest */
// We have the popcount for every mark_bits_granularity entries; look
// up and compute the rest
cell forward_block(const cell original) {
FACTOR_ASSERT(marked_p(original));
std::pair<cell, cell> position = bitmap_deref(original);
@ -118,17 +118,17 @@ struct mark_bits {
for (cell index = position.first; index < bits_size; index++) {
cell mask = ((fixnum)marked[index] >> bit_index);
if (~mask) {
/* Found an unmarked block on this page. Stop, it's hammer time */
// Found an unmarked block on this page. Stop, it's hammer time
cell clear_bit = rightmost_clear_bit(mask);
return line_block(index * mark_bits_granularity + bit_index +
clear_bit);
} else {
/* No unmarked blocks on this page. Keep looking */
// No unmarked blocks on this page. Keep looking
bit_index = 0;
}
}
/* No unmarked blocks were found */
// No unmarked blocks were found
return this->start + this->size;
}
@ -139,16 +139,16 @@ struct mark_bits {
for (cell index = position.first; index < bits_size; index++) {
cell mask = (marked[index] >> bit_index);
if (mask) {
/* Found an marked block on this page. Stop, it's hammer time */
// Found a marked block on this page. Stop, it's hammer time
cell set_bit = rightmost_set_bit(mask);
return line_block(index * mark_bits_granularity + bit_index + set_bit);
} else {
/* No marked blocks on this page. Keep looking */
// No marked blocks on this page. Keep looking
bit_index = 0;
}
}
/* No marked blocks were found */
// No marked blocks were found
return this->start + this->size;
}
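
The forwarding rule stated above ("the destination of a block is the number of
marked blocks before it") can be checked with a plain bool vector in place of
the real bitmap and popcount tables; a minimal sketch:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<bool> marked = {true, false, true, true, false, true};
  std::vector<std::size_t> forwarding(marked.size());
  std::size_t accum = 0;                  // marked blocks seen so far
  for (std::size_t i = 0; i < marked.size(); i++) {
    forwarding[i] = accum;                // destination index after compaction
    if (marked[i]) accum++;
  }
  assert(forwarding[2] == 1);             // only block 0 is marked before it
  assert(forwarding[5] == 3);             // blocks 0, 2 and 3 precede it
  return 0;
}
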

View File

@ -11,7 +11,7 @@
#include <errno.h>
/* C headers */
// C headers
#include <fcntl.h>
#include <limits.h>
#include <math.h>
@ -22,7 +22,7 @@
#include <wchar.h>
#include <stdint.h>
/* C++ headers */
// C++ headers
#include <algorithm>
#include <list>
#include <map>
@ -37,7 +37,7 @@
#define FACTOR_STRINGIZE_I(x) #x
#define FACTOR_STRINGIZE(x) FACTOR_STRINGIZE_I(x)
/* Record compiler version */
// Record compiler version
#if defined(__clang__)
#define FACTOR_COMPILER_VERSION "Clang (GCC " __VERSION__ ")"
#elif defined(__INTEL_COMPILER)
@ -52,10 +52,10 @@
#define FACTOR_COMPILER_VERSION "unknown"
#endif
/* Record compilation time */
// Record compilation time
#define FACTOR_COMPILE_TIME __TIMESTAMP__
/* Detect target CPU type */
// Detect target CPU type
#if defined(__arm__)
#define FACTOR_ARM
#elif defined(__amd64__) || defined(__x86_64__) || defined(_M_AMD64)
@ -82,10 +82,10 @@
#define WINDOWS
#endif
/* Forward-declare this since it comes up in function prototypes */
// Forward-declare this since it comes up in function prototypes
namespace factor { struct factor_vm; }
/* Factor headers */
// Factor headers
#include "assert.hpp"
#include "debug.hpp"
#include "layouts.hpp"
@ -140,4 +140,4 @@ namespace factor { struct factor_vm; }
#include "mvm.hpp"
#include "factor.hpp"
#endif /* __FACTOR_MASTER_H__ */
#endif // __FACTOR_MASTER_H__

View File

@ -17,28 +17,28 @@ void factor_vm::primitive_float_to_fixnum() {
ctx->replace(tag_fixnum(float_to_fixnum(ctx->peek())));
}
/* does not allocate, even though from_signed_cell can allocate */
/* Division can only overflow when we are dividing the most negative fixnum
by -1. */
// does not allocate, even though from_signed_cell can allocate
// Division can only overflow when we are dividing the most negative fixnum
// by -1.
void factor_vm::primitive_fixnum_divint() {
fixnum y = untag_fixnum(ctx->pop());
fixnum x = untag_fixnum(ctx->peek());
fixnum result = x / y;
if (result == -fixnum_min)
/* Does not allocate */
// Does not allocate
ctx->replace(from_signed_cell(-fixnum_min));
else
ctx->replace(tag_fixnum(result));
}
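
The overflow case called out above can be seen with a scaled-down fixnum range;
the sketch below uses a hypothetical 8-bit fixnum inside a wider C++ integer so
the division itself is well defined:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t fixnum_min = INT8_MIN, fixnum_max = INT8_MAX;  // stand-ins
  int32_t x = fixnum_min, y = -1;
  int32_t result = x / y;          // 128, one past fixnum_max
  assert(result == -fixnum_min);   // the exact condition checked above
  assert(result > fixnum_max);     // so the quotient must be boxed as a bignum
  return 0;
}
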
/* does not allocate, even though from_signed_cell can allocate */
// does not allocate, even though from_signed_cell can allocate
void factor_vm::primitive_fixnum_divmod() {
cell* s0 = (cell*)(ctx->datastack);
cell* s1 = (cell*)(ctx->datastack - sizeof(cell));
fixnum y = untag_fixnum(*s0);
fixnum x = untag_fixnum(*s1);
if (y == -1 && x == fixnum_min) {
/* Does not allocate */
// Does not allocate
*s1 = from_signed_cell(-fixnum_min);
*s0 = tag_fixnum(0);
} else {
@ -47,10 +47,9 @@ void factor_vm::primitive_fixnum_divmod() {
}
}
/*
* If we're shifting right by n bits, we won't overflow as long as none of the
* high WORD_SIZE-TAG_BITS-n bits are set.
*/
// If we're shifting left by n bits, we won't overflow as long as none of the
// high WORD_SIZE-TAG_BITS-n bits are set.
inline fixnum factor_vm::sign_mask(fixnum x) {
return x >> (WORD_SIZE - 1);
}
@ -63,7 +62,7 @@ inline fixnum factor_vm::branchless_abs(fixnum x) {
return (x ^ sign_mask(x)) - sign_mask(x);
}
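
The branchless_abs identity above, (x ^ sign_mask(x)) - sign_mask(x), depends
on sign_mask being an arithmetic right shift that yields 0 or -1; a quick
standalone check of the identity (assuming arithmetic shift of negative values,
as on the VM's supported targets):

#include <cassert>
#include <cstdint>

int64_t sign_mask(int64_t x) { return x >> 63; }  // 0 for x >= 0, -1 for x < 0
int64_t branchless_abs(int64_t x) { return (x ^ sign_mask(x)) - sign_mask(x); }

int main() {
  assert(branchless_abs(7) == 7);
  assert(branchless_abs(-7) == 7);
  assert(branchless_abs(0) == 0);
  return 0;
}
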
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_fixnum_shift() {
fixnum y = untag_fixnum(ctx->pop());
fixnum x = untag_fixnum(ctx->peek());
@ -85,12 +84,12 @@ void factor_vm::primitive_fixnum_shift() {
ctx->replace(tag<bignum>(bignum_arithmetic_shift(fixnum_to_bignum(x), y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_fixnum_to_bignum() {
ctx->replace(tag<bignum>(fixnum_to_bignum(untag_fixnum(ctx->peek()))));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_float_to_bignum() {
ctx->replace(tag<bignum>(float_to_bignum(ctx->peek())));
}
@ -104,31 +103,31 @@ void factor_vm::primitive_bignum_eq() {
ctx->replace(tag_boolean(bignum_equal_p(x, y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bignum_add() {
POP_BIGNUMS(x, y);
ctx->replace(tag<bignum>(bignum_add(x, y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bignum_subtract() {
POP_BIGNUMS(x, y);
ctx->replace(tag<bignum>(bignum_subtract(x, y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bignum_multiply() {
POP_BIGNUMS(x, y);
ctx->replace(tag<bignum>(bignum_multiply(x, y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bignum_divint() {
POP_BIGNUMS(x, y);
ctx->replace(tag<bignum>(bignum_quotient(x, y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bignum_divmod() {
cell* s0 = (cell*)(ctx->datastack);
cell* s1 = (cell*)(ctx->datastack - sizeof(cell));
@ -166,7 +165,7 @@ void factor_vm::primitive_bignum_xor() {
ctx->replace(tag<bignum>(bignum_bitwise_xor(x, y)));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bignum_shift() {
fixnum y = untag_fixnum(ctx->pop());
bignum* x = untag<bignum>(ctx->peek());
@ -207,12 +206,12 @@ void factor_vm::primitive_bignum_log2() {
ctx->replace(tag<bignum>(bignum_integer_length(untag<bignum>(ctx->peek()))));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_fixnum_to_float() {
ctx->replace(allot_float(fixnum_to_float(ctx->peek())));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_format_float() {
char* locale = alien_offset(ctx->pop());
char* format = alien_offset(ctx->pop());
@ -263,25 +262,25 @@ void factor_vm::primitive_float_eq() {
ctx->replace(tag_boolean(x == y));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_float_add() {
POP_FLOATS(x, y);
ctx->replace(allot_float(x + y));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_float_subtract() {
POP_FLOATS(x, y);
ctx->replace(allot_float(x - y));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_float_multiply() {
POP_FLOATS(x, y);
ctx->replace(allot_float(x * y));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_float_divfloat() {
POP_FLOATS(x, y);
ctx->replace(allot_float(x / y));
@ -307,13 +306,13 @@ void factor_vm::primitive_float_greatereq() {
ctx->replace(tag_boolean(x >= y));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_float_bits() {
ctx->replace(
from_unsigned_cell(float_bits((float)untag_float_check(ctx->peek()))));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bits_float() {
ctx->replace(allot_float(bits_float((uint32_t)to_cell(ctx->peek()))));
}
@ -322,12 +321,12 @@ void factor_vm::primitive_double_bits() {
ctx->replace(from_unsigned_8(double_bits(untag_float_check(ctx->peek()))));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_bits_double() {
ctx->replace(allot_float(bits_double(to_unsigned_8(ctx->peek()))));
}
/* Cannot allocate. */
// Cannot allocate.
#define CELL_TO_FOO(name, type, converter) \
type factor_vm::name(cell tagged) { \
switch (TAG(tagged)) { \
@ -350,17 +349,17 @@ CELL_TO_FOO(to_cell, cell, bignum_to_cell)
CELL_TO_FOO(to_signed_8, int64_t, bignum_to_long_long)
CELL_TO_FOO(to_unsigned_8, uint64_t, bignum_to_ulong_long)
/* Allocates memory */
// Allocates memory
VM_C_API cell from_signed_cell(fixnum integer, factor_vm* parent) {
return parent->from_signed_cell(integer);
}
/* Allocates memory */
// Allocates memory
VM_C_API cell from_unsigned_cell(cell integer, factor_vm* parent) {
return parent->from_unsigned_cell(integer);
}
/* Allocates memory */
// Allocates memory
cell factor_vm::from_signed_8(int64_t n) {
if (n < fixnum_min || n > fixnum_max)
return tag<bignum>(long_long_to_bignum(n));
@ -372,7 +371,7 @@ VM_C_API cell from_signed_8(int64_t n, factor_vm* parent) {
return parent->from_signed_8(n);
}
/* Allocates memory */
// Allocates memory
cell factor_vm::from_unsigned_8(uint64_t n) {
if (n > (uint64_t)fixnum_max)
return tag<bignum>(ulong_long_to_bignum(n));
@ -384,17 +383,17 @@ VM_C_API cell from_unsigned_8(uint64_t n, factor_vm* parent) {
return parent->from_unsigned_8(n);
}
/* Cannot allocate */
// Cannot allocate
float factor_vm::to_float(cell value) {
return (float)untag_float_check(value);
}
/* Cannot allocate */
// Cannot allocate
double factor_vm::to_double(cell value) { return untag_float_check(value); }
/* The fixnum+, fixnum- and fixnum* primitives are defined in cpu_*.S. On
overflow, they call these functions. */
/* Allocates memory */
// The fixnum+, fixnum- and fixnum* primitives are defined in cpu_*.S. On
// overflow, they call these functions.
// Allocates memory
inline void factor_vm::overflow_fixnum_add(fixnum x, fixnum y) {
ctx->replace(
tag<bignum>(fixnum_to_bignum(untag_fixnum(x) + untag_fixnum(y))));
@ -404,7 +403,7 @@ VM_C_API void overflow_fixnum_add(fixnum x, fixnum y, factor_vm* parent) {
parent->overflow_fixnum_add(x, y);
}
/* Allocates memory */
// Allocates memory
inline void factor_vm::overflow_fixnum_subtract(fixnum x, fixnum y) {
ctx->replace(
tag<bignum>(fixnum_to_bignum(untag_fixnum(x) - untag_fixnum(y))));
@ -414,7 +413,7 @@ VM_C_API void overflow_fixnum_subtract(fixnum x, fixnum y, factor_vm* parent) {
parent->overflow_fixnum_subtract(x, y);
}
/* Allocates memory */
// Allocates memory
inline void factor_vm::overflow_fixnum_multiply(fixnum x, fixnum y) {
data_root<bignum> bx(fixnum_to_bignum(x), this);
data_root<bignum> by(fixnum_to_bignum(y), this);

View File

@ -5,28 +5,28 @@ static const fixnum fixnum_max =
static const fixnum fixnum_min = (-((fixnum)1 << (WORD_SIZE - TAG_BITS - 1)));
static const fixnum array_size_max = ((cell)1 << (WORD_SIZE - TAG_BITS - 2));
/* Allocates memory */
// Allocates memory
inline cell factor_vm::from_signed_cell(fixnum x) {
if (x < fixnum_min || x > fixnum_max)
return tag<bignum>(fixnum_to_bignum(x));
return tag_fixnum(x);
}
/* Allocates memory */
// Allocates memory
inline cell factor_vm::from_unsigned_cell(cell x) {
if (x > (cell)fixnum_max)
return tag<bignum>(cell_to_bignum(x));
return tag_fixnum(x);
}
/* Allocates memory */
// Allocates memory
inline cell factor_vm::allot_float(double n) {
boxed_float* flo = allot<boxed_float>(sizeof(boxed_float));
flo->n = n;
return tag(flo);
}
/* Allocates memory */
// Allocates memory
inline bignum* factor_vm::float_to_bignum(cell tagged) {
return double_to_bignum(untag_float(tagged));
}
@ -54,7 +54,7 @@ inline cell factor_vm::unbox_array_size() {
return n;
}
general_error(ERROR_ARRAY_SIZE, obj, tag_fixnum(array_size_max));
return 0; /* can't happen */
return 0; // can't happen
}
VM_C_API cell from_signed_cell(fixnum integer, factor_vm* vm);

View File

@ -18,8 +18,8 @@ struct nursery_policy {
void factor_vm::collect_nursery() {
/* Copy live objects from the nursery (as determined by the root set and
marked cards in aging and tenured) to aging space. */
// Copy live objects from the nursery (as determined by the root set and
// marked cards in aging and tenured) to aging space.
gc_workhorse<aging_space, nursery_policy>
workhorse(this, data->aging, nursery_policy(data->nursery));
slot_visitor<gc_workhorse<aging_space, nursery_policy>>

View File

@ -19,7 +19,7 @@ cell object_start_map::find_object_containing_card(cell card_index) {
card_index--;
while (object_start_offsets[card_index] == card_starts_inside_object) {
/* First card should start with an object */
// First card should start with an object
FACTOR_ASSERT(card_index > 0);
card_index--;
}
@ -27,7 +27,7 @@ cell object_start_map::find_object_containing_card(cell card_index) {
}
}
/* we need to remember the first object allocated in the card */
// we need to remember the first object allocated in the card
void object_start_map::record_object_start_offset(object* obj) {
cell idx = addr_to_card((cell)obj - start);
card obj_start = ((cell)obj & addr_card_mask);
@ -44,10 +44,10 @@ void object_start_map::update_card_for_sweep(cell index, uint16_t mask) {
mask >>= (offset / data_alignment);
if (mask == 0) {
/* The rest of the block after the old object start is free */
// The rest of the block after the old object start is free
object_start_offsets[index] = card_starts_inside_object;
} else {
/* Move the object start forward if necessary */
// Move the object start forward if necessary
object_start_offsets[index] =
(card)(offset + (rightmost_set_bit(mask) * data_alignment));
}

View File

@ -2,7 +2,7 @@
namespace factor {
/* Size of the object pointed to by a tagged pointer */
// Size of the object pointed to by a tagged pointer
cell object_size(cell tagged) {
if (immediate_p(tagged))
return 0;
@ -44,7 +44,7 @@ void factor_vm::primitive_set_slot() {
write_barrier(slot_ptr);
}
/* Allocates memory */
// Allocates memory
cell factor_vm::clone_object(cell obj_) {
data_root<object> obj(obj_, this);
@ -57,10 +57,10 @@ cell factor_vm::clone_object(cell obj_) {
return tag_dynamic(new_obj);
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_clone() { ctx->replace(clone_object(ctx->peek())); }
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_size() {
ctx->replace(from_unsigned_cell(object_size(ctx->peek())));
}
@ -79,9 +79,9 @@ struct slot_become_fixup : no_fixup {
}
};
/* classes.tuple uses this to reshape tuples; tools.deploy.shaker uses this
to coalesce equal but distinct quotations and wrappers. */
/* Calls gc */
// classes.tuple uses this to reshape tuples; tools.deploy.shaker uses this
// to coalesce equal but distinct quotations and wrappers.
// Calls gc
void factor_vm::primitive_become() {
primitive_minor_gc();
array* new_objects = untag_check<array>(ctx->pop());
@ -91,7 +91,7 @@ void factor_vm::primitive_become() {
if (capacity != array_capacity(old_objects))
critical_error("bad parameters to become", 0);
/* Build the forwarding map */
// Build the forwarding map
std::map<object*, object*> become_map;
for (cell i = 0; i < capacity; i++) {
@ -101,7 +101,7 @@ void factor_vm::primitive_become() {
become_map[untag<object>(old_ptr)] = untag<object>(new_ptr);
}
/* Update all references to old objects to point to new objects */
// Update all references to old objects to point to new objects
{
slot_visitor<slot_become_fixup> visitor(this,
slot_become_fixup(&become_map));
@ -120,8 +120,8 @@ void factor_vm::primitive_become() {
each_code_block(code_block_become_func);
}
/* Since we may have introduced old->new references, need to revisit
all objects and code blocks on a minor GC. */
// Since we may have introduced old->new references, need to revisit
// all objects and code blocks on a minor GC.
data->mark_all_cards();
}
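
The fixup rule that slot_become_fixup applies while walking the heap is simply
"rewrite pointers found in the forwarding map, leave everything else alone"; a
minimal sketch with plain C++ pointers standing in for tagged object slots:

#include <cassert>
#include <map>

int main() {
  int old_obj = 1, new_obj = 2, unrelated = 3;
  std::map<int*, int*> become_map = {{&old_obj, &new_obj}};
  int* slot = &old_obj;
  auto it = become_map.find(slot);
  if (it != become_map.end())
    slot = it->second;               // slot now refers to the new object
  assert(slot == &new_obj);
  assert(become_map.find(&unrelated) == become_map.end());  // left alone
  return 0;
}
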

View File

@ -7,32 +7,32 @@ namespace factor {
static const cell special_object_count = 85;
enum special_object {
OBJ_WALKER_HOOK = 3, /* non-local exit hook, used by library only */
OBJ_CALLCC_1, /* used to pass the value in callcc1 */
OBJ_WALKER_HOOK = 3, // non-local exit hook, used by library only
OBJ_CALLCC_1, // used to pass the value in callcc1
ERROR_HANDLER_QUOT = 5, /* quotation called when VM throws an error */
ERROR_HANDLER_QUOT = 5, // quotation called when VM throws an error
OBJ_CELL_SIZE = 7, /* sizeof(cell) */
OBJ_CPU, /* CPU architecture */
OBJ_OS, /* operating system name */
OBJ_CELL_SIZE = 7, // sizeof(cell)
OBJ_CPU, // CPU architecture
OBJ_OS, // operating system name
OBJ_ARGS = 10, /* command line arguments */
OBJ_STDIN, /* stdin FILE* handle */
OBJ_STDOUT, /* stdout FILE* handle */
OBJ_ARGS = 10, // command line arguments
OBJ_STDIN, // stdin FILE* handle
OBJ_STDOUT, // stdout FILE* handle
OBJ_IMAGE = 13, /* image path name */
OBJ_EXECUTABLE, /* runtime executable path name */
OBJ_IMAGE = 13, // image path name
OBJ_EXECUTABLE, // runtime executable path name
OBJ_EMBEDDED = 15, /* are we embedded in another app? */
OBJ_EVAL_CALLBACK, /* used when Factor is embedded in a C app */
OBJ_YIELD_CALLBACK, /* used when Factor is embedded in a C app */
OBJ_SLEEP_CALLBACK, /* used when Factor is embedded in a C app */
OBJ_EMBEDDED = 15, // are we embedded in another app?
OBJ_EVAL_CALLBACK, // used when Factor is embedded in a C app
OBJ_YIELD_CALLBACK, // used when Factor is embedded in a C app
OBJ_SLEEP_CALLBACK, // used when Factor is embedded in a C app
OBJ_STARTUP_QUOT = 20, /* startup quotation */
OBJ_GLOBAL, /* global namespace */
OBJ_SHUTDOWN_QUOT, /* shutdown quotation */
OBJ_STARTUP_QUOT = 20, // startup quotation
OBJ_GLOBAL, // global namespace
OBJ_SHUTDOWN_QUOT, // shutdown quotation
/* Quotation compilation in quotations.cpp */
// Quotation compilation in quotations.cpp
JIT_PROLOG = 23,
JIT_PRIMITIVE_WORD,
JIT_PRIMITIVE,
@ -54,8 +54,8 @@ enum special_object {
JIT_EXECUTE,
JIT_DECLARE_WORD,
/* External entry points. These are defined in the files in
bootstrap/assembler/ */
// External entry points. These are defined in the files in
// bootstrap/assembler/
C_TO_FACTOR_WORD = 43,
LAZY_JIT_COMPILE_WORD,
UNWIND_NATIVE_FRAMES_WORD,
@ -66,14 +66,14 @@ enum special_object {
WIN_EXCEPTION_HANDLER,
UNUSED2,
/* Incremented on every modify-code-heap call; invalidates call( inline
caching */
// Incremented on every modify-code-heap call; invalidates call( inline
// caching
REDEFINITION_COUNTER = 52,
/* Callback stub generation in callbacks.cpp */
// Callback stub generation in callbacks.cpp
CALLBACK_STUB = 53,
/* Polymorphic inline cache generation in inline_cache.cpp */
// Polymorphic inline cache generation in inline_cache.cpp
PIC_LOAD = 54,
PIC_TAG,
PIC_TUPLE,
@ -83,16 +83,16 @@ enum special_object {
PIC_MISS_WORD,
PIC_MISS_TAIL_WORD,
/* Megamorphic cache generation in dispatch.cpp */
// Megamorphic cache generation in dispatch.cpp
MEGA_LOOKUP = 62,
MEGA_LOOKUP_WORD,
MEGA_MISS_WORD,
OBJ_UNDEFINED = 65, /* default quotation for undefined words */
OBJ_UNDEFINED = 65, // default quotation for undefined words
OBJ_STDERR = 66, /* stderr FILE* handle */
OBJ_STDERR = 66, // stderr FILE* handle
OBJ_STAGE2 = 67, /* have we bootstrapped? */
OBJ_STAGE2 = 67, // have we bootstrapped?
OBJ_CURRENT_THREAD = 68,
@ -100,30 +100,30 @@ enum special_object {
OBJ_RUN_QUEUE = 70,
OBJ_SLEEP_QUEUE = 71,
OBJ_VM_COMPILER = 72, /* version string of the compiler we were built with */
OBJ_VM_COMPILER = 72, // version string of the compiler we were built with
OBJ_WAITING_CALLBACKS = 73,
OBJ_SIGNAL_PIPE = 74, /* file descriptor for pipe used to communicate signals
only used on unix */
OBJ_VM_COMPILE_TIME = 75, /* when the binary was built */
OBJ_VM_VERSION = 76, /* factor version */
OBJ_VM_GIT_LABEL = 77, /* git label (git describe --all --long) */
OBJ_SIGNAL_PIPE = 74, // file descriptor for pipe used to communicate signals
// only used on unix
OBJ_VM_COMPILE_TIME = 75, // when the binary was built
OBJ_VM_VERSION = 76, // factor version
OBJ_VM_GIT_LABEL = 77, // git label (git describe --all --long)
/* Canonical truth value. In Factor, 't' */
// Canonical truth value. In Factor, 't'
OBJ_CANONICAL_TRUE = 78,
/* Canonical bignums. These needs to be kept in the image in case
some heap objects refer to them. */
// Canonical bignums. These need to be kept in the image in case
// some heap objects refer to them.
OBJ_BIGNUM_ZERO,
OBJ_BIGNUM_POS_ONE,
OBJ_BIGNUM_NEG_ONE = 81,
};
/* save-image-and-exit discards special objects that are filled in on startup
anyway, to reduce image size */
// save-image-and-exit discards special objects that are filled in on startup
// anyway, to reduce image size
inline static bool save_special_p(cell i) {
/* Need to fix the order here. */
// Need to fix the order here.
return (i >= OBJ_STARTUP_QUOT && i <= LEAF_SIGNAL_HANDLER_WORD) ||
(i >= REDEFINITION_COUNTER && i <= OBJ_UNDEFINED) ||
i == OBJ_STAGE2 ||

View File

@ -12,7 +12,7 @@ void early_init() {}
#define SUFFIX ".image"
#define SUFFIX_LEN 6
/* You must free() the result yourself. */
// You must free() the result yourself.
const char* default_image_path() {
const char* path = vm_executable_path();

View File

@ -5,12 +5,12 @@ namespace factor {
void flush_icache(cell start, cell len) {
int result;
/* XXX: why doesn't this work on Nokia n800? It should behave
identically to the below assembly. */
/* result = syscall(__ARM_NR_cacheflush,start,start + len,0); */
// XXX: why doesn't this work on Nokia n800? It should behave
// identically to the below assembly.
// result = syscall(__ARM_NR_cacheflush,start,start + len,0);
/* Assembly swiped from
http://lists.arm.linux.org.uk/pipermail/linux-arm/2002-July/003931.html */
// Assembly swiped from
// http://lists.arm.linux.org.uk/pipermail/linux-arm/2002-July/003931.html
__asm__ __volatile__("mov r0, %1\n"
"sub r1, %2, #1\n"
"mov r2, #0\n"

View File

@ -6,7 +6,7 @@ namespace factor {
// FXSR
// environment
struct _fpstate {
/* Regular FPU environment */
// Regular FPU environment
unsigned long cw;
unsigned long sw;
unsigned long tag;
@ -16,13 +16,13 @@ struct _fpstate {
unsigned long datasel;
struct _fpreg _st[8];
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
unsigned short magic; // 0xffff = regular FPU data only
/* FXSR FPU environment */
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
// FXSR FPU environment
unsigned long _fxsr_env[6]; // FXSR FPU env is ignored
unsigned long mxcsr;
unsigned long reserved;
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _fpxreg _fxsr_st[8]; // FXSR FPU reg data is ignored
struct _xmmreg _xmm[8];
unsigned long padding[56];
};

View File

@ -25,8 +25,8 @@ inline static void uap_clear_fpu_status(void* uap) {
#define FUNCTION_CODE_POINTER(ptr) ptr
#define FUNCTION_TOC_POINTER(ptr) ptr
/* Must match the stack-frame-size constant in
bootstrap/assembler/x86.64.unix.factor */
// Must match the stack-frame-size constant in
// bootstrap/assembler/x86.64.unix.factor
static const unsigned JIT_FRAME_SIZE = 32;
}

View File

@ -5,9 +5,9 @@ namespace factor {
const char* vm_executable_path() {
ssize_t bufsiz = 4096;
/* readlink is called in a loop with increasing buffer sizes in case
someone tries to run Factor from a incredibly deeply nested
path. */
// readlink is called in a loop with increasing buffer sizes in case
// someone tries to run Factor from an incredibly deeply nested
// path.
while (true) {
char* buf = new char[bufsiz + 1];
ssize_t size= readlink("/proc/self/exe", buf, bufsiz);
@ -15,13 +15,13 @@ const char* vm_executable_path() {
fatal_error("Cannot read /proc/self/exe", errno);
} else {
if (size < bufsiz) {
/* Buffer was large enough, return string. */
// Buffer was large enough, return string.
buf[size] = '\0';
const char* ret = safe_strdup(buf);
delete[] buf;
return ret;
} else {
/* Buffer wasn't big enough, double it and try again. */
// Buffer wasn't big enough, double it and try again.
delete[] buf;
bufsiz *= 2;
}

View File

@ -2,16 +2,16 @@
namespace factor {
/* Fault handler information. MacOSX version.
Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
// Fault handler information. MacOSX version.
// Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
// Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
2005-03-10:
// Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
// 2005-03-10:
http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
// http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
Modified for Factor by Slava Pestov */
// Modified for Factor by Slava Pestov
#define MACH_EXC_STATE_TYPE i386_exception_state_t
#define MACH_EXC_STATE_FLAVOR i386_EXCEPTION_STATE
#define MACH_EXC_STATE_COUNT i386_EXCEPTION_STATE_COUNT

View File

@ -2,16 +2,16 @@
namespace factor {
/* Fault handler information. MacOSX version.
Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
// Fault handler information. MacOSX version.
// Copyright (C) 1993-1999, 2002-2003 Bruno Haible <clisp.org at bruno>
// Copyright (C) 2003 Paolo Bonzini <gnu.org at bonzini>
Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
2005-03-10:
// Used under BSD license with permission from Paolo Bonzini and Bruno Haible,
// 2005-03-10:
http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
// http://sourceforge.net/mailarchive/message.php?msg_name=200503102200.32002.bruno%40clisp.org
Modified for Factor by Slava Pestov and Daniel Ehrenberg */
// Modified for Factor by Slava Pestov and Daniel Ehrenberg
#define MACH_EXC_STATE_TYPE x86_exception_state64_t
#define MACH_EXC_STATE_FLAVOR x86_EXCEPTION_STATE64
#define MACH_EXC_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
@ -67,8 +67,8 @@ inline static void uap_clear_fpu_status(void* uap) {
mach_clear_fpu_status(UAP_FS(uap));
}
/* Must match the stack-frame-size constant in
basis/bootstrap/assembler/x86.64.unix.factor */
// Must match the stack-frame-size constant in
// basis/bootstrap/assembler/x86.64.unix.factor
static const unsigned JIT_FRAME_SIZE = 32;
}

View File

@ -22,7 +22,7 @@ void early_init(void) {
}
}
/* You must free() this yourself. */
// You must free() this yourself.
const char* vm_executable_path(void) {
return safe_strdup([[[NSBundle mainBundle] executablePath] UTF8String]);
}
@ -66,7 +66,7 @@ void factor_vm::init_signals(void) {
mach_initialize();
}
/* Amateurs at Apple: implement this function, properly! */
// Amateurs at Apple: implement this function, properly!
Protocol* objc_getProtocol(char* name) {
if (strcmp(name, "NSTextInput") == 0)
return @protocol(NSTextInput);

View File

@ -306,27 +306,27 @@ void factor_vm::unix_init_signals() {
sigaction_safe(SIGALRM, &sample_sigaction, NULL);
}
/* We don't use SA_IGN here because then the ignore action is inherited
by subprocesses, which we don't want. There is a unit test in
io.launcher.unix for this. */
// We don't use SA_IGN here because then the ignore action is inherited
// by subprocesses, which we don't want. There is a unit test in
// io.launcher.unix for this.
{
struct sigaction ignore_sigaction;
init_sigaction_with_handler(&ignore_sigaction, ignore_signal_handler);
sigaction_safe(SIGPIPE, &ignore_sigaction, NULL);
/* We send SIGUSR2 to the stdin_loop thread to interrupt it on FEP */
// We send SIGUSR2 to the stdin_loop thread to interrupt it on FEP
sigaction_safe(SIGUSR2, &ignore_sigaction, NULL);
}
}
/* On Unix, shared fds such as stdin cannot be set to non-blocking mode
(http://homepages.tesco.net/J.deBoynePollard/FGA/dont-set-shared-file-descriptors-to-non-blocking-mode.html)
so we kludge around this by spawning a thread, which waits on a control pipe
for a signal, upon receiving this signal it reads one block of data from
stdin and writes it to a data pipe. Upon completion, it writes a 4-byte
integer to the size pipe, indicating how much data was written to the data
pipe.
// On Unix, shared fds such as stdin cannot be set to non-blocking mode
// (http://homepages.tesco.net/J.deBoynePollard/FGA/dont-set-shared-file-descriptors-to-non-blocking-mode.html)
// so we kludge around this by spawning a thread, which waits on a control pipe
// for a signal; upon receiving it, the thread reads one block of data from
// stdin and writes it to a data pipe. Upon completion, it writes a 4-byte
// integer to the size pipe, indicating how much data was written to the data
// pipe.
The read end of the size pipe can be set to non-blocking. */
// The read end of the size pipe can be set to non-blocking.
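
A stripped-down, standalone model of that control/data/size pipe handshake, using plain pipes and one pthread (build with -lpthread). The real VM layers the stdin_mutex, SIGUSR2 interruption and fep handling on top of this; all names here are illustrative.

#include <cstdio>
#include <pthread.h>
#include <unistd.h>

static int control_fds[2], data_fds[2], size_fds[2];

static void* stdin_loop(void*) {
  char req, buf[1024];
  while (read(control_fds[0], &req, 1) == 1) {   // wait for a request
    ssize_t n = read(0, buf, sizeof(buf));       // one block from stdin
    if (n < 0)
      n = 0;
    if (n > 0)
      write(data_fds[1], buf, (size_t)n);        // data first...
    unsigned size_field = (unsigned)n;
    write(size_fds[1], &size_field, sizeof(size_field)); // ...then its size
    if (n == 0)
      break;                                     // EOF on stdin
  }
  return NULL;
}

int main() {
  pipe(control_fds); pipe(data_fds); pipe(size_fds);
  pthread_t thread;
  pthread_create(&thread, NULL, stdin_loop, NULL);
  write(control_fds[1], "X", 1);                 // request one block
  unsigned n = 0;
  read(size_fds[0], &n, sizeof(n));              // the size pipe could be
  if (n > 0) {                                   // polled non-blocking instead
    char buf[1024];
    read(data_fds[0], buf, n);
    fwrite(buf, 1, n, stdout);
  }
  close(control_fds[1]);                         // let the thread exit
  pthread_join(thread, NULL);
  return 0;
}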
extern "C" {
int stdin_read;
int stdin_write;
@ -402,8 +402,8 @@ void* stdin_loop(void* arg) {
fatal_error("stdin_loop: bad data on control fd", buf[0]);
for (;;) {
/* If we fep, the parent thread will grab stdin_mutex and send us
SIGUSR2 to interrupt the read() call. */
// If we fep, the parent thread will grab stdin_mutex and send us
// SIGUSR2 to interrupt the read() call.
pthread_mutex_lock(&stdin_mutex);
pthread_mutex_unlock(&stdin_mutex);
ssize_t bytes = read(0, buf, sizeof(buf));
@ -440,9 +440,9 @@ void open_console() {
pthread_mutex_init(&stdin_mutex, NULL);
}
/* This method is used to kill the stdin_loop before exiting from factor.
A Nvidia driver bug on Linux is the reason this has to be done, see:
http://www.nvnews.net/vbulletin/showthread.php?t=164619 */
// This method is used to kill the stdin_loop before exiting from factor.
// An Nvidia driver bug on Linux is the reason this has to be done, see:
// http://www.nvnews.net/vbulletin/showthread.php?t=164619
void close_console() {
if (stdin_thread_initialized_p) {
pthread_cancel(stdin_thread);
@ -452,9 +452,9 @@ void close_console() {
void lock_console() {
FACTOR_ASSERT(stdin_thread_initialized_p);
/* Lock the stdin_mutex and send the stdin_loop thread a signal to interrupt
any read() it has in progress. When the stdin loop iterates again, it will
try to lock the same mutex and wait until unlock_console() is called. */
// Lock the stdin_mutex and send the stdin_loop thread a signal to interrupt
// any read() it has in progress. When the stdin loop iterates again, it will
// try to lock the same mutex and wait until unlock_console() is called.
pthread_mutex_lock(&stdin_mutex);
pthread_kill(stdin_thread, SIGUSR2);
}

View File

@ -3,7 +3,7 @@
namespace factor {
void factor_vm::c_to_factor_toplevel(cell quot) {
/* 32-bit Windows SEH set up in basis/bootstrap/assembler/x86.32.windows.factor */
// 32-bit Windows SEH set up in basis/bootstrap/assembler/x86.32.windows.factor
c_to_factor(quot);
}

View File

@ -26,19 +26,19 @@ struct seh_data {
};
void factor_vm::c_to_factor_toplevel(cell quot) {
/* The annoying thing about Win64 SEH is that the offsets in
* function tables are 32-bit integers, and the exception handler
* itself must reside between the start and end pointers, so
* we stick everything at the beginning of the code heap and
* generate a small trampoline that jumps to the real
* exception handler. */
// The annoying thing about Win64 SEH is that the offsets in
// function tables are 32-bit integers, and the exception handler
// itself must reside between the start and end pointers, so
// we stick everything at the beginning of the code heap and
// generate a small trampoline that jumps to the real
// exception handler.
seh_data* seh_area = (seh_data*)code->seh_area;
cell base = code->seg->start;
/* Should look at generating this with the Factor assembler */
// Should look at generating this with the Factor assembler
/* mov rax,0 */
// mov rax,0
seh_area->handler[0] = 0x48;
seh_area->handler[1] = 0xb8;
seh_area->handler[2] = 0x0;
@ -50,12 +50,12 @@ void factor_vm::c_to_factor_toplevel(cell quot) {
seh_area->handler[8] = 0x0;
seh_area->handler[9] = 0x0;
/* jmp rax */
// jmp rax
seh_area->handler[10] = 0x48;
seh_area->handler[11] = 0xff;
seh_area->handler[12] = 0xe0;
/* Store address of exception handler in the operand of the 'mov' */
// Store address of exception handler in the operand of the 'mov'
cell handler = (cell)&factor::exception_handler;
memcpy(&seh_area->handler[2], &handler, sizeof(cell));
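
The hand-written bytes above are simply "mov rax, imm64" followed by "jmp rax". As an illustrative (non-VM) helper, the same 13-byte trampoline can be emitted like this, assuming buf points at writable, executable memory:

#include <cstring>
#include <stdint.h>

// Writes: 48 B8 <imm64>  (mov rax, target)
//         48 FF E0       (jmp rax; the REX.W prefix is redundant but harmless)
static void emit_jump_trampoline(unsigned char* buf, void* target) {
  buf[0] = 0x48; buf[1] = 0xb8;
  uint64_t addr = (uint64_t)(uintptr_t)target;
  memcpy(buf + 2, &addr, sizeof(addr));
  buf[10] = 0x48; buf[11] = 0xff; buf[12] = 0xe0;
}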

View File

@ -10,25 +10,25 @@ typedef struct DECLSPEC_ALIGN(16) _M128A {
LONGLONG High;
} M128A, *PM128A;
/* The ExtendedRegisters field of the x86.32 CONTEXT structure uses this layout;
* however, this structure is only made available from winnt.h on x86.64 */
// The ExtendedRegisters field of the x86.32 CONTEXT structure uses this layout;
// however, this structure is only made available from winnt.h on x86.64
typedef struct _XMM_SAVE_AREA32 {
WORD ControlWord; /* 000 */
WORD StatusWord; /* 002 */
BYTE TagWord; /* 004 */
BYTE Reserved1; /* 005 */
WORD ErrorOpcode; /* 006 */
DWORD ErrorOffset; /* 008 */
WORD ErrorSelector; /* 00c */
WORD Reserved2; /* 00e */
DWORD DataOffset; /* 010 */
WORD DataSelector; /* 014 */
WORD Reserved3; /* 016 */
DWORD MxCsr; /* 018 */
DWORD MxCsr_Mask; /* 01c */
M128A FloatRegisters[8]; /* 020 */
M128A XmmRegisters[16]; /* 0a0 */
BYTE Reserved4[96]; /* 1a0 */
WORD ControlWord; // 000
WORD StatusWord; // 002
BYTE TagWord; // 004
BYTE Reserved1; // 005
WORD ErrorOpcode; // 006
DWORD ErrorOffset; // 008
WORD ErrorSelector; // 00c
WORD Reserved2; // 00e
DWORD DataOffset; // 010
WORD DataSelector; // 014
WORD Reserved3; // 016
DWORD MxCsr; // 018
DWORD MxCsr_Mask; // 01c
M128A FloatRegisters[8]; // 020
M128A XmmRegisters[16]; // 0a0
BYTE Reserved4[96]; // 1a0
} XMM_SAVE_AREA32, *PXMM_SAVE_AREA32;
#define X87SW(ctx) (ctx)->FloatSave.StatusWord

View File

@ -7,7 +7,7 @@ namespace factor {
#define MXCSR(ctx) (ctx)->MxCsr
/* Must match the stack-frame-size constant in
basis/bootstap/assembler/x86.64.windows.factor */
// Must match the stack-frame-size constant in
// basis/bootstrap/assembler/x86.64.windows.factor
static const unsigned JIT_FRAME_SIZE = 64;
}

View File

@ -55,7 +55,7 @@ BOOL factor_vm::windows_stat(vm_char* path) {
return ret;
}
/* You must free() this yourself. */
// You must free() this yourself.
const vm_char* factor_vm::default_image_path() {
vm_char full_path[MAX_UNICODE_PATH];
vm_char* ptr;
@ -76,7 +76,7 @@ const vm_char* factor_vm::default_image_path() {
return safe_strdup(temp_path);
}
/* You must free() this yourself. */
// You must free() this yourself.
const vm_char* factor_vm::vm_executable_path() {
vm_char full_path[MAX_UNICODE_PATH];
if (!GetModuleFileName(NULL, full_path, MAX_UNICODE_PATH))
@ -126,13 +126,13 @@ long getpagesize() {
bool move_file(const vm_char* path1, const vm_char* path2) {
/* MoveFileEx returns FALSE on fail. */
// MoveFileEx returns FALSE on fail.
BOOL val = MoveFileEx((path1), (path2), MOVEFILE_REPLACE_EXISTING);
if (val == FALSE) {
/* MoveFileEx doesn't set errno, which primitive_save_image()
reads the error code from. Instead of converting from
GetLastError() to errno values, we ust set it to the generic
EIO value. */
// MoveFileEx doesn't set errno, which primitive_save_image()
// reads the error code from. Instead of converting from
// GetLastError() to errno values, we just set it to the generic
// EIO value.
errno = EIO;
}
return val == TRUE;
@ -173,8 +173,8 @@ uint64_t nano_count() {
#ifdef FACTOR_64
hi = count.HighPart;
#else
/* On VirtualBox, QueryPerformanceCounter does not increment
the high part every time the low part overflows. Workaround. */
// On VirtualBox, QueryPerformanceCounter does not increment
// the high part every time the low part overflows. Workaround.
if (lo > count.LowPart)
hi++;
#endif
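
The workaround boils down to a manual carry: if the low 32 bits went backwards since the previous reading while the high part stayed put, the high part must have missed an increment. A standalone (non-VM) illustration of that carry:

#include <stdint.h>

static uint64_t combine_with_carry(uint32_t prev_lo, uint32_t lo, uint32_t hi) {
  if (lo < prev_lo)          // low word wrapped, but hi was not bumped
    hi++;
  return ((uint64_t)hi << 32) | lo;
}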
@ -217,7 +217,7 @@ LONG factor_vm::exception_handler(PEXCEPTION_RECORD e, void* frame, PCONTEXT c,
#else
signal_fpu_status = fpu_status(X87SW(c) | MXCSR(c));
/* This seems to have no effect */
// This seems to have no effect
X87SW(c) = 0;
#endif
MXCSR(c) &= 0xffffffc0;
@ -241,9 +241,9 @@ VM_C_API LONG exception_handler(PEXCEPTION_RECORD e, void* frame, PCONTEXT c,
return vm->exception_handler(e, frame, c, dispatch);
}
/* On Unix SIGINT (ctrl-c) automatically interrupts blocking io system
calls. It doesn't on Windows, so we need to manually send some
cancellation requests to unblock the thread. */
// On Unix SIGINT (ctrl-c) automatically interrupts blocking io system
// calls. It doesn't on Windows, so we need to manually send some
// cancellation requests to unblock the thread.
VOID CALLBACK dummy_cb (ULONG_PTR dwParam) { }
// CancelSynchronousIo is not in Windows XP
@ -251,8 +251,8 @@ VOID CALLBACK dummy_cb (ULONG_PTR dwParam) { }
static void wake_up_thread(HANDLE thread) {
if (!CancelSynchronousIo(thread)) {
DWORD err = GetLastError();
/* CancelSynchronousIo() didn't find anything to cancel, let's try
with QueueUserAPC() instead. */
// CancelSynchronousIo() didn't find anything to cancel, let's try
// with QueueUserAPC() instead.
if (err == ERROR_NOT_FOUND) {
if (!QueueUserAPC(&dummy_cb, thread, NULL)) {
fatal_error("QueueUserAPC() failed", GetLastError());
@ -269,16 +269,15 @@ static void wake_up_thread(HANDLE thread) {}
static BOOL WINAPI ctrl_handler(DWORD dwCtrlType) {
switch (dwCtrlType) {
case CTRL_C_EVENT: {
/* The CtrlHandler runs in its own thread without stopping the main
thread. Since in practice nobody uses the multi-VM stuff yet, we just
grab the first VM we can get. This will not be a good idea when we
actually support native threads. */
// The CtrlHandler runs in its own thread without stopping the main
// thread. Since in practice nobody uses the multi-VM stuff yet, we just
// grab the first VM we can get. This will not be a good idea when we
// actually support native threads.
FACTOR_ASSERT(thread_vms.size() == 1);
factor_vm* vm = thread_vms.begin()->second;
vm->enqueue_fep();
/* Before leaving the ctrl_handler, try and wake up the main
thread. */
// Before leaving the ctrl_handler, try and wake up the main thread.
wake_up_thread(factor::boot_thread);
return TRUE;
}

View File

@ -1,7 +1,7 @@
#include <ctype.h>
#ifndef wcslen
/* for cygwin */
// for cygwin
#include <wchar.h>
#endif
@ -23,7 +23,7 @@
#undef max
#endif
/* Difference between Jan 1 00:00:00 1601 and Jan 1 00:00:00 1970 */
// Difference between Jan 1 00:00:00 1601 and Jan 1 00:00:00 1970
#define EPOCH_OFFSET 0x019db1ded53e8000LL
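
A quick arithmetic check of that constant (illustrative, not part of the commit): the 369 years from 1601-01-01 to 1970-01-01 span 11,644,473,600 seconds, and FILETIME counts 100-nanosecond ticks, i.e. 10,000,000 per second.

static_assert(11644473600LL * 10000000LL == 0x019db1ded53e8000LL,
              "EPOCH_OFFSET is the 1601->1970 delta in 100 ns ticks");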
namespace factor {

View File

@ -1,6 +1,6 @@
namespace factor {
/* Generated with PRIMITIVE in primitives.cpp */
// Generated with PRIMITIVE in primitives.cpp
#define EACH_PRIMITIVE(_) \
_(alien_address) _(all_instances) _(array) _(array_to_quotation) _(become) \

View File

@ -2,40 +2,41 @@
namespace factor {
/* Simple non-optimizing compiler.
// Simple non-optimizing compiler.
This is one of the two compilers implementing Factor; the second one is written
in Factor and performs advanced optimizations. See
basis/compiler/compiler.factor.
// This is one of the two compilers implementing Factor; the second one is
// written in Factor and performs advanced optimizations. See
// basis/compiler/compiler.factor.
The non-optimizing compiler compiles a quotation at a time by
concatenating machine code chunks; prolog, epilog, call word, jump to
word, etc. These machine code chunks are generated from Factor code in
basis/bootstrap/assembler/.
// The non-optimizing compiler compiles a quotation at a time by
// concatenating machine code chunks; prolog, epilog, call word, jump to
// word, etc. These machine code chunks are generated from Factor code in
// basis/bootstrap/assembler/.
Calls to words and constant quotations (referenced by conditionals and dips)
are direct jumps to machine code blocks. Literals are also referenced directly
without going through the literal table.
// Calls to words and constant quotations (referenced by conditionals and
// dips) are direct jumps to machine code blocks. Literals are also
// referenced directly without going through the literal table.
It actually does do a little bit of very simple optimization:
// It actually does do a little bit of very simple optimization:
1) Tail call optimization.
// 1) Tail call optimization.
2) If a quotation is determined to not call any other words (except for a few
special words which are open-coded, see below), then no prolog/epilog is
generated.
// 2) If a quotation is determined to not call any other words (except for a
// few special words which are open-coded, see below), then no prolog/epilog
// is generated.
3) When in tail position and immediately preceded by literal arguments, the
'if' is generated inline, instead of as a call to the 'if' word.
// 3) When in tail position and immediately preceded by literal arguments,
// the 'if' is generated inline, instead of as a call to the 'if' word.
4) When preceded by a quotation, calls to 'dip', '2dip' and '3dip' are
open-coded as retain stack manipulation surrounding a subroutine call.
// 4) When preceded by a quotation, calls to 'dip', '2dip' and '3dip' are
// open-coded as retain stack manipulation surrounding a subroutine call.
5) Sub-primitives are primitive words which are implemented in assembly and not
in the VM. They are open-coded and no subroutine call is generated. This
includes stack shufflers, some fixnum arithmetic words, and words such as tag,
slot and eq?. A primitive call is relatively expensive (two subroutine calls)
so this results in a big speedup for relatively little effort. */
// 5) Sub-primitives are primitive words which are implemented in assembly
// and not in the VM. They are open-coded and no subroutine call is generated.
// This includes stack shufflers, some fixnum arithmetic words, and words
// such as tag, slot and eq?. A primitive call is relatively expensive
// (two subroutine calls) so this results in a big speedup for relatively
// little effort.
inline cell quotation_jit::nth(cell index) {
return array_nth(elements.untagged(), index);
@ -83,18 +84,17 @@ bool quotation_jit::mega_lookup_p(cell i, cell length) {
nth(i + 3) == parent->special_objects[MEGA_LOOKUP_WORD];
}
/* Subprimitives should be flagged with whether they require a stack frame.
See #295. */
// Subprimitives should be flagged with whether they require a stack frame.
// See #295.
bool quotation_jit::special_subprimitive_p(cell obj) {
return obj == parent->special_objects[SIGNAL_HANDLER_WORD] ||
obj == parent->special_objects[LEAF_SIGNAL_HANDLER_WORD] ||
obj == parent->special_objects[UNWIND_NATIVE_FRAMES_WORD];
}
/* All quotations wants a stack frame, except if they contain:
1) calls to the special subprimitives, see #295.
2) mega cache lookups, see #651 */
// All quotations want a stack frame, except if they contain:
// 1) calls to the special subprimitives, see #295.
// 2) mega cache lookups, see #651
bool quotation_jit::stack_frame_p() {
cell length = array_capacity(elements.untagged());
for (cell i = 0; i < length; i++) {
@ -112,7 +112,7 @@ static bool trivial_quotation_p(array* elements) {
TAG(array_nth(elements, 0)) == WORD_TYPE;
}
/* Allocates memory (emit) */
// Allocates memory (emit)
void quotation_jit::emit_epilog(bool needed) {
if (needed) {
emit(parent->special_objects[JIT_SAFEPOINT]);
@ -120,14 +120,14 @@ void quotation_jit::emit_epilog(bool needed) {
}
}
/* Allocates memory conditionally */
// Allocates memory conditionally
void quotation_jit::emit_quotation(cell quot_) {
data_root<quotation> quot(quot_, parent);
array* elements = untag<array>(quot->array);
/* If the quotation consists of a single word, compile a direct call
to the word. */
// If the quotation consists of a single word, compile a direct call
// to the word.
if (trivial_quotation_p(elements))
literal(array_nth(elements, 0));
else {
@ -137,7 +137,7 @@ void quotation_jit::emit_quotation(cell quot_) {
}
}
/* Allocates memory (parameter(), literal(), emit_epilog, emit_with_literal)*/
// Allocates memory (parameter(), literal(), emit_epilog, emit_with_literal)
void quotation_jit::iterate_quotation() {
bool stack_frame = stack_frame_p();
@ -157,12 +157,12 @@ void quotation_jit::iterate_quotation() {
switch (obj.type()) {
case WORD_TYPE:
/* Sub-primitives */
// Sub-primitives
if (to_boolean(obj.as<word>()->subprimitive)) {
tail_call = emit_subprimitive(obj.value(), /* word */
i == length - 1, /* tail_call_p */
stack_frame); /* stack_frame_p */
} /* Everything else */
tail_call = emit_subprimitive(obj.value(), // word
i == length - 1, // tail_call_p
stack_frame); // stack_frame_p
} // Everything else
else if (i == length - 1) {
emit_epilog(stack_frame);
tail_call = true;
@ -174,11 +174,11 @@ void quotation_jit::iterate_quotation() {
push(obj.as<wrapper>()->object);
break;
case BYTE_ARRAY_TYPE:
/* Primitive calls */
// Primitive calls
if (primitive_call_p(i, length)) {
/* On x86-64 and PowerPC, the VM pointer is stored in
a register; on other platforms, the RT_VM relocation
is used and it needs an offset parameter */
// On x86-64 and PowerPC, the VM pointer is stored in a register;
// on other platforms, the RT_VM relocation is used and it needs
// an offset parameter
#ifdef FACTOR_X86
parameter(tag_fixnum(0));
#endif
@ -195,8 +195,8 @@ void quotation_jit::iterate_quotation() {
push(obj.value());
break;
case QUOTATION_TYPE:
/* 'if' preceded by two literal quotations (this is why if and ? are
mutually recursive in the library, but both still work) */
// 'if' preceded by two literal quotations (this is why if and ? are
// mutually recursive in the library, but both still work)
if (fast_if_p(i, length)) {
emit_epilog(stack_frame);
tail_call = true;
@ -204,17 +204,17 @@ void quotation_jit::iterate_quotation() {
emit_quotation(nth(i + 1));
emit(parent->special_objects[JIT_IF]);
i += 2;
} /* dip */
} // dip
else if (fast_dip_p(i, length)) {
emit_quotation(obj.value());
emit(parent->special_objects[JIT_DIP]);
i++;
} /* 2dip */
} // 2dip
else if (fast_2dip_p(i, length)) {
emit_quotation(obj.value());
emit(parent->special_objects[JIT_2DIP]);
i++;
} /* 3dip */
} // 3dip
else if (fast_3dip_p(i, length)) {
emit_quotation(obj.value());
emit(parent->special_objects[JIT_3DIP]);
@ -223,12 +223,12 @@ void quotation_jit::iterate_quotation() {
push(obj.value());
break;
case ARRAY_TYPE:
/* Method dispatch */
// Method dispatch
if (mega_lookup_p(i, length)) {
tail_call = true;
emit_mega_cache_lookup(nth(i), untag_fixnum(nth(i + 1)), nth(i + 2));
i += 3;
} /* Non-optimizing compiler ignores declarations */
} // Non-optimizing compiler ignores declarations
else if (declare_p(i, length))
i++;
else
@ -253,35 +253,35 @@ cell quotation_jit::word_stack_frame_size(cell obj) {
return JIT_FRAME_SIZE;
}
/* Allocates memory */
// Allocates memory
void quotation_jit::emit_mega_cache_lookup(cell methods_, fixnum index,
cell cache_) {
data_root<array> methods(methods_, parent);
data_root<array> cache(cache_, parent);
/* Load the object from the datastack. */
// Load the object from the datastack.
emit_with_literal(parent->special_objects[PIC_LOAD],
tag_fixnum(-index * sizeof(cell)));
/* Do a cache lookup. */
// Do a cache lookup.
emit_with_literal(parent->special_objects[MEGA_LOOKUP], cache.value());
/* If we end up here, the cache missed. */
// If we end up here, the cache missed.
emit(parent->special_objects[JIT_PROLOG]);
/* Push index, method table and cache on the stack. */
// Push index, method table and cache on the stack.
push(methods.value());
push(tag_fixnum(index));
push(cache.value());
word_call(parent->special_objects[MEGA_MISS_WORD]);
/* Now the new method has been stored into the cache, and its on
the stack. */
// Now the new method has been stored into the cache, and it's on
// the stack.
emit(parent->special_objects[JIT_EPILOG]);
emit(parent->special_objects[JIT_EXECUTE]);
}
/* Allocates memory */
// Allocates memory
code_block* factor_vm::jit_compile_quotation(cell owner_, cell quot_,
bool relocating) {
data_root<object> owner(owner_, this);
@ -301,7 +301,7 @@ code_block* factor_vm::jit_compile_quotation(cell owner_, cell quot_,
return compiled;
}
/* Allocates memory */
// Allocates memory
void factor_vm::jit_compile_quotation(cell quot_, bool relocating) {
data_root<quotation> quot(quot_, this);
if (!quotation_compiled_p(quot.untagged())) {
@ -311,7 +311,7 @@ void factor_vm::jit_compile_quotation(cell quot_, bool relocating) {
}
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_jit_compile() {
jit_compile_quotation(ctx->pop(), true);
}
@ -320,8 +320,8 @@ cell factor_vm::lazy_jit_compile_entry_point() {
return untag<word>(special_objects[LAZY_JIT_COMPILE_WORD])->entry_point;
}
/* push a new quotation on the stack */
/* Allocates memory */
// push a new quotation on the stack
// Allocates memory
void factor_vm::primitive_array_to_quotation() {
quotation* quot = allot<quotation>(sizeof(quotation));
@ -333,7 +333,7 @@ void factor_vm::primitive_array_to_quotation() {
ctx->replace(tag<quotation>(quot));
}
/* Allocates memory (from_unsigned_cell) */
// Allocates memory (from_unsigned_cell)
void factor_vm::primitive_quotation_code() {
data_root<quotation> quot(ctx->pop(), this);
@ -341,7 +341,7 @@ void factor_vm::primitive_quotation_code() {
ctx->push(from_unsigned_cell((cell)quot->code() + quot->code()->size()));
}
/* Allocates memory */
// Allocates memory
fixnum factor_vm::quot_code_offset_to_scan(cell quot_, cell offset) {
data_root<quotation> quot(quot_, this);
data_root<array> array(quot->array, this);
@ -354,7 +354,7 @@ fixnum factor_vm::quot_code_offset_to_scan(cell quot_, cell offset) {
return compiler.get_position();
}
/* Allocates memory */
// Allocates memory
cell factor_vm::lazy_jit_compile(cell quot_) {
data_root<quotation> quot(quot_, this);
@ -367,7 +367,7 @@ cell factor_vm::lazy_jit_compile(cell quot_) {
return quot.value();
}
/* Allocates memory */
// Allocates memory
VM_C_API cell lazy_jit_compile(cell quot, factor_vm* parent) {
return parent->lazy_jit_compile(quot);
}

View File

@ -4,7 +4,7 @@ struct quotation_jit : public jit {
data_root<array> elements;
bool compiling, relocate;
/* Allocates memory */
// Allocates memory
quotation_jit(cell owner, bool compiling, bool relocate, factor_vm* vm)
: jit(code_block_unoptimized, owner, vm),
elements(false_object, vm),
@ -32,12 +32,12 @@ struct quotation_jit : public jit {
bool stack_frame_p();
void iterate_quotation();
/* Allocates memory */
// Allocates memory
void word_call(cell word) {
emit_with_literal(parent->special_objects[JIT_WORD_CALL], word);
}
/* Allocates memory (literal(), emit())*/
// Allocates memory (literal(), emit())
void word_jump(cell word_) {
data_root<word> word(word_, parent);
#ifndef FACTOR_AMD64

View File

@ -36,8 +36,8 @@ void factor_vm::record_sample(bool prolog_p) {
if (counts.empty()) {
return;
}
/* Appends the callstack, which is just a sequence of quotation or
word references, to sample_callstacks. */
// Appends the callstack, which is just a sequence of quotation or
// word references, to sample_callstacks.
cell begin = sample_callstacks.size();
bool skip_p = prolog_p;
@ -51,7 +51,7 @@ void factor_vm::record_sample(bool prolog_p) {
cell end = sample_callstacks.size();
std::reverse(sample_callstacks.begin() + begin, sample_callstacks.end());
/* Add the sample. */
// Add the sample.
cell thread = special_objects[OBJ_CURRENT_THREAD];
samples.push_back(profiling_sample(counts, thread, begin, end));
}
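
A standalone sketch of that storage layout (names are illustrative, not the VM's): each sample records only a [begin, end) range into one shared, flat vector of frames, which keeps every sample's callstack in a single contiguous buffer.

#include <cstddef>
#include <stdint.h>
#include <vector>

struct sample {
  size_t callstack_begin, callstack_end;   // range in the shared frame vector
};

static std::vector<uintptr_t> sample_callstacks;  // frames of all samples
static std::vector<sample> samples;

static void record_sample(const std::vector<uintptr_t>& frames) {
  size_t begin = sample_callstacks.size();
  sample_callstacks.insert(sample_callstacks.end(),
                           frames.begin(), frames.end());
  sample s = { begin, sample_callstacks.size() };
  samples.push_back(s);
}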
@ -89,7 +89,7 @@ void factor_vm::primitive_sampling_profiler() {
set_sampling_profiler(to_fixnum(ctx->pop()));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_get_samples() {
if (atomic::load(&sampling_profiler_p) || samples.empty()) {
ctx->push(false_object);

View File

@ -36,8 +36,8 @@ struct profiling_sample {
profiling_sample_count counts;
// Active thread during sample
cell thread;
/* The callstack at safepoint time. Indexes to the beginning and ending
code_block entries in the vm sample_callstacks array. */
// The callstack at safepoint time. Indexes to the beginning and ending
// code_block entries in the vm sample_callstacks array.
cell callstack_begin, callstack_end;
profiling_sample(profiling_sample_count const& counts, cell thread,

View File

@ -4,8 +4,8 @@ inline cell align_page(cell a) { return align(a, getpagesize()); }
bool set_memory_locked(cell base, cell size, bool locked);
/* segments set up guard pages to check for under/overflow.
size must be a multiple of the page size */
// segments set up guard pages to check for under/overflow.
// size must be a multiple of the page size
struct segment {
cell start;
cell size;

View File

@ -1,6 +1,6 @@
namespace factor {
/* Size sans alignment. */
// Size sans alignment.
template <typename Fixup>
cell object::base_size(Fixup fixup) const {
switch (type()) {
@ -39,7 +39,7 @@ cell object::base_size(Fixup fixup) const {
}
}
/* Size of the object pointed to by an untagged pointer */
// Size of the object pointed to by an untagged pointer
template <typename Fixup>
cell object::size(Fixup fixup) const {
if (free_p())
@ -49,9 +49,9 @@ cell object::size(Fixup fixup) const {
inline cell object::size() const { return size(no_fixup()); }
/* The number of slots (cells) in an object which should be scanned by
the GC. The number can vary in arrays and tuples, in all other
types the number is a constant. */
// The number of slots (cells) in an object which should be scanned by
// the GC. The number can vary in arrays and tuples; in all other
// types it is a constant.
template <typename Fixup>
inline cell object::slot_count(Fixup fixup) const {
if (free_p())
@ -59,16 +59,16 @@ inline cell object::slot_count(Fixup fixup) const {
cell t = type();
if (t == ARRAY_TYPE) {
/* capacity + n slots */
// capacity + n slots
return 1 + array_capacity((array*)this);
} else if (t == TUPLE_TYPE) {
tuple_layout* layout = (tuple_layout*)fixup.translate_data(
untag<object>(((tuple*)this)->layout));
/* layout + n slots */
// layout + n slots
return 1 + tuple_capacity(layout);
} else {
switch (t) {
/* these objects do not refer to other objects at all */
// these objects do not refer to other objects at all
case FLOAT_TYPE:
case BYTE_ARRAY_TYPE:
case BIGNUM_TYPE:
@ -81,7 +81,7 @@ inline cell object::slot_count(Fixup fixup) const {
case WRAPPER_TYPE: return 1;
default:
critical_error("Invalid header in slot_count", (cell)this);
return 0; /* can't happen */
return 0; // can't happen
}
}
}
@ -90,33 +90,32 @@ inline cell object::slot_count() const {
return slot_count(no_fixup());
}
/* Slot visitors iterate over the slots of an object, applying a functor to
each one that is a non-immediate slot. The pointer is untagged first. The
functor returns a new untagged object pointer. The return value may or may not
equal the old one,
however the new pointer receives the same tag before being stored back to the
original location.
// Slot visitors iterate over the slots of an object, applying a functor to
// each one that is a non-immediate slot. The pointer is untagged first.
// The functor returns a new untagged object pointer. The return value may
// or may not equal the old one; however, the new pointer receives the same
// tag before being stored back to the original location.
Slots storing immediate values are left unchanged and the visitor does inspect
them.
// Slots storing immediate values are left unchanged and the visitor does
// inspect them.
This is used by GC's copying, sweep and compact phases, and the implementation
of the become primitive.
// This is used by GC's copying, sweep and compact phases, and the
// implementation of the become primitive.
Iteration is driven by visit_*() methods. Only one of them define GC roots:
- visit_all_roots()
// Iteration is driven by visit_*() methods. Only one of them defines GC
// roots:
// - visit_all_roots()
Code block visitors iterate over sets of code blocks, applying a functor to
each one. The functor returns a new code_block pointer, which may or may not
equal the old one. This is stored back to the original location.
// Code block visitors iterate over sets of code blocks, applying a functor
// to each one. The functor returns a new code_block pointer, which may or
// may not equal the old one. This is stored back to the original location.
This is used by GC's sweep and compact phases, and the implementation of the
modify-code-heap primitive.
// This is used by GC's sweep and compact phases, and the implementation of
// the modify-code-heap primitive.
Iteration is driven by visit_*() methods. Some of them define GC roots:
- visit_context_code_blocks()
- visit_callback_code_blocks()
*/
// Iteration is driven by visit_*() methods. Some of them define GC roots:
// - visit_context_code_blocks()
// - visit_callback_code_blocks()
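
A toy, self-contained model of that visiting contract: immediate slots are skipped, pointer slots are untagged, passed through the functor, and re-tagged before being stored back. The three-bit tag layout and the fixnum tag value here are assumptions for illustration, not the VM's exact scheme.

#include <stdint.h>

typedef uintptr_t cell;
static const cell TAG_MASK = 7;      // low bits hold the type tag (assumed)
static const cell FIXNUM_TAG = 0;    // fixnums are immediate values (assumed)

template <typename Fixup>
static void visit_slot(cell* slot, Fixup fixup) {
  cell tag = *slot & TAG_MASK;
  if (tag == FIXNUM_TAG)
    return;                          // immediate: left unchanged
  void* old_ptr = (void*)(*slot & ~TAG_MASK);
  void* new_ptr = fixup(old_ptr);    // may or may not equal old_ptr
  *slot = (cell)new_ptr | tag;       // same tag, possibly a new pointer
}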
template <typename Fixup> struct slot_visitor {
factor_vm* parent;
@ -223,18 +222,17 @@ template <typename Fixup> void slot_visitor<Fixup>::visit_all_roots() {
}
}
/* primitive_minor_gc() is invoked by inline GC checks, and it needs to fill in
uninitialized stack locations before actually calling the GC. See the
documentation in compiler.cfg.stacks.vacant for details.
// primitive_minor_gc() is invoked by inline GC checks, and it needs to
// fill in uninitialized stack locations before actually calling the GC.
// See the documentation in compiler.cfg.stacks.vacant for details.
So for each call frame:
// So for each call frame:
// - scrub some uninitialized locations
// - trace roots in spill slots
- scrub some uninitialized locations
- trace roots in spill slots
*/
template <typename Fixup> struct call_frame_slot_visitor {
slot_visitor<Fixup>* visitor;
/* NULL in case we're a visitor for a callstack object. */
// NULL in case we're a visitor for a callstack object.
context* ctx;
void scrub_stack(cell stack, uint8_t* bitmap, cell base, uint32_t count) {
@ -251,13 +249,12 @@ template <typename Fixup> struct call_frame_slot_visitor {
call_frame_slot_visitor(slot_visitor<Fixup>* visitor, context* ctx)
: visitor(visitor), ctx(ctx) {}
/*
frame top -> [return address]
[spill area]
...
[entry_point]
[size]
*/
// frame top -> [return address]
// [spill area]
// ...
// [entry_point]
// [size]
void operator()(cell frame_top, cell size, code_block* owner, cell addr) {
cell return_address = owner->offset(addr);
@ -279,7 +276,7 @@ template <typename Fixup> struct call_frame_slot_visitor {
uint8_t* bitmap = info->gc_info_bitmap();
if (ctx) {
/* Scrub vacant stack locations. */
// Scrub vacant stack locations.
scrub_stack(ctx->datastack,
bitmap,
info->callsite_scrub_d(callsite),
@ -290,7 +287,7 @@ template <typename Fixup> struct call_frame_slot_visitor {
info->scrub_r_count);
}
/* Subtract old value of base pointer from every derived pointer. */
// Subtract old value of base pointer from every derived pointer.
for (cell spill_slot = 0; spill_slot < info->derived_root_count;
spill_slot++) {
uint32_t base_pointer = info->lookup_base_pointer(callsite, spill_slot);
@ -303,7 +300,7 @@ template <typename Fixup> struct call_frame_slot_visitor {
}
}
/* Update all GC roots, including base pointers. */
// Update all GC roots, including base pointers.
cell callsite_gc_roots = info->callsite_gc_roots(callsite);
for (cell spill_slot = 0; spill_slot < info->gc_root_count; spill_slot++) {
@ -315,7 +312,7 @@ template <typename Fixup> struct call_frame_slot_visitor {
}
}
/* Add the base pointers to obtain new derived pointer values. */
// Add the base pointers to obtain new derived pointer values.
for (cell spill_slot = 0; spill_slot < info->derived_root_count;
spill_slot++) {
uint32_t base_pointer = info->lookup_base_pointer(callsite, spill_slot);
@ -339,8 +336,8 @@ void slot_visitor<Fixup>::visit_callstack(context* ctx) {
template <typename Fixup>
void slot_visitor<Fixup>::visit_context(context* ctx) {
/* Callstack is visited first because it scrubs the data and retain
stacks. */
// Callstack is visited first because it scrubs the data and retain
// stacks.
visit_callstack(ctx);
cell ds_ptr = ctx->datastack;
@ -352,8 +349,8 @@ void slot_visitor<Fixup>::visit_context(context* ctx) {
visit_object_array(ctx->context_objects,
ctx->context_objects + context_object_count);
/* Clear out the space not visited with a known pattern. That makes
it easier to see if uninitialized reads are made. */
// Clear out the space not visited with a known pattern. That makes
// it easier to see if uninitialized reads are made.
ctx->fill_stack_seg(ds_ptr, ds_seg, 0xbaadbadd);
ctx->fill_stack_seg(rs_ptr, rs_seg, 0xdaabdaab);
}
@ -461,9 +458,9 @@ void slot_visitor<Fixup>::visit_object(object *ptr) {
((alien*)ptr)->update_address();
}
/* Pops items from the mark stack and visits them until the stack is
empty. Used when doing a full collection and when collecting to
tenured space. */
// Pops items from the mark stack and visits them until the stack is
// empty. Used when doing a full collection and when collecting to
// tenured space.
template <typename Fixup>
void slot_visitor<Fixup>::visit_mark_stack(std::vector<cell>* mark_stack) {
while (!mark_stack->empty()) {
@ -483,11 +480,11 @@ void slot_visitor<Fixup>::visit_mark_stack(std::vector<cell>* mark_stack) {
}
}
/* Visits the instruction operands in a code block. If the operand is
a pointer to a code block or data object, then the fixup is applied
to it. Otherwise, if it is an external addess, that address is
recomputed. If it is an untagged number literal (RT_UNTAGGED) or an
immediate value, then nothing is done with it. */
// Visits the instruction operands in a code block. If the operand is
// a pointer to a code block or data object, then the fixup is applied
// to it. Otherwise, if it is an external address, that address is
// recomputed. If it is an untagged number literal (RT_UNTAGGED) or an
// immediate value, then nothing is done with it.
template <typename Fixup>
void slot_visitor<Fixup>::visit_instruction_operands(code_block* block,
cell rel_base) {
@ -541,10 +538,10 @@ cell slot_visitor<Fixup>::visit_card(SourceGeneration* gen,
cell start_addr = heap_base + index * card_size;
cell end_addr = start_addr + card_size;
/* Forward to the next object whose address is in the card. */
// Forward to the next object whose address is in the card.
if (!start || (start + ((object*)start)->size()) < start_addr) {
/* Optimization because finding the objects in a memory range is
expensive. It helps a lot when tracing consecutive cards. */
// Optimization because finding the objects in a memory range is
// expensive. It helps a lot when tracing consecutive cards.
cell gen_start_card = (gen->start - heap_base) / card_size;
start = gen->starts
.find_object_containing_card(index - gen_start_card);
@ -553,9 +550,9 @@ cell slot_visitor<Fixup>::visit_card(SourceGeneration* gen,
while (start && start < end_addr) {
visit_partial_objects(start, start_addr, end_addr);
if ((start + ((object*)start)->size()) >= end_addr) {
/* The object can overlap the card boundary, then the
remainder of it will be handled in the next card
tracing if that card is marked. */
// The object can overlap the card boundary; in that case the
// remainder of it will be handled in the next card
// tracing if that card is marked.
break;
}
start = gen->next_object_after(start);
@ -574,7 +571,7 @@ void slot_visitor<Fixup>::visit_cards(SourceGeneration* gen,
cell first_deck = (gen->start - heap_base) / deck_size;
cell last_deck = (gen->end - heap_base) / deck_size;
/* Address of last traced object. */
// Address of last traced object.
cell start = 0;
for (cell di = first_deck; di < last_deck; di++) {
if (decks[di] & mask) {
@ -591,7 +588,7 @@ void slot_visitor<Fixup>::visit_cards(SourceGeneration* gen,
start = visit_card(gen, ci, start);
if (!start) {
/* At end of generation, no need to scan more cards. */
// At end of generation, no need to scan more cards.
return;
}
}

View File

@ -2,7 +2,7 @@
namespace factor {
/* Allocates memory */
// Allocates memory
string* factor_vm::allot_string_internal(cell capacity) {
string* str = allot<string>(string_size(capacity));
@ -13,7 +13,7 @@ string* factor_vm::allot_string_internal(cell capacity) {
return str;
}
/* Allocates memory */
// Allocates memory
void factor_vm::fill_string(string* str_, cell start, cell capacity,
cell fill) {
data_root<string> str(str_, this);
@ -39,14 +39,14 @@ void factor_vm::fill_string(string* str_, cell start, cell capacity,
}
}
/* Allocates memory */
// Allocates memory
string* factor_vm::allot_string(cell capacity, cell fill) {
data_root<string> str(allot_string_internal(capacity), this);
fill_string(str.untagged(), 0, capacity, fill);
return str.untagged();
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_string() {
cell initial = to_cell(ctx->pop());
cell length = unbox_array_size();
@ -60,7 +60,7 @@ bool factor_vm::reallot_string_in_place_p(string* str, cell capacity) {
capacity <= string_capacity(str);
}
/* Allocates memory */
// Allocates memory
string* factor_vm::reallot_string(string* str_, cell capacity) {
data_root<string> str(str_, this);
@ -97,7 +97,7 @@ string* factor_vm::reallot_string(string* str_, cell capacity) {
}
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_resize_string() {
data_root<string> str(ctx->pop(), this);
check_tagged(str);

View File

@ -3,7 +3,7 @@
namespace factor {
void factor_vm::collect_to_tenured() {
/* Copy live objects from aging space to tenured space. */
// Copy live objects from aging space to tenured space.
gc_workhorse<tenured_space, to_tenured_policy>
workhorse(this, data->tenured, to_tenured_policy(this));
slot_visitor<gc_workhorse<tenured_space, to_tenured_policy>>

View File

@ -2,8 +2,8 @@
namespace factor {
/* push a new tuple on the stack, filling its slots with f */
/* Allocates memory */
// push a new tuple on the stack, filling its slots with f
// Allocates memory
void factor_vm::primitive_tuple() {
data_root<tuple_layout> layout(ctx->pop(), this);
tagged<tuple> t(allot<tuple>(tuple_size(layout.untagged())));
@ -15,8 +15,8 @@ void factor_vm::primitive_tuple() {
ctx->push(t.value());
}
/* push a new tuple on the stack, filling its slots from the stack */
/* Allocates memory */
// push a new tuple on the stack, filling its slots from the stack
// Allocates memory
void factor_vm::primitive_tuple_boa() {
data_root<tuple_layout> layout(ctx->pop(), this);
tagged<tuple> t(allot<tuple>(tuple_size(layout.untagged())));

View File

@ -2,7 +2,7 @@
namespace factor {
/* Fill in a PPC function descriptor */
// Fill in a PPC function descriptor
void* fill_function_descriptor(void* ptr, void* code) {
void** descriptor = (void**)ptr;
descriptor[0] = code;
@ -11,12 +11,12 @@ void* fill_function_descriptor(void* ptr, void* code) {
return descriptor;
}
/* Get a field from a PPC function descriptor */
// Get a field from a PPC function descriptor
void* function_descriptor_field(void* ptr, size_t idx) {
return ptr ? ((void**)ptr)[idx] : ptr;
}
/* If memory allocation fails, bail out */
// If memory allocation fails, bail out
vm_char* safe_strdup(const vm_char* str) {
vm_char* ptr = STRDUP(str);
if (!ptr)
@ -32,8 +32,8 @@ cell read_cell_hex() {
return cell;
}
/* On Windows, memcpy() is in a different DLL and the non-optimizing
compiler can't find it */
// On Windows, memcpy() is in a different DLL and the non-optimizing
// compiler can't find it
VM_C_API void* factor_memcpy(void* dst, void* src, size_t len) {
return memcpy(dst, src, len);
}

View File

@ -17,136 +17,136 @@ struct factor_vm {
// basis/vm/vm.factor
// basis/compiler/constants/constants.factor
/* Current context */
// Current context
context* ctx;
/* Spare context -- for callbacks */
// Spare context -- for callbacks
context* spare_ctx;
/* New objects are allocated here, use the data->nursery reference
instead from c++ code. */
// New objects are allocated here; use the data->nursery reference
// instead from c++ code.
bump_allocator nursery;
/* Add this to a shifted address to compute write barrier offsets */
// Add this to a shifted address to compute write barrier offsets
cell cards_offset;
cell decks_offset;
/* cdecl signal handler address, used by signal handler subprimitives */
// cdecl signal handler address, used by signal handler subprimitives
cell signal_handler_addr;
/* are we handling a memory error? used to detect double faults */
// are we handling a memory error? used to detect double faults
cell faulting_p;
/* Various special objects, accessed by special-object and
set-special-object primitives */
// Various special objects, accessed by special-object and
// set-special-object primitives
cell special_objects[special_object_count];
// THESE FIELDS ARE ACCESSED DIRECTLY FROM FACTOR.
// ^^^^^^
//
/* Handle to the main thread we run in */
// Handle to the main thread we run in
THREADHANDLE thread;
/* Data stack and retain stack sizes */
// Data stack and retain stack sizes
cell datastack_size, retainstack_size, callstack_size;
/* Stack of callback IDs */
// Stack of callback IDs
std::vector<int> callback_ids;
/* Next callback ID */
// Next callback ID
int callback_id;
/* List of callback function descriptors for PPC */
// List of callback function descriptors for PPC
std::list<void**> function_descriptors;
/* Pooling unused contexts to make context allocation cheaper */
// Pooling unused contexts to make context allocation cheaper
std::list<context*> unused_contexts;
/* Active contexts, for tracing by the GC */
// Active contexts, for tracing by the GC
std::set<context*> active_contexts;
/* External entry points */
// External entry points
c_to_factor_func_type c_to_factor_func;
/* Is profiling enabled? */
// Is profiling enabled?
volatile cell sampling_profiler_p;
fixnum samples_per_second;
/* Global variables used to pass fault handler state from signal handler
to VM */
// Global variables used to pass fault handler state from signal handler
// to VM
bool signal_resumable;
cell signal_number;
cell signal_fault_addr;
cell signal_fault_pc;
unsigned int signal_fpu_status;
/* Pipe used to notify Factor multiplexer of signals */
// Pipe used to notify Factor multiplexer of signals
int signal_pipe_input, signal_pipe_output;
/* State kept by the sampling profiler */
// State kept by the sampling profiler
std::vector<profiling_sample> samples;
std::vector<cell> sample_callstacks;
volatile profiling_sample_count sample_counts;
/* GC is off during heap walking */
// GC is off during heap walking
bool gc_off;
/* Data heap */
// Data heap
data_heap* data;
/* Code heap */
// Code heap
code_heap* code;
/* Pinned callback stubs */
// Pinned callback stubs
callback_heap* callbacks;
/* Only set if we're performing a GC */
// Only set if we're performing a GC
gc_state* current_gc;
volatile cell current_gc_p;
/* Set if we're in the jit */
// Set if we're in the jit
volatile fixnum current_jit_count;
/* Mark stack used for mark & sweep GC */
// Mark stack used for mark & sweep GC
std::vector<cell> mark_stack;
/* If not NULL, we push GC events here */
// If not NULL, we push GC events here
std::vector<gc_event>* gc_events;
/* If a runtime function needs to call another function which potentially
allocates memory, it must wrap any references to the data and code
heaps with data_root and code_root smart pointers, which register
themselves here. See data_roots.hpp and code_roots.hpp */
// If a runtime function needs to call another function which potentially
// allocates memory, it must wrap any references to the data and code
// heaps with data_root and code_root smart pointers, which register
// themselves here. See data_roots.hpp and code_roots.hpp
std::vector<cell*> data_roots;
std::vector<code_root*> code_roots;
/* Debugger */
// Debugger
bool fep_p;
bool fep_help_was_shown;
bool fep_disabled;
bool full_output;
/* Method dispatch statistics */
// Method dispatch statistics
dispatch_statistics dispatch_stats;
/* Number of entries in a polymorphic inline cache */
// Number of entries in a polymorphic inline cache
cell max_pic_size;
/* Incrementing object counter for identity hashing */
// Incrementing object counter for identity hashing
cell object_counter;
/* Sanity check to ensure that monotonic counter doesn't decrease */
// Sanity check to ensure that monotonic counter doesn't decrease
uint64_t last_nano_count;
/* Stack for signal handlers, only used on Unix */
// Stack for signal handlers, only used on Unix
segment* signal_callstack_seg;
/* Are we already handling a fault? Used to catch double memory faults */
// Are we already handling a fault? Used to catch double memory faults
static bool fatal_erroring_p;
/* Two fep_p variants, one might be redundant. */
// Two fep_p variants; one might be redundant.
volatile cell safepoint_fep_p;
// contexts
@ -241,7 +241,7 @@ struct factor_vm {
void bignum_destructive_unnormalization(bignum* bn, int shift_right);
bignum_digit_type bignum_digit_divide(
bignum_digit_type uh, bignum_digit_type ul, bignum_digit_type v,
bignum_digit_type* q) /* return value */;
bignum_digit_type* q); // return value
bignum_digit_type bignum_digit_divide_subtract(bignum_digit_type v1,
bignum_digit_type v2,
bignum_digit_type guess,
@ -297,9 +297,9 @@ struct factor_vm {
template <typename Iterator> inline void each_object(Iterator& iterator) {
/* The nursery can't be iterated because there may be gaps between
the objects (see factor_vm::reallot_array) so we require it to
be empty first. */
// The nursery can't be iterated because there may be gaps between
// the objects (see factor_vm::reallot_array) so we require it to
// be empty first.
FACTOR_ASSERT(data->nursery->occupied_space() == 0);
gc_off = true;
@ -319,8 +319,8 @@ struct factor_vm {
each_object(each_object_func);
}
/* the write barrier must be called any time we are potentially storing a
pointer from an older generation to a younger one */
// the write barrier must be called any time we are potentially storing a
// pointer from an older generation to a younger one
inline void write_barrier(cell* slot_ptr) {
*(unsigned char*)(cards_offset + ((cell)slot_ptr >> card_bits)) = card_mark_mask;
*(unsigned char*)(decks_offset + ((cell)slot_ptr >> deck_bits)) = card_mark_mask;
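
A standalone model of that marking, using the card and deck sizes implied elsewhere in this commit (256-byte cards, 1024 cards per deck) and the 0x80 | 0x40 value of card_mark_mask; the real VM writes through precomputed raw offsets rather than vectors, and the sizes here are stated assumptions.

#include <stdint.h>
#include <vector>

static const unsigned card_bits = 8;              // 256-byte cards (assumed)
static const unsigned deck_bits = card_bits + 10; // 1024 cards per deck
static const uint8_t card_mark = 0x80 | 0x40;     // points-to-nursery | aging

static void mark_card_and_deck(std::vector<uint8_t>& cards,
                               std::vector<uint8_t>& decks,
                               uintptr_t heap_base, uintptr_t slot_addr) {
  cards[(slot_addr - heap_base) >> card_bits] = card_mark;
  decks[(slot_addr - heap_base) >> deck_bits] = card_mark;
}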
@ -361,7 +361,7 @@ struct factor_vm {
object* allot_object(cell type, cell size);
object* allot_large_object(cell type, cell size);
/* Allocates memory */
// Allocates memory
template <typename Type> Type* allot(cell size) {
return (Type*)allot_object(Type::type_number, size);
}

View File

@ -2,14 +2,14 @@
namespace factor {
/* Compile a word definition with the non-optimizing compiler. */
/* Allocates memory */
// Compile a word definition with the non-optimizing compiler.
// Allocates memory
void factor_vm::jit_compile_word(cell word_, cell def_, bool relocating) {
data_root<word> word(word_, this);
data_root<quotation> def(def_, this);
/* Refuse to compile this word more than once, because quot_compiled_p()
depends on the identity of its code block */
// Refuse to compile this word more than once, because quot_compiled_p()
// depends on the identity of its code block
if (word->entry_point &&
word.value() == special_objects[LAZY_JIT_COMPILE_WORD])
return;
@ -24,7 +24,7 @@ void factor_vm::jit_compile_word(cell word_, cell def_, bool relocating) {
jit_compile_quotation(word->pic_tail_def, relocating);
}
/* Allocates memory */
// Allocates memory
word* factor_vm::allot_word(cell name_, cell vocab_, cell hashcode_) {
data_root<object> vocab(vocab_, this);
data_root<object> name(name_, this);
@ -46,8 +46,8 @@ word* factor_vm::allot_word(cell name_, cell vocab_, cell hashcode_) {
return new_word.untagged();
}
/* (word) ( name vocabulary hashcode -- word ) */
/* Allocates memory */
// (word) ( name vocabulary hashcode -- word )
// Allocates memory
void factor_vm::primitive_word() {
cell hashcode = ctx->pop();
cell vocab = ctx->pop();
@ -55,8 +55,8 @@ void factor_vm::primitive_word() {
ctx->push(tag<word>(allot_word(name, vocab, hashcode)));
}
/* word-code ( word -- start end ) */
/* Allocates memory (from_unsigned_cell allocates) */
// word-code ( word -- start end )
// Allocates memory (from_unsigned_cell allocates)
void factor_vm::primitive_word_code() {
data_root<word> w(ctx->pop(), this);
check_tagged(w);
@ -70,7 +70,7 @@ void factor_vm::primitive_word_optimized_p() {
ctx->replace(tag_boolean(w->code()->optimized_p()));
}
/* Allocates memory */
// Allocates memory
void factor_vm::primitive_wrapper() {
wrapper* new_wrapper = allot<wrapper>(sizeof(wrapper));
new_wrapper->object = ctx->peek();

View File

@ -1,14 +1,14 @@
/* card marking write barrier. a card is a byte storing a mark flag,
and the offset (in cells) of the first object in the card.
// card marking write barrier. a card is a byte storing a mark flag,
// and the offset (in cells) of the first object in the card.
the mark flag is set by the write barrier when an object in the
card has a slot written to.
// the mark flag is set by the write barrier when an object in the
// card has a slot written to.
the offset of the first object is set by the allocator. */
// the offset of the first object is set by the allocator.
namespace factor {
/* if card_points_to_nursery is set, card_points_to_aging must also be set. */
// if card_points_to_nursery is set, card_points_to_aging must also be set.
static const cell card_points_to_nursery = 0x80;
static const cell card_points_to_aging = 0x40;
static const cell card_mark_mask =
@ -22,8 +22,8 @@ static const cell addr_card_mask = card_size - 1;
typedef uint8_t card_deck;
static const cell deck_bits = card_bits + 10;
/* Number of bytes on the heap a deck addresses. Each deck as 1024
cards. So 256 kb. */
// Number of bytes on the heap a deck addresses. Each deck has 1024
// cards, so 256 KB.
static const cell deck_size = 1 << deck_bits;
static const cell cards_per_deck = 1 << 10;
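
For the record, the size arithmetic above checks out if card_bits is 8, which is what the "1024 cards per deck, 256 KB" figure implies. These static_asserts are illustrative checks, not part of the commit, and the assumed_* names are not the VM's.

static const unsigned long assumed_card_bits = 8;
static const unsigned long assumed_deck_bits = assumed_card_bits + 10;
static_assert((1ul << assumed_deck_bits) == 1024ul * (1ul << assumed_card_bits),
              "each deck covers 1024 cards");
static_assert((1ul << assumed_deck_bits) == 256ul * 1024ul,
              "each deck covers 256 KB of heap");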