Upgrade to nasal 1.0 test candidate

This commit is contained in:
andy
2005-04-18 19:48:47 +00:00
parent cf056bace7
commit 966331dac7
19 changed files with 1699 additions and 1072 deletions

View File

@@ -8,7 +8,6 @@ libsgnasal_a_SOURCES = \
code.c code.h \
codegen.c \
data.h \
debug.c \
gc.c \
hash.c \
lex.c \
@@ -18,6 +17,8 @@ libsgnasal_a_SOURCES = \
nasal.h \
parse.c parse.h \
string.c \
vector.c
vector.c \
thread-posix.c \
thread-win32.c
INCLUDES = -I$(top_srcdir)

View File

@@ -4,6 +4,7 @@
////////////////////////////////////////////////////////////////////////
// Debugging stuff. ////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
//#define DEBUG_NASAL
#if !defined(DEBUG_NASAL)
# define DBG(expr) /* noop */
#else
@@ -13,12 +14,12 @@
#endif
char* opStringDEBUG(int op);
void printOpDEBUG(int ip, int op);
void printRefDEBUG(naRef r);
void printStackDEBUG(struct Context* ctx);
////////////////////////////////////////////////////////////////////////
// FIXME: need to store a list of all contexts
struct Context globalContext;
struct Globals* globals = 0;
static naRef bindFunction(struct Context* ctx, struct Frame* f, naRef code);
#define ERR(c, msg) naRuntimeError((c),(msg))
void naRuntimeError(struct Context* c, char* msg)
@@ -63,7 +64,7 @@ static naRef stringify(struct Context* ctx, naRef r)
static int checkVec(struct Context* ctx, naRef vec, naRef idx)
{
int i = (int)numify(ctx, idx);
if(i < 0 || i >= vec.ref.ptr.vec->size)
if(i < 0 || i >= vec.ref.ptr.vec->rec->size)
ERR(ctx, "vector index out of bounds");
return i;
}
@@ -94,90 +95,157 @@ static void containerSet(struct Context* ctx, naRef box, naRef key, naRef val)
static void initContext(struct Context* c)
{
int i;
for(i=0; i<NUM_NASAL_TYPES; i++)
naGC_init(&(c->pools[i]), i);
c->fTop = c->opTop = c->markTop = 0;
for(i=0; i<NUM_NASAL_TYPES; i++)
c->nfree[i] = 0;
naVec_setsize(c->temps, 4);
c->callParent = 0;
c->callChild = 0;
c->dieArg = naNil();
c->error = 0;
}
naBZero(c->fStack, MAX_RECURSION * sizeof(struct Frame));
naBZero(c->opStack, MAX_STACK_DEPTH * sizeof(naRef));
static void initGlobals()
{
globals = (struct Globals*)naAlloc(sizeof(struct Globals));
naBZero(globals, sizeof(struct Globals));
// Make sure the args vectors (which are static with the context)
// are initialized to nil.
for(i=0; i<MAX_RECURSION; i++)
c->fStack[i].args = naNil();
globals->sem = naNewSem();
globals->lock = naNewLock();
// Note we can't use naNewVector() for this; it requires that
// temps exist first.
c->temps = naObj(T_VEC, naGC_get(&(c->pools[T_VEC])));
int i;
globals->allocCount = 256; // reasonable starting value
for(i=0; i<NUM_NASAL_TYPES; i++)
naGC_init(&(globals->pools[i]), i);
globals->deadsz = 256;
globals->ndead = 0;
globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
c->save = naNil();
// Initialize a single context
globals->freeContexts = 0;
globals->allContexts = 0;
struct Context* c = naNewContext();
globals->symbols = naNewHash(c);
globals->save = naNewVector(c);
// Cache pre-calculated "me", "arg" and "parents" scalars
c->meRef = naStr_fromdata(naNewString(c), "me", 2);
c->argRef = naStr_fromdata(naNewString(c), "arg", 3);
c->parentsRef = naStr_fromdata(naNewString(c), "parents", 7);
globals->meRef = naInternSymbol(naStr_fromdata(naNewString(c), "me", 2));
globals->argRef = naInternSymbol(naStr_fromdata(naNewString(c), "arg", 3));
globals->parentsRef = naInternSymbol(naStr_fromdata(naNewString(c), "parents", 7));
naFreeContext(c);
}
struct Context* naNewContext()
{
// FIXME: need more than one!
struct Context* c = &globalContext;
initContext(c);
int dummy;
if(globals == 0)
initGlobals();
LOCK();
struct Context* c = globals->freeContexts;
if(c) {
globals->freeContexts = c->nextFree;
c->nextFree = 0;
UNLOCK();
initContext(c);
} else {
UNLOCK();
c = (struct Context*)naAlloc(sizeof(struct Context));
// Chicken and egg, can't use naNew because it requires temps to exist
c->temps = naObj(T_VEC, (naGC_get(&globals->pools[T_VEC], 1, &dummy))[0]);
initContext(c);
LOCK();
c->nextAll = globals->allContexts;
c->nextFree = 0;
globals->allContexts = c;
UNLOCK();
}
return c;
}
void naGarbageCollect()
void naFreeContext(struct Context* c)
{
int i;
struct Context* c = &globalContext; // FIXME: more than one!
for(i=0; i < c->fTop; i++) {
naGC_mark(c->fStack[i].func);
naGC_mark(c->fStack[i].locals);
}
for(i=0; i < MAX_RECURSION; i++)
naGC_mark(c->fStack[i].args); // collect *all* the argument lists
for(i=0; i < c->opTop; i++)
naGC_mark(c->opStack[i]);
naGC_mark(c->temps);
naGC_mark(c->save);
naGC_mark(c->meRef);
naGC_mark(c->argRef);
naGC_mark(c->parentsRef);
// Finally collect all the freed objects
for(i=0; i<NUM_NASAL_TYPES; i++)
naGC_reap(&(c->pools[i]));
naVec_setsize(c->temps, 0);
LOCK();
c->nextFree = globals->freeContexts;
globals->freeContexts = c;
UNLOCK();
}
void setupFuncall(struct Context* ctx, naRef func, naRef args)
#define PUSH(r) do { \
if(ctx->opTop >= MAX_STACK_DEPTH) ERR(ctx, "stack overflow"); \
ctx->opStack[ctx->opTop++] = r; \
} while(0)
struct Frame* setupFuncall(struct Context* ctx, int nargs, int mcall, int tail)
{
int i;
naRef *frame;
struct Frame* f;
if(!IS_FUNC(func) ||
!(IS_CCODE(func.ref.ptr.func->code) ||
IS_CODE(func.ref.ptr.func->code)))
{
struct naCode* c;
DBG(printf("setupFuncall(nargs:%d, mcall:%d)\n", nargs, mcall);)
frame = &ctx->opStack[ctx->opTop - nargs - 1];
if(!IS_FUNC(frame[0]))
ERR(ctx, "function/method call invoked on uncallable object");
}
// Just do native calls right here, and don't touch the stack
// frames; return the current one (unless it's a tail call!).
if(frame[0].ref.ptr.func->code.ref.ptr.obj->type == T_CCODE) {
naRef obj = mcall ? frame[-1] : naNil();
naCFunction fp = frame[0].ref.ptr.func->code.ref.ptr.ccode->fptr;
naRef result = (*fp)(ctx, obj, nargs, frame + 1);
ctx->opTop -= nargs + 1 + mcall;
PUSH(result);
return &(ctx->fStack[ctx->fTop-1]);
}
if(tail) ctx->fTop--;
else if(ctx->fTop >= MAX_RECURSION) ERR(ctx, "call stack overflow");
// Note: assign nil first, otherwise the naNew() can cause a GC,
// which will now (after fTop++) see the *old* reference as a
// markable value!
f = &(ctx->fStack[ctx->fTop++]);
f->func = func;
f->locals = f->func = naNil();
f->locals = naNewHash(ctx);
f->func = frame[0];
f->ip = 0;
f->bp = ctx->opTop;
f->line = 0;
f->bp = ctx->opTop - (nargs + 1 + mcall);
DBG(printf("Entering frame %d\n", ctx->fTop-1);)
if(mcall)
naHash_set(f->locals, globals->meRef, frame[-1]);
f->args = args;
if(IS_CCODE(func.ref.ptr.func->code)) {
f->locals = naNil();
} else if(IS_CODE(func.ref.ptr.func->code)) {
f->locals = naNewHash(ctx);
naHash_set(f->locals, ctx->argRef, args);
// Set the argument symbols, and put any remaining args in a vector
c = (*frame++).ref.ptr.func->code.ref.ptr.code;
if(nargs < c->nArgs) ERR(ctx, "not enough arguments to function call");
for(i=0; i<c->nArgs; i++)
naHash_newsym(f->locals.ref.ptr.hash,
&c->constants[c->argSyms[i]], &frame[i]);
frame += c->nArgs;
nargs -= c->nArgs;
for(i=0; i<c->nOptArgs; i++, nargs--) {
naRef val = nargs > 0 ? frame[i] : c->constants[c->optArgVals[i]];
if(IS_CODE(val))
val = bindFunction(ctx, &ctx->fStack[ctx->fTop-2], val);
naHash_newsym(f->locals.ref.ptr.hash, &c->constants[c->optArgSyms[i]],
&val);
}
if(c->needArgVector || nargs > 0)
{
naRef args = naNewVector(ctx);
naVec_setsize(args, nargs > 0 ? nargs : 0);
for(i=0; i<nargs; i++)
args.ref.ptr.vec->rec->array[i] = *frame++;
naHash_newsym(f->locals.ref.ptr.hash, &c->restArgSym, &args);
}
ctx->opTop = f->bp; // Pop the stack last, to avoid GC lossage
DBG(printf("Entering frame %d with %d args\n", ctx->fTop-1, nargs);)
return f;
}
static naRef evalAndOr(struct Context* ctx, int op, naRef ra, naRef rb)
@@ -196,114 +264,94 @@ static naRef evalEquality(int op, naRef ra, naRef rb)
return naNum((op==OP_EQ) ? result : !result);
}
// Apply a binary arithmetic or comparison opcode to two naRef
// operands.  Both operands are coerced to doubles first (numify may
// raise a Nasal runtime error on non-numeric input).  Comparison
// opcodes yield numeric 1/0.  An unrecognized opcode falls through
// and yields nil.
static naRef evalBinaryNumeric(struct Context* ctx, int op, naRef ra, naRef rb)
{
    double x = numify(ctx, ra);
    double y = numify(ctx, rb);
    switch(op) {
    case OP_PLUS:  return naNum(x + y);
    case OP_MINUS: return naNum(x - y);
    case OP_MUL:   return naNum(x * y);
    case OP_DIV:   return naNum(x / y);
    case OP_LT:    return naNum((x < y)  ? 1 : 0);
    case OP_LTE:   return naNum((x <= y) ? 1 : 0);
    case OP_GT:    return naNum((x > y)  ? 1 : 0);
    case OP_GTE:   return naNum((x >= y) ? 1 : 0);
    }
    return naNil();
}
// When a code object comes out of the constant pool and shows up on
// the stack, it needs to be bound with the lexical context.
static naRef bindFunction(struct Context* ctx, struct Frame* f, naRef code)
{
naRef next = f->func.ref.ptr.func->closure;
naRef closure = naNewClosure(ctx, f->locals, next);
naRef result = naNewFunc(ctx, code);
result.ref.ptr.func->closure = closure;
result.ref.ptr.func->namespace = f->locals;
result.ref.ptr.func->next = f->func;
return result;
}
static int getClosure(struct naClosure* c, naRef sym, naRef* result)
static int getClosure(struct naFunc* c, naRef sym, naRef* result)
{
while(c) {
if(naHash_get(c->namespace, sym, result)) return 1;
c = c->next.ref.ptr.closure;
c = c->next.ref.ptr.func;
}
return 0;
}
// Get a local symbol, or check the closure list if it isn't there
static naRef getLocal(struct Context* ctx, struct Frame* f, naRef sym)
static naRef getLocal2(struct Context* ctx, struct Frame* f, naRef sym)
{
naRef result;
if(!naHash_get(f->locals, sym, &result)) {
naRef c = f->func.ref.ptr.func->closure;
if(!getClosure(c.ref.ptr.closure, sym, &result))
if(!naHash_get(f->locals, sym, &result))
if(!getClosure(f->func.ref.ptr.func, sym, &result))
ERR(ctx, "undefined symbol");
}
return result;
}
static int setClosure(naRef closure, naRef sym, naRef val)
static void getLocal(struct Context* ctx, struct Frame* f,
naRef* sym, naRef* out)
{
struct naClosure* c = closure.ref.ptr.closure;
struct naFunc* func;
struct naStr* str = sym->ref.ptr.str;
if(naHash_sym(f->locals.ref.ptr.hash, str, out))
return;
func = f->func.ref.ptr.func;
while(func && func->namespace.ref.ptr.hash) {
if(naHash_sym(func->namespace.ref.ptr.hash, str, out))
return;
func = func->next.ref.ptr.func;
}
// Now do it again using the more general naHash_get(). This will
// only be necessary if something has created the value in the
// namespace using the more generic hash syntax
// (e.g. namespace["symbol"] and not namespace.symbol).
*out = getLocal2(ctx, f, *sym);
}
static int setClosure(naRef func, naRef sym, naRef val)
{
struct naFunc* c = func.ref.ptr.func;
if(c == 0) { return 0; }
else if(naHash_tryset(c->namespace, sym, val)) { return 1; }
else { return setClosure(c->next, sym, val); }
}
static naRef setLocal(struct Frame* f, naRef sym, naRef val)
static naRef setSymbol(struct Frame* f, naRef sym, naRef val)
{
// Try the locals first, if not already there try the closures in
// order. Finally put it in the locals if nothing matched.
if(!naHash_tryset(f->locals, sym, val))
if(!setClosure(f->func.ref.ptr.func->closure, sym, val))
if(!setClosure(f->func, sym, val))
naHash_set(f->locals, sym, val);
return val;
}
// Recursively descend into the parents lists
static int getMember(struct Context* ctx, naRef obj, naRef fld, naRef* result)
static int getMember(struct Context* ctx, naRef obj, naRef fld,
naRef* result, int count)
{
naRef p;
if(--count < 0) ERR(ctx, "too many parents");
if(!IS_HASH(obj)) ERR(ctx, "non-objects have no members");
if(naHash_get(obj, fld, result)) {
return 1;
} else if(naHash_get(obj, ctx->parentsRef, &p)) {
int i;
if(!IS_VEC(p)) ERR(ctx, "parents field not vector");
for(i=0; i<p.ref.ptr.vec->size; i++)
if(getMember(ctx, p.ref.ptr.vec->array[i], fld, result))
return 1;
} else if(naHash_get(obj, globals->parentsRef, &p)) {
if(IS_VEC(p)) {
int i;
struct VecRec* v = p.ref.ptr.vec->rec;
for(i=0; i<v->size; i++)
if(getMember(ctx, v->array[i], fld, result, count))
return 1;
} else
ERR(ctx, "parents field not vector");
}
return 0;
}
// Push a value onto the context's operand stack.  Raises a Nasal
// runtime error (which longjmps out via naRuntimeError) on overflow
// rather than writing past the end of opStack.
static void PUSH(struct Context* ctx, naRef r)
{
if(ctx->opTop >= MAX_STACK_DEPTH) ERR(ctx, "stack overflow");
ctx->opStack[ctx->opTop++] = r;
}
// Pop and return the top value from the operand stack.  Underflow
// indicates a code-generation bug, hence the "BUG:" error text.
static naRef POP(struct Context* ctx)
{
if(ctx->opTop == 0) ERR(ctx, "BUG: stack underflow");
return ctx->opStack[--ctx->opTop];
}
// Return the top operand-stack value without popping it.  As with
// POP, underflow can only happen via a codegen bug.
static naRef TOP(struct Context* ctx)
{
if(ctx->opTop == 0) ERR(ctx, "BUG: stack underflow");
return ctx->opStack[ctx->opTop-1];
}
// Decode a 16-bit big-endian immediate operand from the bytecode
// stream, advancing the frame's instruction pointer past the two
// operand bytes.
static int ARG16(unsigned char* byteCode, struct Frame* f)
{
    int hi = byteCode[f->ip++];
    int lo = byteCode[f->ip++];
    return (hi << 8) | lo;
}
// OP_EACH works like a vector get, except that it leaves the vector
// and index on the stack, increments the index after use, and pops
// the arguments and pushes a nil if the index is beyond the end.
@@ -311,214 +359,250 @@ static void evalEach(struct Context* ctx)
{
int idx = (int)(ctx->opStack[ctx->opTop-1].num);
naRef vec = ctx->opStack[ctx->opTop-2];
if(idx >= vec.ref.ptr.vec->size) {
if(!IS_VEC(vec)) naRuntimeError(ctx, "foreach enumeration of non-vector");
if(!vec.ref.ptr.vec->rec || idx >= vec.ref.ptr.vec->rec->size) {
ctx->opTop -= 2; // pop two values
PUSH(ctx, naNil());
PUSH(naNil());
return;
}
ctx->opStack[ctx->opTop-1].num = idx+1; // modify in place
PUSH(ctx, naVec_get(vec, idx));
PUSH(naVec_get(vec, idx));
}
static void run1(struct Context* ctx, struct Frame* f, naRef code)
#define ARG() cd->byteCode[f->ip++]
#define CONSTARG() cd->constants[ARG()]
#define POP() ctx->opStack[--ctx->opTop]
#define STK(n) (ctx->opStack[ctx->opTop-(n)])
#define FIXFRAME() f = &(ctx->fStack[ctx->fTop-1]); \
cd = f->func.ref.ptr.func->code.ref.ptr.code;
static naRef run(struct Context* ctx)
{
naRef a, b, c;
struct naCode* cd = code.ref.ptr.code;
struct Frame* f;
struct naCode* cd;
int op, arg;
naRef a, b, c;
if(f->ip >= cd->nBytes) {
DBG(printf("Done with frame %d\n", ctx->fTop-1);)
ctx->fTop--;
if(ctx->fTop <= 0)
ctx->done = 1;
return;
}
FIXFRAME();
op = cd->byteCode[f->ip++];
DBG(printf("Stack Depth: %d\n", ctx->opTop));
DBG(printOpDEBUG(f->ip-1, op));
switch(op) {
case OP_POP:
POP(ctx);
break;
case OP_DUP:
PUSH(ctx, ctx->opStack[ctx->opTop-1]);
break;
case OP_XCHG:
a = POP(ctx); b = POP(ctx);
PUSH(ctx, a); PUSH(ctx, b);
break;
case OP_PLUS: case OP_MUL: case OP_DIV: case OP_MINUS:
case OP_LT: case OP_LTE: case OP_GT: case OP_GTE:
a = POP(ctx); b = POP(ctx);
PUSH(ctx, evalBinaryNumeric(ctx, op, b, a));
break;
case OP_EQ: case OP_NEQ:
a = POP(ctx); b = POP(ctx);
PUSH(ctx, evalEquality(op, b, a));
break;
case OP_AND: case OP_OR:
a = POP(ctx); b = POP(ctx);
PUSH(ctx, evalAndOr(ctx, op, a, b));
break;
case OP_CAT:
// stringify can call the GC, so don't take stuff off the stack!
if(ctx->opTop <= 1) ERR(ctx, "BUG: stack underflow");
a = stringify(ctx, ctx->opStack[ctx->opTop-1]);
b = stringify(ctx, ctx->opStack[ctx->opTop-2]);
c = naStr_concat(naNewString(ctx), b, a);
ctx->opTop -= 2;
PUSH(ctx, c);
break;
case OP_NEG:
a = POP(ctx);
PUSH(ctx, naNum(-numify(ctx, a)));
break;
case OP_NOT:
a = POP(ctx);
PUSH(ctx, naNum(boolify(ctx, a) ? 0 : 1));
break;
case OP_PUSHCONST:
a = cd->constants[ARG16(cd->byteCode, f)];
if(IS_CODE(a)) a = bindFunction(ctx, f, a);
PUSH(ctx, a);
break;
case OP_PUSHONE:
PUSH(ctx, naNum(1));
break;
case OP_PUSHZERO:
PUSH(ctx, naNum(0));
break;
case OP_PUSHNIL:
PUSH(ctx, naNil());
break;
case OP_NEWVEC:
PUSH(ctx, naNewVector(ctx));
break;
case OP_VAPPEND:
b = POP(ctx); a = TOP(ctx);
naVec_append(a, b);
break;
case OP_NEWHASH:
PUSH(ctx, naNewHash(ctx));
break;
case OP_HAPPEND:
c = POP(ctx); b = POP(ctx); a = TOP(ctx); // a,b,c: hash, key, val
naHash_set(a, b, c);
break;
case OP_LOCAL:
a = getLocal(ctx, f, POP(ctx));
PUSH(ctx, a);
break;
case OP_SETLOCAL:
a = POP(ctx); b = POP(ctx);
PUSH(ctx, setLocal(f, b, a));
break;
case OP_MEMBER:
a = POP(ctx); b = POP(ctx);
if(!getMember(ctx, b, a, &c))
ERR(ctx, "no such member");
PUSH(ctx, c);
break;
case OP_SETMEMBER:
c = POP(ctx); b = POP(ctx); a = POP(ctx); // a,b,c: hash, key, val
if(!IS_HASH(a)) ERR(ctx, "non-objects have no members");
naHash_set(a, b, c);
PUSH(ctx, c);
break;
case OP_INSERT:
c = POP(ctx); b = POP(ctx); a = POP(ctx); // a,b,c: box, key, val
containerSet(ctx, a, b, c);
PUSH(ctx, c);
break;
case OP_EXTRACT:
b = POP(ctx); a = POP(ctx); // a,b: box, key
PUSH(ctx, containerGet(ctx, a, b));
break;
case OP_JMP:
f->ip = ARG16(cd->byteCode, f);
DBG(printf(" [Jump to: %d]\n", f->ip);)
break;
case OP_JIFNIL:
arg = ARG16(cd->byteCode, f);
a = TOP(ctx);
if(IS_NIL(a)) {
POP(ctx); // Pops **ONLY** if it's nil!
f->ip = arg;
while(1) {
op = cd->byteCode[f->ip++];
DBG(printf("Stack Depth: %d\n", ctx->opTop));
DBG(printOpDEBUG(f->ip-1, op));
switch(op) {
case OP_POP:
ctx->opTop--;
break;
case OP_DUP:
PUSH(ctx->opStack[ctx->opTop-1]);
break;
case OP_XCHG:
a = STK(1); STK(1) = STK(2); STK(2) = a;
break;
#define BINOP(expr) do { \
double l = IS_NUM(STK(2)) ? STK(2).num : numify(ctx, STK(2)); \
double r = IS_NUM(STK(1)) ? STK(1).num : numify(ctx, STK(1)); \
STK(2).ref.reftag = ~NASAL_REFTAG; \
STK(2).num = expr; \
ctx->opTop--; } while(0)
case OP_PLUS: BINOP(l + r); break;
case OP_MINUS: BINOP(l - r); break;
case OP_MUL: BINOP(l * r); break;
case OP_DIV: BINOP(l / r); break;
case OP_LT: BINOP(l < r ? 1 : 0); break;
case OP_LTE: BINOP(l <= r ? 1 : 0); break;
case OP_GT: BINOP(l > r ? 1 : 0); break;
case OP_GTE: BINOP(l >= r ? 1 : 0); break;
#undef BINOP
case OP_EQ: case OP_NEQ:
STK(2) = evalEquality(op, STK(2), STK(1));
ctx->opTop--;
break;
case OP_AND: case OP_OR:
STK(2) = evalAndOr(ctx, op, STK(2), STK(1));
ctx->opTop--;
break;
case OP_CAT:
// stringify can call the GC, so don't take stuff off the stack!
a = stringify(ctx, ctx->opStack[ctx->opTop-1]);
b = stringify(ctx, ctx->opStack[ctx->opTop-2]);
c = naStr_concat(naNewString(ctx), b, a);
ctx->opTop -= 2;
PUSH(c);
break;
case OP_NEG:
STK(1) = naNum(-numify(ctx, STK(1)));
break;
case OP_NOT:
STK(1) = naNum(boolify(ctx, STK(1)) ? 0 : 1);
break;
case OP_PUSHCONST:
a = CONSTARG();
if(IS_CODE(a)) a = bindFunction(ctx, f, a);
PUSH(a);
break;
case OP_PUSHONE:
PUSH(naNum(1));
break;
case OP_PUSHZERO:
PUSH(naNum(0));
break;
case OP_PUSHNIL:
PUSH(naNil());
break;
case OP_NEWVEC:
PUSH(naNewVector(ctx));
break;
case OP_VAPPEND:
naVec_append(STK(2), STK(1));
ctx->opTop--;
break;
case OP_NEWHASH:
PUSH(naNewHash(ctx));
break;
case OP_HAPPEND:
naHash_set(STK(3), STK(2), STK(1));
ctx->opTop -= 2;
break;
case OP_LOCAL:
a = CONSTARG();
getLocal(ctx, f, &a, &b);
PUSH(b);
break;
case OP_SETSYM:
STK(2) = setSymbol(f, STK(2), STK(1));
ctx->opTop--;
break;
case OP_SETLOCAL:
naHash_set(f->locals, STK(2), STK(1));
STK(2) = STK(1); // FIXME: reverse order of arguments instead!
ctx->opTop--;
break;
case OP_MEMBER:
if(!getMember(ctx, STK(1), CONSTARG(), &STK(1), 64))
ERR(ctx, "no such member");
break;
case OP_SETMEMBER:
if(!IS_HASH(STK(3))) ERR(ctx, "non-objects have no members");
naHash_set(STK(3), STK(2), STK(1));
STK(3) = STK(1); // FIXME: fix arg order instead
ctx->opTop -= 2;
break;
case OP_INSERT:
containerSet(ctx, STK(3), STK(2), STK(1));
STK(3) = STK(1); // FIXME: codegen order again...
ctx->opTop -= 2;
break;
case OP_EXTRACT:
STK(2) = containerGet(ctx, STK(2), STK(1));
ctx->opTop--;
break;
case OP_JMPLOOP:
// Identical to JMP, except for locking
naCheckBottleneck();
f->ip = cd->byteCode[f->ip];
DBG(printf(" [Jump to: %d]\n", f->ip);)
}
break;
case OP_JIFNOT:
arg = ARG16(cd->byteCode, f);
if(!boolify(ctx, POP(ctx))) {
f->ip = arg;
break;
case OP_JMP:
f->ip = cd->byteCode[f->ip];
DBG(printf(" [Jump to: %d]\n", f->ip);)
break;
case OP_JIFNIL:
arg = ARG();
if(IS_NIL(STK(1))) {
ctx->opTop--; // Pops **ONLY** if it's nil!
f->ip = arg;
DBG(printf(" [Jump to: %d]\n", f->ip);)
}
break;
case OP_JIFNOT:
arg = ARG();
if(!boolify(ctx, POP())) {
f->ip = arg;
DBG(printf(" [Jump to: %d]\n", f->ip);)
}
break;
case OP_FCALL:
f = setupFuncall(ctx, ARG(), 0, 0);
cd = f->func.ref.ptr.func->code.ref.ptr.code;
break;
case OP_FTAIL:
f = setupFuncall(ctx, ARG(), 0, 1);
cd = f->func.ref.ptr.func->code.ref.ptr.code;
break;
case OP_MCALL:
f = setupFuncall(ctx, ARG(), 1, 0);
cd = f->func.ref.ptr.func->code.ref.ptr.code;
break;
case OP_MTAIL:
f = setupFuncall(ctx, ARG(), 1, 1);
cd = f->func.ref.ptr.func->code.ref.ptr.code;
break;
case OP_RETURN:
a = STK(1);
if(--ctx->fTop <= 0) return a;
ctx->opTop = f->bp + 1; // restore the correct opstack frame!
STK(1) = a;
FIXFRAME();
break;
case OP_EACH:
evalEach(ctx);
break;
case OP_MARK: // save stack state (e.g. "setjmp")
if(ctx->markTop >= MAX_MARK_DEPTH)
naRuntimeError(ctx, "mark stack overflow");
ctx->markStack[ctx->markTop++] = ctx->opTop;
break;
case OP_UNMARK: // pop stack state set by mark
ctx->markTop--;
break;
case OP_BREAK: // restore stack state (FOLLOW WITH JMP!)
ctx->opTop = ctx->markStack[--ctx->markTop];
break;
default:
ERR(ctx, "BUG: bad opcode");
}
break;
case OP_FCALL:
b = POP(ctx); a = POP(ctx); // a,b = func, args
setupFuncall(ctx, a, b);
break;
case OP_MCALL:
c = POP(ctx); b = POP(ctx); a = POP(ctx); // a,b,c = obj, func, args
naVec_append(ctx->temps, a);
setupFuncall(ctx, b, c);
naHash_set(ctx->fStack[ctx->fTop-1].locals, ctx->meRef, a);
break;
case OP_RETURN:
a = POP(ctx);
ctx->opTop = f->bp; // restore the correct stack frame!
ctx->fTop--;
ctx->fStack[ctx->fTop].args.ref.ptr.vec->size = 0;
PUSH(ctx, a);
break;
case OP_LINE:
f->line = ARG16(cd->byteCode, f);
break;
case OP_EACH:
evalEach(ctx);
break;
case OP_MARK: // save stack state (e.g. "setjmp")
ctx->markStack[ctx->markTop++] = ctx->opTop;
break;
case OP_UNMARK: // pop stack state set by mark
ctx->markTop--;
break;
case OP_BREAK: // restore stack state (FOLLOW WITH JMP!)
ctx->opTop = ctx->markStack[--ctx->markTop];
break;
default:
ERR(ctx, "BUG: bad opcode");
ctx->temps.ref.ptr.vec->rec->size = 0; // reset GC temp vector
DBG(printStackDEBUG(ctx);)
}
if(ctx->fTop <= 0)
ctx->done = 1;
}
static void nativeCall(struct Context* ctx, struct Frame* f, naRef ccode)
{
naCFunction fptr = ccode.ref.ptr.ccode->fptr;
naRef result = (*fptr)(ctx, f->args);
ctx->fTop--;
ctx->fStack[ctx->fTop].args.ref.ptr.vec->size = 0;
PUSH(ctx, result);
return naNil(); // unreachable
}
#undef POP
#undef CONSTARG
#undef STK
#undef FIXFRAME
void naSave(struct Context* ctx, naRef obj)
{
ctx->save = obj;
naVec_append(globals->save, obj);
}
// FIXME: handle ctx->callParent
// Number of active Nasal call frames in this context.  Frames of a
// parent context (ctx->callParent) are not included -- see the FIXME
// above.
int naStackDepth(struct Context* ctx)
{
return ctx->fTop;
}
// FIXME: handle ctx->callParent
int naGetLine(struct Context* ctx, int frame)
{
return ctx->fStack[ctx->fTop-1-frame].line;
struct Frame* f = &ctx->fStack[ctx->fTop-1-frame];
naRef func = f->func;
int ip = f->ip;
if(IS_FUNC(func) && IS_CODE(func.ref.ptr.func->code)) {
struct naCode* c = func.ref.ptr.func->code.ref.ptr.code;
unsigned short* p = c->lineIps + c->nLines - 2;
while(p >= c->lineIps && p[0] > ip)
p -= 2;
return p[1];
}
return -1;
}
// FIXME: handle ctx->callParent
naRef naGetSourceFile(struct Context* ctx, int frame)
{
naRef f = ctx->fStack[ctx->fTop-1-frame].func;
@@ -528,41 +612,33 @@ naRef naGetSourceFile(struct Context* ctx, int frame)
char* naGetError(struct Context* ctx)
{
if(IS_STR(ctx->dieArg))
return ctx->dieArg.ref.ptr.str->data;
return ctx->error;
}
static naRef run(naContext ctx)
{
// Return early if an error occurred. It will be visible to the
// caller via naGetError().
ctx->error = 0;
if(setjmp(ctx->jumpHandle))
return naNil();
ctx->done = 0;
while(!ctx->done) {
struct Frame* f = &(ctx->fStack[ctx->fTop-1]);
naRef code = f->func.ref.ptr.func->code;
if(IS_CCODE(code)) nativeCall(ctx, f, code);
else run1(ctx, f, code);
ctx->temps.ref.ptr.vec->size = 0; // Reset the temporaries
DBG(printStackDEBUG(ctx);)
}
DBG(printStackDEBUG(ctx);)
return ctx->opStack[--ctx->opTop];
}
naRef naBindFunction(naContext ctx, naRef code, naRef closure)
{
naRef func = naNewFunc(ctx, code);
func.ref.ptr.func->closure = naNewClosure(ctx, closure, naNil());
func.ref.ptr.func->namespace = closure;
func.ref.ptr.func->next = naNil();
return func;
}
// Wrap a bare code object in a function bound to the lexical
// environment of the context's *current* (innermost) stack frame:
// the new function's namespace is that frame's locals, and its
// closure chain continues through the frame's own function.
// NOTE(review): assumes fTop >= 1, i.e. a call is in progress --
// calling this with an empty frame stack reads fStack[-1].
naRef naBindToContext(naContext ctx, naRef code)
{
naRef func = naNewFunc(ctx, code);
struct Frame* f = &ctx->fStack[ctx->fTop-1];
func.ref.ptr.func->namespace = f->locals;
func.ref.ptr.func->next = f->func;
return func;
}
naRef naCall(naContext ctx, naRef func, naRef args, naRef obj, naRef locals)
{
naRef result;
if(!ctx->callParent) naModLock(ctx);
// We might have to allocate objects, which can call the GC. But
// the call isn't on the Nasal stack yet, so the GC won't find our
// C-space arguments.
@@ -571,23 +647,38 @@ naRef naCall(naContext ctx, naRef func, naRef args, naRef obj, naRef locals)
naVec_append(ctx->temps, obj);
naVec_append(ctx->temps, locals);
if(IS_NIL(args))
args = naNewVector(ctx);
if(IS_NIL(locals))
locals = naNewHash(ctx);
if(!IS_FUNC(func)) {
// Generate a noop closure for bare code objects
naRef code = func;
func = naNewFunc(ctx, code);
func.ref.ptr.func->closure = naNewClosure(ctx, locals, naNil());
}
if(!IS_FUNC(func))
func = naNewFunc(ctx, func); // bind bare code objects
if(!IS_NIL(args))
naHash_set(locals, globals->argRef, args);
if(!IS_NIL(obj))
naHash_set(locals, ctx->meRef, obj);
naHash_set(locals, globals->meRef, obj);
ctx->fTop = ctx->opTop = ctx->markTop = 0;
setupFuncall(ctx, func, args);
ctx->fStack[ctx->fTop-1].locals = locals;
ctx->dieArg = naNil();
return run(ctx);
ctx->opTop = ctx->markTop = 0;
ctx->fTop = 1;
ctx->fStack[0].func = func;
ctx->fStack[0].locals = locals;
ctx->fStack[0].ip = 0;
ctx->fStack[0].bp = ctx->opTop;
// Return early if an error occurred. It will be visible to the
// caller via naGetError().
ctx->error = 0;
if(setjmp(ctx->jumpHandle))
return naNil();
if(IS_CCODE(func.ref.ptr.func->code)) {
naCFunction fp = func.ref.ptr.func->code.ref.ptr.ccode->fptr;
struct naVec* av = args.ref.ptr.vec;
result = (*fp)(ctx, obj, av->rec->size, av->rec->array);
} else
result = run(ctx);
if(!ctx->callParent) naModUnlock(ctx);
return result;
}

View File

@@ -5,33 +5,65 @@
#include "nasal.h"
#include "data.h"
#define MAX_STACK_DEPTH 1024
#define MAX_STACK_DEPTH 512
#define MAX_RECURSION 128
#define MAX_MARK_DEPTH 32
#define MAX_MARK_DEPTH 128
// Number of objects (per pool per thread) asked for using naGC_get().
// Testing with fib.nas shows that this gives the best performance,
// without too much per-thread overhead.
#define OBJ_CACHE_SZ 128
enum {
OP_AND, OP_OR, OP_NOT, OP_MUL, OP_PLUS, OP_MINUS, OP_DIV, OP_NEG,
OP_CAT, OP_LT, OP_LTE, OP_GT, OP_GTE, OP_EQ, OP_NEQ, OP_EACH,
OP_JMP, OP_JIFNOT, OP_JIFNIL, OP_FCALL, OP_MCALL, OP_RETURN,
OP_JMP, OP_JMPLOOP, OP_JIFNOT, OP_JIFNIL, OP_FCALL, OP_MCALL, OP_RETURN,
OP_PUSHCONST, OP_PUSHONE, OP_PUSHZERO, OP_PUSHNIL, OP_POP,
OP_DUP, OP_XCHG, OP_INSERT, OP_EXTRACT, OP_MEMBER, OP_SETMEMBER,
OP_LOCAL, OP_SETLOCAL, OP_NEWVEC, OP_VAPPEND, OP_NEWHASH, OP_HAPPEND,
OP_LINE, OP_MARK, OP_UNMARK, OP_BREAK
OP_MARK, OP_UNMARK, OP_BREAK, OP_FTAIL, OP_MTAIL, OP_SETSYM
};
struct Frame {
naRef func; // naFunc object
naRef locals; // local per-call namespace
naRef args; // vector of arguments
int ip; // instruction pointer into code
int bp; // opStack pointer to start of frame
int line; // current line number
};
struct Globals {
// Garbage collecting allocators:
struct naPool pools[NUM_NASAL_TYPES];
int allocCount;
// Dead blocks waiting to be freed when it is safe
void** deadBlocks;
int deadsz;
int ndead;
// Threading stuff
int nThreads;
int waitCount;
int needGC;
int bottleneck;
void* sem;
void* lock;
// Constants
naRef meRef;
naRef argRef;
naRef parentsRef;
// A hash of symbol names
naRef symbols;
naRef save;
struct Context* freeContexts;
struct Context* allContexts;
};
struct Context {
// Garbage collecting allocators:
struct naPool pools[NUM_NASAL_TYPES];
// Stack(s)
struct Frame fStack[MAX_RECURSION];
int fTop;
@@ -39,26 +71,44 @@ struct Context {
int opTop;
int markStack[MAX_MARK_DEPTH];
int markTop;
int done;
// Constants
naRef meRef;
naRef argRef;
naRef parentsRef;
// Free object lists, cached from the global GC
struct naObj** free[NUM_NASAL_TYPES];
int nfree[NUM_NASAL_TYPES];
// GC-findable reference point for objects that may live on the
// processor ("real") stack during execution. naNew() places them
// here, and clears the array each instruction
naRef temps;
// Error handling
jmp_buf jumpHandle;
char* error;
naRef dieArg;
// GC-findable reference point for objects that may live on the
// processor ("real") stack during execution. naNew() places them
// here, and clears the array each time we return from a C
// function.
naRef temps;
// Sub-call lists
struct Context* callParent;
struct Context* callChild;
naRef save;
// Linked list pointers in globals
struct Context* nextFree;
struct Context* nextAll;
};
void printRefDEBUG(naRef r);
#define globals nasal_globals
extern struct Globals* globals;
// Threading low-level functions
void* naNewLock();
void naLock(void* lock);
void naUnlock(void* lock);
void* naNewSem();
void naSemDown(void* sem);
void naSemUpAll(void* sem, int count);
void naCheckBottleneck();
#define LOCK() naLock(globals->lock)
#define UNLOCK() naUnlock(globals->lock)
#endif // _CODE_H

View File

@@ -1,6 +1,8 @@
#include "parse.h"
#include "code.h"
#define MAX_FUNARGS 32
// These are more sensical predicate names in most contexts in this file
#define LEFT(tok) ((tok)->children)
#define RIGHT(tok) ((tok)->lastChild)
@@ -9,24 +11,24 @@
// Forward references for recursion
static void genExpr(struct Parser* p, struct Token* t);
static void genExprList(struct Parser* p, struct Token* t);
static naRef newLambda(struct Parser* p, struct Token* t);
static void emit(struct Parser* p, int byte)
static void emit(struct Parser* p, int val)
{
if(p->cg->nBytes >= p->cg->codeAlloced) {
if(p->cg->codesz >= p->cg->codeAlloced) {
int i, sz = p->cg->codeAlloced * 2;
unsigned char* buf = naParseAlloc(p, sz);
unsigned short* buf = naParseAlloc(p, sz*sizeof(unsigned short));
for(i=0; i<p->cg->codeAlloced; i++) buf[i] = p->cg->byteCode[i];
p->cg->byteCode = buf;
p->cg->codeAlloced = sz;
}
p->cg->byteCode[p->cg->nBytes++] = (unsigned char)byte;
p->cg->byteCode[p->cg->codesz++] = (unsigned short)val;
}
static void emitImmediate(struct Parser* p, int byte, int arg)
static void emitImmediate(struct Parser* p, int val, int arg)
{
emit(p, byte);
emit(p, arg >> 8);
emit(p, arg & 0xff);
emit(p, val);
emit(p, arg);
}
static void genBinOp(int op, struct Parser* p, struct Token* t)
@@ -55,36 +57,87 @@ static naRef getConstant(struct Parser* p, int idx)
// Interns a scalar (!) constant and returns its index
static int internConstant(struct Parser* p, naRef c)
{
int i, j, n = naVec_size(p->cg->consts);
int i, n = naVec_size(p->cg->consts);
if(IS_CODE(c)) return newConstant(p, c);
for(i=0; i<n; i++) {
naRef b = naVec_get(p->cg->consts, i);
if(IS_NUM(b) && IS_NUM(c) && b.num == c.num)
return i;
if(IS_STR(b) && IS_STR(c)) {
int len = naStr_len(c);
char* cs = naStr_data(c);
char* bs = naStr_data(b);
if(naStr_len(b) != len)
continue;
for(j=0; j<len; j++)
if(cs[j] != bs[j])
continue;
}
if(IS_REF(b) && IS_REF(c))
if(b.ref.ptr.obj->type == c.ref.ptr.obj->type)
if(naEqual(b, c))
return i;
if(IS_NUM(b) && IS_NUM(c) && b.num == c.num) return i;
else if(IS_NIL(b) && IS_NIL(c)) return i;
else if(naStrEqual(b, c)) return i;
}
return newConstant(p, c);
}
// Return the canonical (interned) naRef for a symbol string.  The
// first occurrence of a symbol is recorded in the global symbol
// table and becomes its own canonical representative; later lookups
// of an equal string return that same reference, so interned symbols
// can be compared by identity.
naRef naInternSymbol(naRef sym)
{
    naRef interned;
    if(!naHash_get(globals->symbols, sym, &interned)) {
        naHash_set(globals->symbols, sym, sym);
        interned = sym;
    }
    return interned;
}
static int findConstantIndex(struct Parser* p, struct Token* t)
{
naRef c;
if(t->type == TOK_NIL) c = naNil();
else if(t->str) {
c = naStr_fromdata(naNewString(p->context), t->str, t->strlen);
if(t->type == TOK_SYMBOL) c = naInternSymbol(c);
} else if(t->type == TOK_FUNC) c = newLambda(p, t);
else if(t->type == TOK_LITERAL) c = naNum(t->num);
else naParseError(p, "invalid/non-constant constant", t->line);
return internConstant(p, c);
}
// True if token t is the final expression of its enclosing block:
// either there is no parent, the parent is the top level or a curly
// block, or the parent is a semicolon chain in which t has no
// (non-empty) successor.
static int lastExprInBlock(struct Token* t)
{
    struct Token* up = t->parent;
    if(up == 0) return 1;
    if(up->type == TOK_TOP || up->type == TOK_LCURL) return 1;
    if(up->type == TOK_SEMI && (t->next == 0 || t->next->type == TOK_EMPTY))
        return 1;
    return 0;
}
// Returns true if the node is in "tail context": a child of a return
// statement, the last expression of a func block, or the last
// expression of an if/elsif/else that is itself in tail context.
static int tailContext(struct Token* t)
{
    struct Token* a;
    if(t->parent && t->parent->type == TOK_RETURN)
        return 1;
    if(!lastExprInBlock(t))
        return 0;
    // Climb toward the root.  Semicolons, curlies, else and elsif
    // nodes are transparent; reaching the top or a func means tail
    // context; an "if" inherits whatever context the if-node has.
    for(a = t->parent; a != 0; a = a->parent) {
        int ty = a->type;
        if(ty == TOK_SEMI || ty == TOK_LCURL)
            continue;
        if(ty == TOK_ELSE || ty == TOK_ELSIF)
            continue;
        if(ty == TOK_TOP || ty == TOK_FUNC)
            return 1;
        if(ty == TOK_IF)
            return tailContext(a);
        return 0;
    }
    return 0;
}
static void genScalarConstant(struct Parser* p, struct Token* t)
{
naRef c = (t->str
? naStr_fromdata(naNewString(p->context), t->str, t->strlen)
: naNum(t->num));
int idx = internConstant(p, c);
emitImmediate(p, OP_PUSHCONST, idx);
// These opcodes are for special-case use in other constructs, but
// we might as well use them here to save a few bytes in the
// instruction stream.
if(t->str == 0 && t->num == 1) {
emit(p, OP_PUSHONE);
} else if(t->str == 0 && t->num == 0) {
emit(p, OP_PUSHZERO);
} else {
int idx = findConstantIndex(p, t);
emitImmediate(p, OP_PUSHCONST, idx);
}
}
static int genLValue(struct Parser* p, struct Token* t)
@@ -93,7 +146,7 @@ static int genLValue(struct Parser* p, struct Token* t)
return genLValue(p, LEFT(t)); // Handle stuff like "(a) = 1"
} else if(t->type == TOK_SYMBOL) {
genScalarConstant(p, t);
return OP_SETLOCAL;
return OP_SETSYM;
} else if(t->type == TOK_DOT && RIGHT(t) && RIGHT(t)->type == TOK_SYMBOL) {
genExpr(p, LEFT(t));
genScalarConstant(p, RIGHT(t));
@@ -102,40 +155,85 @@ static int genLValue(struct Parser* p, struct Token* t)
genExpr(p, LEFT(t));
genExpr(p, RIGHT(t));
return OP_INSERT;
} else if(t->type == TOK_VAR && RIGHT(t)->type == TOK_SYMBOL) {
genScalarConstant(p, RIGHT(t));
return OP_SETLOCAL;
} else {
naParseError(p, "bad lvalue", t->line);
return -1;
}
}
static void genLambda(struct Parser* p, struct Token* t)
// Intern an optional-argument default value, unwrapping any number of
// enclosing parentheses first, and return its constant-pool index.
static int defArg(struct Parser* p, struct Token* t)
{
    while(t->type == TOK_LPAR)
        t = RIGHT(t);
    return findConstantIndex(p, t);
}
static void genArgList(struct Parser* p, struct naCode* c, struct Token* t)
{
naRef sym;
if(t->type == TOK_EMPTY) return;
if(!IDENTICAL(c->restArgSym, globals->argRef))
naParseError(p, "remainder must be last", t->line);
if(t->type == TOK_ELLIPSIS) {
if(LEFT(t)->type != TOK_SYMBOL)
naParseError(p, "bad function argument expression", t->line);
sym = naStr_fromdata(naNewString(p->context),
LEFT(t)->str, LEFT(t)->strlen);
c->restArgSym = naInternSymbol(sym);
c->needArgVector = 1;
} else if(t->type == TOK_ASSIGN) {
if(LEFT(t)->type != TOK_SYMBOL)
naParseError(p, "bad function argument expression", t->line);
c->optArgSyms[c->nOptArgs] = findConstantIndex(p, LEFT(t));
c->optArgVals[c->nOptArgs++] = defArg(p, RIGHT(t));
} else if(t->type == TOK_SYMBOL) {
if(c->nOptArgs)
naParseError(p, "optional arguments must be last", t->line);
if(c->nArgs >= MAX_FUNARGS)
naParseError(p, "too many named function arguments", t->line);
c->argSyms[c->nArgs++] = findConstantIndex(p, t);
} else if(t->type == TOK_COMMA) {
genArgList(p, c, LEFT(t));
genArgList(p, c, RIGHT(t));
} else
naParseError(p, "bad function argument expression", t->line);
}
static naRef newLambda(struct Parser* p, struct Token* t)
{
int idx;
struct CodeGenerator* cgSave;
naRef codeObj;
if(LEFT(t)->type != TOK_LCURL)
struct Token* arglist;
if(RIGHT(t)->type != TOK_LCURL)
naParseError(p, "bad function definition", t->line);
// Save off the generator state while we do the new one
cgSave = p->cg;
codeObj = naCodeGen(p, LEFT(LEFT(t)));
arglist = LEFT(t)->type == TOK_LPAR ? LEFT(LEFT(t)) : 0;
codeObj = naCodeGen(p, LEFT(RIGHT(t)), arglist);
p->cg = cgSave;
idx = newConstant(p, codeObj);
emitImmediate(p, OP_PUSHCONST, idx);
return codeObj;
}
static void genList(struct Parser* p, struct Token* t)
static void genLambda(struct Parser* p, struct Token* t)
{
emitImmediate(p, OP_PUSHCONST, newConstant(p, newLambda(p, t)));
}
static int genList(struct Parser* p, struct Token* t, int doAppend)
{
if(t->type == TOK_COMMA) {
genExpr(p, LEFT(t));
emit(p, OP_VAPPEND);
genList(p, RIGHT(t));
if(doAppend) emit(p, OP_VAPPEND);
return 1 + genList(p, RIGHT(t), doAppend);
} else if(t->type == TOK_EMPTY) {
return;
return 0;
} else {
genExpr(p, t);
emit(p, OP_VAPPEND);
if(doAppend) emit(p, OP_VAPPEND);
return 1;
}
}
@@ -165,18 +263,19 @@ static void genHash(struct Parser* p, struct Token* t)
static void genFuncall(struct Parser* p, struct Token* t)
{
int op = OP_FCALL;
int nargs = 0;
if(LEFT(t)->type == TOK_DOT) {
genExpr(p, LEFT(LEFT(t)));
emit(p, OP_DUP);
genScalarConstant(p, RIGHT(LEFT(t)));
emit(p, OP_MEMBER);
emitImmediate(p, OP_MEMBER, findConstantIndex(p, RIGHT(LEFT(t))));
op = OP_MCALL;
} else {
genExpr(p, LEFT(t));
}
emit(p, OP_NEWVEC);
if(RIGHT(t)) genList(p, RIGHT(t));
emit(p, op);
if(RIGHT(t)) nargs = genList(p, RIGHT(t), 0);
if(tailContext(t))
op = op == OP_FCALL ? OP_FTAIL : OP_MTAIL;
emitImmediate(p, op, nargs);
}
static void pushLoop(struct Parser* p, struct Token* label)
@@ -202,17 +301,15 @@ static int emitJump(struct Parser* p, int op)
{
int ip;
emit(p, op);
ip = p->cg->nBytes;
emit(p, 0xff); // dummy address
emit(p, 0xff);
ip = p->cg->codesz;
emit(p, 0xffff); // dummy address
return ip;
}
// Points a previous jump instruction at the current "end-of-bytecode"
static void fixJumpTarget(struct Parser* p, int spot)
{
p->cg->byteCode[spot] = p->cg->nBytes >> 8;
p->cg->byteCode[spot+1] = p->cg->nBytes & 0xff;
p->cg->byteCode[spot] = p->cg->codesz;
}
static void genShortCircuit(struct Parser* p, struct Token* t)
@@ -251,6 +348,20 @@ static void genIfElse(struct Parser* p, struct Token* t)
genIf(p, t, t->children->next->next);
}
// Generate code for a ternary "test ? a : b" expression using two
// forward jumps: one over the true branch, one over the false branch.
static void genQuestion(struct Parser* p, struct Token* t)
{
    int overTrue, overFalse;
    struct Token* colon = RIGHT(t);
    if(colon == 0 || colon->type != TOK_COLON)
        naParseError(p, "invalid ?: expression", t->line);
    genExpr(p, LEFT(t));                // the test
    overTrue = emitJump(p, OP_JIFNOT);  // false -> skip true branch
    genExpr(p, LEFT(colon));            // the "if true" expr
    overFalse = emitJump(p, OP_JMP);    // true -> skip false branch
    fixJumpTarget(p, overTrue);
    genExpr(p, RIGHT(colon));           // the "else" expr
    fixJumpTarget(p, overFalse);
}
static int countSemis(struct Token* t)
{
if(!t || t->type != TOK_SEMI) return 0;
@@ -266,7 +377,7 @@ static void genLoop(struct Parser* p, struct Token* body,
p->cg->loops[p->cg->loopTop-1].breakIP = jumpEnd-1;
jumpOverContinue = emitJump(p, OP_JMP);
p->cg->loops[p->cg->loopTop-1].contIP = p->cg->nBytes;
p->cg->loops[p->cg->loopTop-1].contIP = p->cg->codesz;
cont = emitJump(p, OP_JMP);
fixJumpTarget(p, jumpOverContinue);
@@ -274,7 +385,7 @@ static void genLoop(struct Parser* p, struct Token* body,
emit(p, OP_POP);
fixJumpTarget(p, cont);
if(update) { genExpr(p, update); emit(p, OP_POP); }
emitImmediate(p, OP_JMP, loopTop);
emitImmediate(p, OP_JMPLOOP, loopTop);
fixJumpTarget(p, jumpEnd);
popLoop(p);
emit(p, OP_PUSHNIL); // Leave something on the stack
@@ -287,7 +398,7 @@ static void genForWhile(struct Parser* p, struct Token* init,
int loopTop, jumpEnd;
if(init) { genExpr(p, init); emit(p, OP_POP); }
pushLoop(p, label);
loopTop = p->cg->nBytes;
loopTop = p->cg->codesz;
genExpr(p, test);
jumpEnd = emitJump(p, OP_JIFNOT);
genLoop(p, body, update, label, loopTop, jumpEnd);
@@ -352,7 +463,7 @@ static void genForEach(struct Parser* p, struct Token* t)
pushLoop(p, label);
genExpr(p, vec);
emit(p, OP_PUSHZERO);
loopTop = p->cg->nBytes;
loopTop = p->cg->codesz;
emit(p, OP_EACH);
jumpEnd = emitJump(p, OP_JIFNIL);
assignOp = genLValue(p, elem);
@@ -393,18 +504,34 @@ static void genBreakContinue(struct Parser* p, struct Token* t)
emitImmediate(p, OP_JMP, t->type == TOK_BREAK ? bp : cp);
}
// Record an {ip, line} pair in the code generator's line table,
// growing the parse-arena allocated array as needed.  Note that
// nextLineIp counts individual shorts (two per recorded pair).
static void newLineEntry(struct Parser* p, int line)
{
    int i;
    if(p->cg->nextLineIp >= p->cg->nLineIps) {
        int nsz = p->cg->nLineIps*2 + 1;
        unsigned short* n = naParseAlloc(p, sizeof(unsigned short)*2*nsz);
        // Copy only the shorts actually written so far.  The old
        // bound of nextLineIp*2 double-counted (nextLineIp is already
        // in shorts) and read past the end of the previous buffer,
        // which holds only 2*nLineIps shorts.
        for(i=0; i<p->cg->nextLineIp; i++)
            n[i] = p->cg->lineIps[i];
        p->cg->lineIps = n;
        p->cg->nLineIps = nsz;
    }
    p->cg->lineIps[p->cg->nextLineIp++] = (unsigned short) p->cg->codesz;
    p->cg->lineIps[p->cg->nextLineIp++] = (unsigned short) line;
}
static void genExpr(struct Parser* p, struct Token* t)
{
int i;
if(t == 0)
naParseError(p, "BUG: null subexpression", -1);
if(t->line != p->cg->lastLine)
emitImmediate(p, OP_LINE, t->line);
newLineEntry(p, t->line);
p->cg->lastLine = t->line;
switch(t->type) {
case TOK_IF:
genIfElse(p, t);
break;
case TOK_QUESTION:
genQuestion(p, t);
break;
case TOK_WHILE:
genWhile(p, t);
break;
@@ -432,7 +559,7 @@ static void genExpr(struct Parser* p, struct Token* t)
genBinOp(OP_EXTRACT, p, t); // a[i]
} else {
emit(p, OP_NEWVEC);
genList(p, LEFT(t));
genList(p, LEFT(t), 1);
}
break;
case TOK_LCURL:
@@ -447,6 +574,7 @@ static void genExpr(struct Parser* p, struct Token* t)
case TOK_RETURN:
if(RIGHT(t)) genExpr(p, RIGHT(t));
else emit(p, OP_PUSHNIL);
for(i=0; i<p->cg->loopTop; i++) emit(p, OP_UNMARK);
emit(p, OP_RETURN);
break;
case TOK_NOT:
@@ -454,8 +582,7 @@ static void genExpr(struct Parser* p, struct Token* t)
emit(p, OP_NOT);
break;
case TOK_SYMBOL:
genScalarConstant(p, t);
emit(p, OP_LOCAL);
emitImmediate(p, OP_LOCAL, findConstantIndex(p, t));
break;
case TOK_LITERAL:
genScalarConstant(p, t);
@@ -479,8 +606,7 @@ static void genExpr(struct Parser* p, struct Token* t)
genExpr(p, LEFT(t));
if(RIGHT(t)->type != TOK_SYMBOL)
naParseError(p, "object field not symbol", RIGHT(t)->line);
genScalarConstant(p, RIGHT(t));
emit(p, OP_MEMBER);
emitImmediate(p, OP_MEMBER, findConstantIndex(p, RIGHT(t)));
break;
case TOK_EMPTY: case TOK_NIL:
emit(p, OP_PUSHNIL); break; // *NOT* a noop!
@@ -515,7 +641,7 @@ static void genExprList(struct Parser* p, struct Token* t)
}
}
naRef naCodeGen(struct Parser* p, struct Token* t)
naRef naCodeGen(struct Parser* p, struct Token* block, struct Token* arglist)
{
int i;
naRef codeObj;
@@ -524,26 +650,62 @@ naRef naCodeGen(struct Parser* p, struct Token* t)
cg.lastLine = 0;
cg.codeAlloced = 1024; // Start fairly big, this is a cheap allocation
cg.byteCode = naParseAlloc(p, cg.codeAlloced);
cg.nBytes = 0;
cg.byteCode = naParseAlloc(p, cg.codeAlloced *sizeof(unsigned short));
cg.codesz = 0;
cg.consts = naNewVector(p->context);
cg.loopTop = 0;
cg.lineIps = 0;
cg.nLineIps = 0;
cg.nextLineIp = 0;
p->cg = &cg;
genExprList(p, t);
genExprList(p, block);
emit(p, OP_RETURN);
// Now make a code object
codeObj = naNewCode(p->context);
code = codeObj.ref.ptr.code;
code->nBytes = cg.nBytes;
code->byteCode = naAlloc(cg.nBytes);
for(i=0; i < cg.nBytes; i++)
// Parse the argument list, if any
code->restArgSym = globals->argRef;
code->nArgs = code->nOptArgs = 0;
code->argSyms = code->optArgSyms = code->optArgVals = 0;
code->needArgVector = 1;
if(arglist) {
code->argSyms = naParseAlloc(p, sizeof(int) * MAX_FUNARGS);
code->optArgSyms = naParseAlloc(p, sizeof(int) * MAX_FUNARGS);
code->optArgVals = naParseAlloc(p, sizeof(int) * MAX_FUNARGS);
code->needArgVector = 0;
genArgList(p, code, arglist);
if(code->nArgs) {
int i, *nsyms;
nsyms = naAlloc(sizeof(int) * code->nArgs);
for(i=0; i<code->nArgs; i++) nsyms[i] = code->argSyms[i];
code->argSyms = nsyms;
} else code->argSyms = 0;
if(code->nOptArgs) {
int i, *nsyms, *nvals;
nsyms = naAlloc(sizeof(int) * code->nOptArgs);
nvals = naAlloc(sizeof(int) * code->nOptArgs);
for(i=0; i<code->nOptArgs; i++) nsyms[i] = code->optArgSyms[i];
for(i=0; i<code->nOptArgs; i++) nvals[i] = code->optArgVals[i];
code->optArgSyms = nsyms;
code->optArgVals = nvals;
} else code->optArgSyms = code->optArgVals = 0;
}
code->codesz = cg.codesz;
code->byteCode = naAlloc(cg.codesz * sizeof(unsigned short));
for(i=0; i < cg.codesz; i++)
code->byteCode[i] = cg.byteCode[i];
code->nConstants = naVec_size(cg.consts);
code->constants = naAlloc(code->nConstants * sizeof(naRef));
code->srcFile = p->srcFile;
for(i=0; i<code->nConstants; i++)
code->constants[i] = getConstant(p, i);
code->nLines = p->cg->nextLineIp;
code->lineIps = naAlloc(sizeof(unsigned short)*p->cg->nLineIps*2);
for(i=0; i<p->cg->nLineIps*2; i++)
code->lineIps[i] = p->cg->lineIps[i];
return codeObj;
}

View File

@@ -5,25 +5,27 @@
// Notes: A CODE object is a compiled set of bytecode instructions.
// What actually gets executed at runtime is a bound FUNC object,
// which combines the raw code with a pointer to a CLOSURE chain of
// namespaces.
enum { T_STR, T_VEC, T_HASH, T_CODE, T_CLOSURE, T_FUNC, T_CCODE, T_GHOST,
// which combines the raw code with a namespace and a pointer to
// parent function in the lexical closure.
enum { T_STR, T_VEC, T_HASH, T_CODE, T_FUNC, T_CCODE, T_GHOST,
NUM_NASAL_TYPES }; // V. important that this come last!
#define IS_REF(r) ((r).ref.reftag == NASAL_REFTAG)
#define IS_NUM(r) ((r).ref.reftag != NASAL_REFTAG)
#define IS_OBJ(r) (IS_REF((r)) && (r).ref.ptr.obj != 0)
//#define IS_OBJ(r) (IS_REF((r)) && (r).ref.ptr.obj != 0 && (((r).ref.ptr.obj->type == 123) ? *(int*)0 : 1))
#define IS_NIL(r) (IS_REF((r)) && (r).ref.ptr.obj == 0)
#define IS_STR(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_STR)
#define IS_VEC(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_VEC)
#define IS_HASH(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_HASH)
#define IS_CODE(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_CODE)
#define IS_FUNC(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_FUNC)
#define IS_CLOSURE(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_CLOSURE)
#define IS_CCODE(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_CCODE)
#define IS_GHOST(r) (IS_OBJ((r)) && (r).ref.ptr.obj->type == T_GHOST)
#define IS_CONTAINER(r) (IS_VEC(r)||IS_HASH(r))
#define IS_SCALAR(r) (IS_NUM((r)) || IS_STR((r)))
#define IDENTICAL(a, b) (IS_REF(a) && IS_REF(b) \
&& a.ref.ptr.obj == b.ref.ptr.obj)
// This is a macro instead of a separate struct to allow compilers to
// avoid padding. GCC on x86, at least, will always padd the size of
@@ -31,7 +33,7 @@ enum { T_STR, T_VEC, T_HASH, T_CODE, T_CLOSURE, T_FUNC, T_CCODE, T_GHOST,
// implementing objects to pack in 16 bits worth of data "for free".
#define GC_HEADER \
unsigned char mark; \
unsigned char type
unsigned char type; \
struct naObj {
GC_HEADER;
@@ -41,13 +43,18 @@ struct naStr {
GC_HEADER;
int len;
unsigned char* data;
unsigned int hashcode;
};
struct VecRec {
int size;
int alloced;
naRef array[];
};
struct naVec {
GC_HEADER;
int size;
int alloced;
naRef* array;
struct VecRec* rec;
};
struct HashNode {
@@ -56,32 +63,40 @@ struct HashNode {
struct HashNode* next;
};
struct naHash {
GC_HEADER;
struct HashRec {
int size;
int dels;
int lgalloced;
struct HashNode* nodes;
struct HashNode** table;
int nextnode;
struct HashNode* table[];
};
struct naHash {
GC_HEADER;
struct HashRec* rec;
};
struct naCode {
GC_HEADER;
unsigned char* byteCode;
int nBytes;
unsigned char nArgs;
unsigned char nOptArgs;
unsigned char needArgVector;
unsigned short nConstants;
unsigned short nLines;
unsigned short codesz;
unsigned short* byteCode;
naRef* constants;
int nConstants;
int* argSyms; // indices into constants
int* optArgSyms;
int* optArgVals;
unsigned short* lineIps; // pairs of {ip, line}
naRef srcFile;
naRef restArgSym; // The "..." vector name, defaults to "arg"
};
struct naFunc {
GC_HEADER;
naRef code;
naRef closure;
};
struct naClosure {
GC_HEADER;
naRef namespace;
naRef next; // parent closure
};
@@ -100,11 +115,12 @@ struct naGhost {
struct naPool {
int type;
int elemsz;
int nblocks;
struct Block* blocks;
int nfree; // number of entries in the free array
int freesz; // size of the free array
void** free; // pointers to usable elements
void** free0; // pointer to the alloced buffer
int freesz; // size of the alloced buffer
void** free; // current "free frame"
int nfree; // down-counting index within the free frame
int freetop; // curr. top of the free list
};
void naFree(void* m);
@@ -112,11 +128,9 @@ void* naAlloc(int n);
void naBZero(void* m, int n);
int naTypeSize(int type);
void naGarbageCollect();
naRef naObj(int type, struct naObj* o);
naRef naNew(naContext c, int type);
naRef naNewCode(naContext c);
naRef naNewClosure(naContext c, naRef namespace, naRef next);
int naStr_equal(naRef s1, naRef s2);
naRef naStr_fromnum(naRef dest, double num);
@@ -124,16 +138,14 @@ int naStr_numeric(naRef str);
int naStr_parsenum(char* str, int len, double* result);
int naStr_tonum(naRef str, double* out);
void naVec_init(naRef vec);
int naHash_tryset(naRef hash, naRef key, naRef val); // sets if exists
void naHash_init(naRef hash);
int naHash_sym(struct naHash* h, struct naStr* sym, naRef* out);
void naHash_newsym(struct naHash* h, naRef* sym, naRef* val);
void naGC_init(struct naPool* p, int type);
struct naObj* naGC_get(struct naPool* p);
int naGC_size(struct naPool* p);
void naGC_mark(naRef r);
void naGC_reap(struct naPool* p);
struct naObj** naGC_get(struct naPool* p, int n, int* nout);
void naGC_swapfree(void** target, void* val);
void naGC_freedead();
void naStr_gcclean(struct naStr* s);
void naVec_gcclean(struct naVec* s);

View File

@@ -1,211 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "nasal.h"
#include "parse.h"
#include "code.h"
// Bytecode operator to string.  Unknown opcodes are formatted into a
// static scratch buffer, so the result of a failed lookup must be
// consumed before the next call (and is not thread-safe).
char* opStringDEBUG(int op)
{
    static char buf[256];
    char* name = 0;
    switch(op) {
    case OP_AND:       name = "AND";       break;
    case OP_OR:        name = "OR";        break;
    case OP_NOT:       name = "NOT";       break;
    case OP_MUL:       name = "MUL";       break;
    case OP_PLUS:      name = "PLUS";      break;
    case OP_MINUS:     name = "MINUS";     break;
    case OP_DIV:       name = "DIV";       break;
    case OP_NEG:       name = "NEG";       break;
    case OP_CAT:       name = "CAT";       break;
    case OP_LT:        name = "LT";        break;
    case OP_LTE:       name = "LTE";       break;
    case OP_GT:        name = "GT";        break;
    case OP_GTE:       name = "GTE";       break;
    case OP_EQ:        name = "EQ";        break;
    case OP_NEQ:       name = "NEQ";       break;
    case OP_EACH:      name = "EACH";      break;
    case OP_JMP:       name = "JMP";       break;
    case OP_JIFNOT:    name = "JIFNOT";    break;
    case OP_JIFNIL:    name = "JIFNIL";    break;
    case OP_FCALL:     name = "FCALL";     break;
    case OP_MCALL:     name = "MCALL";     break;
    case OP_RETURN:    name = "RETURN";    break;
    case OP_PUSHCONST: name = "PUSHCONST"; break;
    case OP_PUSHONE:   name = "PUSHONE";   break;
    case OP_PUSHZERO:  name = "PUSHZERO";  break;
    case OP_PUSHNIL:   name = "PUSHNIL";   break;
    case OP_POP:       name = "POP";       break;
    case OP_DUP:       name = "DUP";       break;
    case OP_XCHG:      name = "XCHG";      break;
    case OP_INSERT:    name = "INSERT";    break;
    case OP_EXTRACT:   name = "EXTRACT";   break;
    case OP_MEMBER:    name = "MEMBER";    break;
    case OP_SETMEMBER: name = "SETMEMBER"; break;
    case OP_LOCAL:     name = "LOCAL";     break;
    case OP_SETLOCAL:  name = "SETLOCAL";  break;
    case OP_NEWVEC:    name = "NEWVEC";    break;
    case OP_VAPPEND:   name = "VAPPEND";   break;
    case OP_NEWHASH:   name = "NEWHASH";   break;
    case OP_HAPPEND:   name = "HAPPEND";   break;
    case OP_LINE:      name = "LINE";      break;
    case OP_MARK:      name = "MARK";      break;
    case OP_UNMARK:    name = "UNMARK";    break;
    case OP_BREAK:     name = "BREAK";     break;
    }
    if(name)
        return name;
    sprintf(buf, "<bad opcode: %d>\n", op);
    return buf;
}
// Print one bytecode operator together with its instruction pointer.
void printOpDEBUG(int ip, int op)
{
    char* name = opStringDEBUG(op);
    printf("IP: %d OP: %s\n", ip, name);
}
// Print a naRef to stdout in a debugger-friendly form: numbers as
// %f, strings quoted, containers/functions as type tags, and a loud
// complaint for internal objects that should never be on the stack.
void printRefDEBUG(naRef r)
{
    int i;
    if(IS_NUM(r)) {
        printf("%f\n", r.num);
    } else if(IS_NIL(r)) {
        printf("<nil>\n");
    } else if(IS_STR(r)) {
        printf("\"");
        for(i=0; i<r.ref.ptr.str->len; i++)
            printf("%c", r.ref.ptr.str->data[i]);
        printf("\"\n");
    } else if(IS_VEC(r)) {
        printf("<vec>\n");
    } else if(IS_HASH(r)) {
        printf("<hash>\n");
    } else if(IS_FUNC(r)) {
        printf("<func>\n");
    } else if(IS_CLOSURE(r)) {
        printf("DEBUG: closure object on stack!\n");
    } else if(IS_CODE(r)) {
        printf("DEBUG: code object on stack!\n");
    } else {
        printf("DEBUG ACK\n");
    }
}
// Dump the operand stack of the given context to stdout, topmost
// entry first, one "]"-prefixed line per slot.
void printStackDEBUG(struct Context* ctx)
{
    int i = ctx->opTop;
    printf("\n");
    while(i-- > 0) {
        printf("] ");
        printRefDEBUG(ctx->opStack[i]);
    }
    printf("\n");
}
// Token type to string.  Maps a parser token-type constant to its
// symbolic name for diagnostics; returns NULL (0) for values that
// are not recognized, so callers must handle a null result.
char* tokString(int tok)
{
switch(tok) {
case TOK_TOP: return "TOK_TOP";
case TOK_AND: return "TOK_AND";
case TOK_OR: return "TOK_OR";
case TOK_NOT: return "TOK_NOT";
case TOK_LPAR: return "TOK_LPAR";
case TOK_RPAR: return "TOK_RPAR";
case TOK_LBRA: return "TOK_LBRA";
case TOK_RBRA: return "TOK_RBRA";
case TOK_LCURL: return "TOK_LCURL";
case TOK_RCURL: return "TOK_RCURL";
case TOK_MUL: return "TOK_MUL";
case TOK_PLUS: return "TOK_PLUS";
case TOK_MINUS: return "TOK_MINUS";
case TOK_NEG: return "TOK_NEG";
case TOK_DIV: return "TOK_DIV";
case TOK_CAT: return "TOK_CAT";
case TOK_COLON: return "TOK_COLON";
case TOK_DOT: return "TOK_DOT";
case TOK_COMMA: return "TOK_COMMA";
case TOK_SEMI: return "TOK_SEMI";
case TOK_ASSIGN: return "TOK_ASSIGN";
case TOK_LT: return "TOK_LT";
case TOK_LTE: return "TOK_LTE";
case TOK_EQ: return "TOK_EQ";
case TOK_NEQ: return "TOK_NEQ";
case TOK_GT: return "TOK_GT";
case TOK_GTE: return "TOK_GTE";
case TOK_IF: return "TOK_IF";
case TOK_ELSIF: return "TOK_ELSIF";
case TOK_ELSE: return "TOK_ELSE";
case TOK_FOR: return "TOK_FOR";
case TOK_FOREACH: return "TOK_FOREACH";
case TOK_WHILE: return "TOK_WHILE";
case TOK_RETURN: return "TOK_RETURN";
case TOK_BREAK: return "TOK_BREAK";
case TOK_CONTINUE: return "TOK_CONTINUE";
case TOK_FUNC: return "TOK_FUNC";
case TOK_SYMBOL: return "TOK_SYMBOL";
case TOK_LITERAL: return "TOK_LITERAL";
case TOK_EMPTY: return "TOK_EMPTY";
case TOK_NIL: return "TOK_NIL";
}
return 0; // unknown token type
}
// Diagnostic: check all list pointers for sanity
// Report a corrupt token list and abort the process immediately.
void ack()
{
    fputs("Bad token list!\n", stdout);
    exit(1);
}
// Sanity-check a doubly-linked token list: walk forward from start
// verifying next/prev symmetry and that the chain ends at "end", then
// walk backward from end verifying the mirror conditions.  Any
// inconsistency aborts via ack().
void checkList(struct Token* start, struct Token* end)
{
    struct Token* t;
    for(t = start; t; t = t->next) {
        if(t->next && t->next->prev != t) ack();
        if(t->next == 0 && t != end) ack();
    }
    for(t = end; t; t = t->prev) {
        if(t->prev && t->prev->next != t) ack();
        if(t->prev == 0 && t != start) ack();
    }
}
// Prints a single parser token to stdout: indent prefix, line number,
// token name, and for literals/symbols either the quoted string data
// (with its length) or the numeric value.
void printToken(struct Token* t, char* prefix)
{
    int i;
    printf("%sline %d %s ", prefix, t->line, tokString(t->type));
    if(t->type == TOK_LITERAL || t->type == TOK_SYMBOL) {
        if(t->str == 0) {
            printf("%f ", t->num);
        } else {
            printf("\"");
            for(i=0; i<t->strlen; i++)
                printf("%c", t->str[i]);
            printf("\" (len: %d)", t->strlen);
        }
    }
    printf("\n");
}
// Prints a parse tree to stdout, one token per line, indenting each
// nesting level with ". ".  The indent depth is clamped so that very
// deep trees cannot overflow the local prefix buffer (the original
// code strcat'ed one ". " per level into 128 bytes unconditionally,
// overflowing the stack at depth >= 64).
void dumpTokenList(struct Token* t, int prefix)
{
    char prefstr[128];
    int i;
    // Each level consumes 2 bytes; reserve one byte for the NUL.
    int maxlevels = (int)(sizeof(prefstr) - 1) / 2;
    int levels = prefix < maxlevels ? prefix : maxlevels;
    prefstr[0] = 0;
    for(i=0; i<levels; i++)
        strcat(prefstr, ". ");
    while(t) {
        printToken(t, prefstr);
        dumpTokenList(t->children, prefix+1);
        t = t->next;
    }
}

View File

@@ -1,44 +1,123 @@
#include "nasal.h"
#include "data.h"
#include "code.h"
#define MIN_BLOCK_SIZE 256
// "type" for an object freed by the collector
#define T_GCFREED 123 // DEBUG
static void reap(struct naPool* p);
static void mark(naRef r);
struct Block {
int size;
char* block;
struct Block* next;
};
// Decremented every allocation. When it reaches zero, we do a
// garbage collection. The value is reset to 1/2 of the total object
// count each collection, which is sane: it ensures that no more than
// 50% growth can happen between collections, and ensures that garbage
// collection work is constant with allocation work (i.e. that O(N)
// work is done only every O(1/2N) allocations).
static int GlobalAllocCount = 256;
static void appendfree(struct naPool*p, struct naObj* o)
// Must be called with the giant exclusive lock!
static void freeDead()
{
// Need more space?
if(p->freesz <= p->nfree) {
int i, n = 1+((3*p->nfree)>>1);
void** newf = naAlloc(n * sizeof(void*));
for(i=0; i<p->nfree; i++)
newf[i] = p->free[i];
naFree(p->free);
p->free = newf;
p->freesz = n;
int i;
for(i=0; i<globals->ndead; i++)
naFree(globals->deadBlocks[i]);
globals->ndead = 0;
}
// Must be called with the big lock!
static void garbageCollect()
{
int i;
struct Context* c;
globals->allocCount = 0;
c = globals->allContexts;
while(c) {
for(i=0; i<NUM_NASAL_TYPES; i++)
c->nfree[i] = 0;
for(i=0; i < c->fTop; i++) {
mark(c->fStack[i].func);
mark(c->fStack[i].locals);
}
for(i=0; i < c->opTop; i++)
mark(c->opStack[i]);
mark(c->dieArg);
mark(c->temps);
c = c->nextAll;
}
p->free[p->nfree++] = o;
mark(globals->save);
mark(globals->symbols);
mark(globals->meRef);
mark(globals->argRef);
mark(globals->parentsRef);
// Finally collect all the freed objects
for(i=0; i<NUM_NASAL_TYPES; i++)
reap(&(globals->pools[i]));
// Make enough space for the dead blocks we need to free during
// execution. This works out to 1 spot for every 2 live objects,
// which should be limit the number of bottleneck operations
// without imposing an undue burden of extra "freeable" memory.
if(globals->deadsz < globals->allocCount) {
globals->deadsz = globals->allocCount;
if(globals->deadsz < 256) globals->deadsz = 256;
naFree(globals->deadBlocks);
globals->deadBlocks = naAlloc(sizeof(void*) * globals->deadsz);
}
globals->needGC = 0;
}
// Enter Nasal-managed execution from a user thread.  First parks at
// the GC bottleneck if one is pending, then registers this thread in
// the global running-thread count under the giant lock.
void naModLock()
{
naCheckBottleneck(); // block here while a collection is in progress
LOCK();
globals->nThreads++;
UNLOCK();
}
// Leave Nasal-managed execution: decrement the running-thread count
// under the giant lock.  Mirror of naModLock().
void naModUnlock()
{
LOCK();
globals->nThreads--;
UNLOCK();
}
// Must be called with the main lock. Engages the "bottleneck", where
// all threads will block so that one (the last one to call this
// function) can run alone. This is done for GC, and also to free the
// list of "dead" blocks when it gets full (which is part of GC, if
// you think about it).
static void bottleneck()
{
struct Globals* g = globals;
g->bottleneck = 1; // signal other threads to park in the loop below
// Wait (releasing the lock around the semaphore) until every other
// registered thread is parked, or until some other thread finishes
// the bottleneck work and clears the flag out from under us.
while(g->bottleneck && g->waitCount < g->nThreads - 1) {
g->waitCount++;
UNLOCK(); naSemDown(g->sem); LOCK();
g->waitCount--;
}
// Last thread to arrive runs alone: free the queued dead blocks,
// garbage-collect if requested, then wake all parked threads and
// clear the bottleneck flag.
if(g->waitCount >= g->nThreads - 1) {
freeDead();
if(g->needGC) garbageCollect();
if(g->waitCount) naSemUpAll(g->sem, g->waitCount);
g->bottleneck = 0;
}
}
// Cheap unlocked check: if some thread has requested the bottleneck,
// take the giant lock and participate in bottleneck() until it ends.
void naCheckBottleneck()
{
if(globals->bottleneck) { LOCK(); bottleneck(); UNLOCK(); }
}
// Release the malloc'ed storage owned by a naCode object (allocated
// in naCodeGen).  Each pointer is nulled after freeing so a stray
// second call is harmless.  Fixes the original, which freed byteCode
// and constants twice, reset o->argSyms instead of o->optArgSyms
// after freeing the latter, and leaked optArgVals and lineIps.
static void naCode_gcclean(struct naCode* o)
{
    naFree(o->byteCode);   o->byteCode = 0;
    naFree(o->constants);  o->constants = 0;
    naFree(o->argSyms);    o->argSyms = 0;
    naFree(o->optArgSyms); o->optArgSyms = 0;
    naFree(o->optArgVals); o->optArgVals = 0;
    naFree(o->lineIps);    o->lineIps = 0;
}
static void naGhost_gcclean(struct naGhost* g)
@@ -49,9 +128,6 @@ static void naGhost_gcclean(struct naGhost* g)
static void freeelem(struct naPool* p, struct naObj* o)
{
// Mark the object as "freed" for debugging purposes
o->type = T_GCFREED; // DEBUG
// Free any intrinsic (i.e. non-garbage collected) storage the
// object might have
switch(p->type) {
@@ -73,77 +149,78 @@ static void freeelem(struct naPool* p, struct naObj* o)
}
// And add it to the free list
appendfree(p, o);
o->type = T_GCFREED; // DEBUG
p->free[p->nfree++] = o;
}
static void newBlock(struct naPool* p, int need)
{
int i;
char* buf;
struct Block* newblocks;
struct Block* newb;
if(need < MIN_BLOCK_SIZE)
need = MIN_BLOCK_SIZE;
if(need < MIN_BLOCK_SIZE) need = MIN_BLOCK_SIZE;
newb = naAlloc(sizeof(struct Block));
newb->block = naAlloc(need * p->elemsz);
newb->size = need;
newb->next = p->blocks;
p->blocks = newb;
naBZero(newb->block, need * p->elemsz);
newblocks = naAlloc((p->nblocks+1) * sizeof(struct Block));
for(i=0; i<p->nblocks; i++) newblocks[i] = p->blocks[i];
naFree(p->blocks);
p->blocks = newblocks;
buf = naAlloc(need * p->elemsz);
naBZero(buf, need * p->elemsz);
p->blocks[p->nblocks].size = need;
p->blocks[p->nblocks].block = buf;
p->nblocks++;
for(i=0; i<need; i++) {
struct naObj* o = (struct naObj*)(buf + i*p->elemsz);
if(need > p->freesz - p->freetop) need = p->freesz - p->freetop;
p->nfree = 0;
p->free = p->free0 + p->freetop;
for(i=0; i < need; i++) {
struct naObj* o = (struct naObj*)(newb->block + i*p->elemsz);
o->mark = 0;
o->type = p->type;
appendfree(p, o);
o->type = T_GCFREED; // DEBUG
p->free[p->nfree++] = o;
}
p->freetop += need;
}
void naGC_init(struct naPool* p, int type)
{
p->type = type;
p->elemsz = naTypeSize(type);
p->nblocks = 0;
p->blocks = 0;
p->nfree = 0;
p->freesz = 0;
p->free = 0;
naGC_reap(p);
p->free0 = p->free = 0;
p->nfree = p->freesz = p->freetop = 0;
reap(p);
}
int naGC_size(struct naPool* p)
static int poolsize(struct naPool* p)
{
int i, total=0;
for(i=0; i<p->nblocks; i++)
total += ((struct Block*)(p->blocks + i))->size;
int total = 0;
struct Block* b = p->blocks;
while(b) { total += b->size; b = b->next; }
return total;
}
struct naObj* naGC_get(struct naPool* p)
struct naObj** naGC_get(struct naPool* p, int n, int* nout)
{
// Collect every GlobalAllocCount allocations.
// This gets set to ~50% of the total object count each
// collection (it's incremented in naGC_reap()).
if(--GlobalAllocCount < 0) {
GlobalAllocCount = 0;
naGarbageCollect();
struct naObj** result;
naCheckBottleneck();
LOCK();
while(globals->allocCount < 0 || (p->nfree == 0 && p->freetop >= p->freesz)) {
globals->needGC = 1;
bottleneck();
}
// If we're out, then allocate an extra 12.5%
if(p->nfree == 0)
newBlock(p, naGC_size(p)/8);
return p->free[--p->nfree];
newBlock(p, poolsize(p)/8);
n = p->nfree < n ? p->nfree : n;
*nout = n;
p->nfree -= n;
globals->allocCount -= n;
result = (struct naObj**)(p->free + p->nfree);
UNLOCK();
return result;
}
// Sets the reference bit on the object, and recursively on all
// objects reachable from it. Clumsy: uses C stack recursion, which
// is slower than it need be and may cause problems on some platforms
// due to the very large stack depths that result.
void naGC_mark(naRef r)
// objects reachable from it. Uses the processor stack for recursion...
static void mark(naRef r)
{
int i;
@@ -159,56 +236,61 @@ void naGC_mark(naRef r)
r.ref.ptr.obj->mark = 1;
switch(r.ref.ptr.obj->type) {
case T_VEC:
for(i=0; i<r.ref.ptr.vec->size; i++)
naGC_mark(r.ref.ptr.vec->array[i]);
if(r.ref.ptr.vec->rec)
for(i=0; i<r.ref.ptr.vec->rec->size; i++)
mark(r.ref.ptr.vec->rec->array[i]);
break;
case T_HASH:
if(r.ref.ptr.hash->table == 0)
break;
for(i=0; i < (1<<r.ref.ptr.hash->lgalloced); i++) {
struct HashNode* hn = r.ref.ptr.hash->table[i];
while(hn) {
naGC_mark(hn->key);
naGC_mark(hn->val);
hn = hn->next;
if(r.ref.ptr.hash->rec != 0) {
struct HashRec* hr = r.ref.ptr.hash->rec;
for(i=0; i < (1<<hr->lgalloced); i++) {
struct HashNode* hn = hr->table[i];
while(hn) {
mark(hn->key);
mark(hn->val);
hn = hn->next;
}
}
}
break;
case T_CODE:
naGC_mark(r.ref.ptr.code->srcFile);
mark(r.ref.ptr.code->srcFile);
for(i=0; i<r.ref.ptr.code->nConstants; i++)
naGC_mark(r.ref.ptr.code->constants[i]);
break;
case T_CLOSURE:
naGC_mark(r.ref.ptr.closure->namespace);
naGC_mark(r.ref.ptr.closure->next);
mark(r.ref.ptr.code->constants[i]);
break;
case T_FUNC:
naGC_mark(r.ref.ptr.func->code);
naGC_mark(r.ref.ptr.func->closure);
mark(r.ref.ptr.func->code);
mark(r.ref.ptr.func->namespace);
mark(r.ref.ptr.func->next);
break;
}
}
// Collects all the unreachable objects into a free list, and
// allocates more space if needed.
void naGC_reap(struct naPool* p)
static void reap(struct naPool* p)
{
int i, elem, total = 0;
struct Block* b;
int elem, freesz, total = poolsize(p);
p->nfree = 0;
for(i=0; i<p->nblocks; i++) {
struct Block* b = p->blocks + i;
total += b->size;
freesz = total < MIN_BLOCK_SIZE ? MIN_BLOCK_SIZE : total;
freesz = (3 * freesz / 2) + (globals->nThreads * OBJ_CACHE_SZ);
if(p->freesz < freesz) {
naFree(p->free0);
p->freesz = freesz;
p->free = p->free0 = naAlloc(sizeof(void*) * p->freesz);
}
for(b = p->blocks; b; b = b->next)
for(elem=0; elem < b->size; elem++) {
struct naObj* o = (struct naObj*)(b->block + elem * p->elemsz);
if(o->mark == 0)
freeelem(p, o);
o->mark = 0;
}
}
// Add 50% of our total to the global count
GlobalAllocCount += total/2;
// allocs of this type until the next collection
globals->allocCount += total/2;
// Allocate more if necessary (try to keep 25-50% of the objects
// available)
@@ -219,5 +301,18 @@ void naGC_reap(struct naPool* p)
if(need > 0)
newBlock(p, need);
}
p->freetop = p->nfree;
}
// Atomically replaces target with a new pointer, and adds the old one
// to the list of blocks to free the next time something holds the
// giant lock.
void naGC_swapfree(void** target, void* val)
{
LOCK();
// If the dead-block queue is full, run the bottleneck (whose
// freeDead() empties it) before enqueueing another pointer.
while(globals->ndead >= globals->deadsz)
bottleneck();
globals->deadBlocks[globals->ndead++] = *target;
*target = val;
UNLOCK();
}

View File

@@ -1,42 +1,22 @@
#include "nasal.h"
#include "data.h"
static void realloc(naRef hash)
{
struct naHash* h = hash.ref.ptr.hash;
int i, sz, oldsz = h->size;
int oldcols = h->table ? 1 << h->lgalloced : 0;
#define MIN_HASH_SIZE 4
// Keep a handle to our original objects
struct HashNode* oldnodes = h->nodes;
struct HashNode** oldtable = h->table;
#define EQUAL(a, b) (((a).ref.reftag == (b).ref.reftag \
&& (a).ref.ptr.obj == (b).ref.ptr.obj) \
|| naEqual(a, b))
// Figure out how big we need to be (start with a minimum size of
// 16 entries)
for(i=3; 1<<i < oldsz; i++);
h->lgalloced = i+1;
// Allocate new ones (note that all the records are allocated in a
// single chunk, to avoid zillions of tiny node allocations)
sz = 1<<h->lgalloced;
h->nodes = naAlloc(sz * (sizeof(struct HashNode) + sizeof(void*)));
h->table = (struct HashNode**)(((char*)h->nodes) + sz*sizeof(struct HashNode));
naBZero(h->table, sz * sizeof(void*));
h->nextnode = 0;
h->size = 0;
#define HASH_MAGIC 2654435769u
// Re-insert everything from scratch
for(i=0; i<oldcols; i++) {
struct HashNode* hn = oldtable[i];
while(hn) {
naHash_set(hash, hn->key, hn->val);
hn = hn->next;
}
}
// Free the old memory
naFree(oldnodes);
}
#define INSERT(hh, hkey, hval, hcol) do { \
unsigned int cc = (hcol), iidx=(hh)->size++; \
if(iidx < (1<<(hh)->lgalloced)) { \
struct HashNode* hnn = &(hh)->nodes[iidx]; \
hnn->key = (hkey); hnn->val = (hval); \
hnn->next = (hh)->table[cc]; \
(hh)->table[cc] = hnn; \
}} while(0)
// Computes a hash code for a given scalar
static unsigned int hashcode(naRef r)
@@ -48,61 +28,106 @@ static unsigned int hashcode(naRef r)
// 2*sizeof(int).
unsigned int* p = (unsigned int*)&(r.num);
return p[0] ^ p[1];
} else if(r.ref.ptr.str->hashcode) {
return r.ref.ptr.str->hashcode;
} else {
// This is Daniel Bernstein's djb2 hash function that I found
// on the web somewhere. It appears to work pretty well.
unsigned int i, hash = 5831;
for(i=0; i<r.ref.ptr.str->len; i++)
hash = (hash * 33) ^ r.ref.ptr.str->data[i];
r.ref.ptr.str->hashcode = hash;
return hash;
}
}
// Which column in a given hash does the key correspond to.
static unsigned int hashcolumn(struct naHash* h, naRef key)
static unsigned int hashcolumn(struct HashRec* h, naRef key)
{
// Multiply by a big number, and take the top N bits. Note
// assumption that sizeof(unsigned int) == 4.
return (2654435769u * hashcode(key)) >> (32 - h->lgalloced);
return (HASH_MAGIC * hashcode(key)) >> (32 - h->lgalloced);
}
struct HashNode* find(struct naHash* h, naRef key)
static struct HashRec* realloc(struct naHash* hash)
{
struct HashNode* hn;
if(h->table == 0)
return 0;
hn = h->table[hashcolumn(h, key)];
while(hn) {
if(naEqual(key, hn->key))
return hn;
hn = hn->next;
struct HashRec *h, *h0 = hash->rec;
int lga, cols, need = h0 ? h0->size - h0->dels : MIN_HASH_SIZE;
for(lga=0; 1<<lga <= need; lga++);
cols = 1<<lga;
h = naAlloc(sizeof(struct HashRec) +
cols * (sizeof(struct HashNode*) + sizeof(struct HashNode)));
naBZero(h, sizeof(struct HashRec) + cols * sizeof(struct HashNode*));
h->lgalloced = lga;
h->nodes = (struct HashNode*)(((char*)h)
+ sizeof(struct HashRec)
+ cols * sizeof(struct HashNode*));
for(lga=0; h0 != 0 && lga<(1<<h0->lgalloced); lga++) {
struct HashNode* hn = h0->table[lga];
while(hn) {
INSERT(h, hn->key, hn->val, hashcolumn(h, hn->key));
hn = hn->next;
}
}
naGC_swapfree((void**)&hash->rec, h);
return h;
}
// Special, optimized version of naHash_get for the express purpose of
// looking up symbols in the local variables hash (OP_LOCAL is by far
// the most common opcode and deserves some special case
// optimization).  Elides all the typing checks that are normally
// required, presumes that the key is a string and has had its
// hashcode precomputed, checks only for object identity, and inlines
// the column computation.
int naHash_sym(struct naHash* hash, struct naStr* sym, naRef* out)
{
    struct HashRec* h = hash->rec;
    if(h) {
        // Inlined equivalent of hashcolumn(): top lgalloced bits of the
        // multiplied (precomputed) hash code.
        int col = (HASH_MAGIC * sym->hashcode) >> (32 - h->lgalloced);
        struct HashNode* hn = h->table[col];
        while(hn) {
            // Pointer identity only: interned symbol strings share one
            // naStr object, so no content comparison is needed.
            if(hn->key.ref.ptr.str == sym) {
                *out = hn->val;
                return 1;
            }
            hn = hn->next;
        }
    }
    // Miss (or hash has no record yet): *out is left untouched.
    return 0;
}
void naHash_init(naRef hash)
static struct HashNode* find(struct naHash* hash, naRef key)
{
struct naHash* h = hash.ref.ptr.hash;
h->size = 0;
h->lgalloced = 0;
h->table = 0;
h->nodes = 0;
struct HashRec* h = hash->rec;
if(h) {
struct HashNode* hn = h->table[hashcolumn(h, key)];
while(hn) {
if(EQUAL(key, hn->key))
return hn;
hn = hn->next;
}
}
return 0;
}
// Make a temporary string on the stack
static naRef tmpStr(struct naStr* str, char* key)
static void tmpStr(naRef* out, struct naStr* str, char* key)
{
char* p = key;
while(*p) { p++; }
str->len = p - key;
str->len = 0;
str->data = key;
return naObj(T_STR, (struct naObj*)str);
while(key[str->len]) str->len++;
*out = naNil();
out->ref.ptr.str = str;
}
naRef naHash_cget(naRef hash, char* key)
{
struct naStr str;
naRef result, key2 = tmpStr(&str, key);
naRef result, key2;
tmpStr(&key2, &str, key);
if(naHash_get(hash, key2, &result))
return result;
return naNil();
@@ -111,80 +136,86 @@ naRef naHash_cget(naRef hash, char* key)
void naHash_cset(naRef hash, char* key, naRef val)
{
struct naStr str;
naRef key2 = tmpStr(&str, key);
naRef key2;
tmpStr(&key2, &str, key);
naHash_tryset(hash, key2, val);
}
int naHash_get(naRef hash, naRef key, naRef* out)
{
struct naHash* h = hash.ref.ptr.hash;
struct HashNode* n;
if(!IS_HASH(hash)) return 0;
n = find(h, key);
if(n) {
*out = n->val;
return 1;
} else {
*out = naNil();
return 0;
if(IS_HASH(hash)) {
struct HashNode* n = find(hash.ref.ptr.hash, key);
if(n) { *out = n->val; return 1; }
}
return 0;
}
// Simpler version. Don't create a new node if the value isn't there
int naHash_tryset(naRef hash, naRef key, naRef val)
{
struct HashNode* n;
if(!IS_HASH(hash)) return 0;
n = find(hash.ref.ptr.hash, key);
if(n) n->val = val;
return n != 0;
if(IS_HASH(hash)) {
struct HashNode* n = find(hash.ref.ptr.hash, key);
if(n) n->val = val;
return n != 0;
}
return 0;
}
// Special purpose optimization for use in function call setups.  Sets
// a value that is known *not* to be present in the hash table.  As
// for naHash_sym, the key must be a string with a precomputed hash
// code.
void naHash_newsym(struct naHash* hash, naRef* sym, naRef* val)
{
    int col;
    struct HashRec* h = hash->rec;
    // Grow (or create) the record when full; realloc() installs a new
    // HashRec on the hash and queues the old one for deferred freeing.
    if(!h || h->size >= 1<<h->lgalloced)
        h = realloc(hash);
    // Same column computation as hashcolumn()/naHash_sym, inlined.
    col = (HASH_MAGIC * sym->ref.ptr.str->hashcode) >> (32 - h->lgalloced);
    INSERT(h, *sym, *val, col);
}
// Defensive cycle breaker for hash chains.  Raced inserts from
// multiple threads can, in principle, link a chain back onto itself;
// whichever thread last holds a reference to an inserted node walks
// at most 'count' links and truncates anything beyond that.  Normal
// chains are short, so this is cheap; in the pathological case a few
// entries may be dropped, which beats an infinite loop.
static void chkcycle(struct HashNode* node, int count)
{
    struct HashNode* p = node ? node->next : 0;
    for(; p != 0; p = p->next)
        if(count-- <= 0) { node->next = 0; return; }
}
void naHash_set(naRef hash, naRef key, naRef val)
{
struct naHash* h = hash.ref.ptr.hash;
unsigned int col;
int col;
struct HashRec* h;
struct HashNode* n;
if(!IS_HASH(hash)) return;
n = find(h, key);
if(n) {
n->val = val;
return;
}
if(h->size+1 >= 1<<h->lgalloced)
realloc(hash);
if((n = find(hash.ref.ptr.hash, key))) { n->val = val; return; }
h = hash.ref.ptr.hash->rec;
while(!h || h->size >= 1<<h->lgalloced)
h = realloc(hash.ref.ptr.hash);
col = hashcolumn(h, key);
n = h->nodes + h->nextnode++;
n->key = key;
n->val = val;
n->next = h->table[col];
h->table[col] = n;
h->size++;
INSERT(h, key, val, hashcolumn(h, key));
chkcycle(h->table[col], h->size - h->dels);
}
// FIXME: this implementation does a realloc() after each delete, and
// is therefore needlessly O(N). (The reason is that this avoids the
// need to keep a free list around for the much more common case of
// adding a new value. Modifying an existing value is O(1), of
// course.)
void naHash_delete(naRef hash, naRef key)
{
struct naHash* h = hash.ref.ptr.hash;
struct HashRec* h = hash.ref.ptr.hash->rec;
int col;
struct HashNode *last=0, *hn;
if(!IS_HASH(hash)) return;
if(!IS_HASH(hash) || !h) return;
col = hashcolumn(h, key);
hn = h->table[col];
while(hn) {
if(naEqual(hn->key, key)) {
if(EQUAL(hn->key, key)) {
if(last == 0) h->table[col] = hn->next;
else last->next = hn->next;
h->size--;
realloc(hash);
h->dels++;
return;
}
last = hn;
@@ -194,9 +225,9 @@ void naHash_delete(naRef hash, naRef key)
void naHash_keys(naRef dst, naRef hash)
{
struct naHash* h = hash.ref.ptr.hash;
int i;
if(!IS_HASH(hash) || !h->table) return;
struct HashRec* h = hash.ref.ptr.hash->rec;
if(!IS_HASH(hash) || !h) return;
for(i=0; i<(1<<h->lgalloced); i++) {
struct HashNode* hn = h->table[i];
while(hn) {
@@ -206,18 +237,15 @@ void naHash_keys(naRef dst, naRef hash)
}
}
int naHash_size(naRef h)
int naHash_size(naRef hash)
{
if(!IS_HASH(h)) return 0;
return h.ref.ptr.hash->size;
struct HashRec* h = hash.ref.ptr.hash->rec;
if(!IS_HASH(hash) || !h) return 0;
return h->size - h->dels;
}
void naHash_gcclean(struct naHash* h)
{
naFree(h->nodes);
h->nodes = 0;
h->size = 0;
h->lgalloced = 0;
h->table = 0;
h->nextnode = 0;
naFree(h->rec);
h->rec = 0;
}

View File

@@ -40,7 +40,10 @@ struct Lexeme {
{"return", TOK_RETURN},
{"break", TOK_BREAK},
{"continue", TOK_CONTINUE},
{"func", TOK_FUNC}
{"func", TOK_FUNC},
{"...", TOK_ELLIPSIS},
{"?", TOK_QUESTION},
{"var", TOK_VAR},
};
// Build a table of where each line ending is

View File

@@ -1,68 +1,70 @@
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#ifdef _MSC_VER // sigh...
#define vsnprintf _vsnprintf
#endif
#include "nasal.h"
#include "code.h"
// Length of a NUL-terminated string as an int.  Kept as a wrapper
// (rather than calling strlen directly at every site) because MSVC
// wants to treat "strlen" as a special symbol; <string.h> is already
// included above, so we simply delegate to the standard library.
static int StrLen(char* s)
{
    return (int)strlen(s);
}
#define NEWSTR(c, s, l) naStr_fromdata(naNewString(c), s, l)
static naRef size(naContext c, naRef args)
static naRef size(naContext c, naRef me, int argc, naRef* args)
{
naRef r;
if(naVec_size(args) == 0) return naNil();
r = naVec_get(args, 0);
if(naIsString(r)) return naNum(naStr_len(r));
if(naIsVector(r)) return naNum(naVec_size(r));
if(naIsHash(r)) return naNum(naHash_size(r));
if(argc == 0) return naNil();
if(naIsString(args[0])) return naNum(naStr_len(args[0]));
if(naIsVector(args[0])) return naNum(naVec_size(args[0]));
if(naIsHash(args[0])) return naNum(naHash_size(args[0]));
naRuntimeError(c, "object has no size()");
return naNil();
}
static naRef keys(naContext c, naRef args)
static naRef keys(naContext c, naRef me, int argc, naRef* args)
{
naRef v, h = naVec_get(args, 0);
naRef v, h = args[0];
if(!naIsHash(h)) return naNil();
v = naNewVector(c);
naHash_keys(v, h);
return v;
}
// append(vec, elt): appends a single element to a vector and returns
// the vector, or nil if the first argument is not a vector.
// (Takes its arguments packed in a single args vector.)
static naRef append(naContext c, naRef args)
{
    naRef v = naVec_get(args, 0);
    naRef e = naVec_get(args, 1);
    if(!naIsVector(v)) return naNil();
    naVec_append(v, e);
    return v;
}
// pop(vec): removes and returns the last element of a vector, or nil
// if the argument is not a vector.
static naRef pop(naContext c, naRef args)
{
    naRef v = naVec_get(args, 0);
    if(!naIsVector(v)) return naNil();
    return naVec_removelast(v);
}
// setsize(vec, n): resizes the vector to n elements and returns it,
// or nil if the first argument is not a vector.
static naRef setsize(naContext c, naRef args)
{
    naRef v = naVec_get(args, 0);
    // NOTE(review): if args[1] is not numeric, naNumValue presumably
    // yields nil and .num is unspecified -- the resulting size is then
    // garbage.  Confirm whether callers guarantee a numeric argument.
    int sz = (int)naNumValue(naVec_get(args, 1)).num;
    if(!naIsVector(v)) return naNil();
    naVec_setsize(v, sz);
    return v;
}
static naRef subvec(naContext c, naRef args)
static naRef append(naContext c, naRef me, int argc, naRef* args)
{
int i;
naRef nlen, result, v = naVec_get(args, 0);
int len = 0, start = (int)naNumValue(naVec_get(args, 1)).num;
nlen = naNumValue(naVec_get(args, 2));
if(argc < 2) return naNil();
if(!naIsVector(args[0])) return naNil();
for(i=1; i<argc; i++) naVec_append(args[0], args[i]);
return args[0];
}
// pop(vec): removes and returns the last element of a vector; nil if
// the argument is missing or not a vector.
static naRef pop(naContext c, naRef me, int argc, naRef* args)
{
    if(argc >= 1 && naIsVector(args[0]))
        return naVec_removelast(args[0]);
    return naNil();
}
// setsize(vec, n): resizes the vector to n elements and returns it.
// Returns nil when fewer than two arguments are given or the first is
// not a vector.
static naRef setsize(naContext c, naRef me, int argc, naRef* args)
{
    // Declaration hoisted above the early return: the original had
    // "int sz" after a statement, which C89 compilers (MSVC6 et al.,
    // still targeted by this code base) reject.
    int sz;
    if(argc < 2) return naNil();
    sz = (int)naNumValue(args[1]).num;
    if(!naIsVector(args[0])) return naNil();
    naVec_setsize(args[0], sz);
    return args[0];
}
static naRef subvec(naContext c, naRef me, int argc, naRef* args)
{
int i;
naRef nlen, result, v = args[0];
int len = 0, start = (int)naNumValue(args[1]).num;
if(argc < 2) return naNil();
nlen = argc > 2 ? naNumValue(args[2]) : naNil();
if(!naIsNil(nlen))
len = (int)naNumValue(naVec_get(args, 2)).num;
len = (int)nlen.num;
if(!naIsVector(v) || start < 0 || start >= naVec_size(v) || len < 0)
return naNil();
if(len == 0 || len > naVec_size(v) - start) len = naVec_size(v) - start;
@@ -73,44 +75,38 @@ static naRef subvec(naContext c, naRef args)
return result;
}
static naRef delete(naContext c, naRef args)
static naRef delete(naContext c, naRef me, int argc, naRef* args)
{
naRef h = naVec_get(args, 0);
naRef k = naVec_get(args, 1);
if(naIsHash(h)) naHash_delete(h, k);
if(argc > 1 && naIsHash(args[0])) naHash_delete(args[0], args[1]);
return naNil();
}
static naRef intf(naContext c, naRef args)
static naRef intf(naContext c, naRef me, int argc, naRef* args)
{
naRef n = naNumValue(naVec_get(args, 0));
if(!naIsNil(n)) n.num = (int)n.num;
return n;
if(argc > 0) {
naRef n = naNumValue(args[0]);
if(naIsNil(n)) return n;
if(n.num < 0) n.num = -floor(-n.num);
else n.num = floor(n.num);
return n;
} else return naNil();
}
static naRef num(naContext c, naRef args)
static naRef num(naContext c, naRef me, int argc, naRef* args)
{
return naNumValue(naVec_get(args, 0));
return argc > 0 ? naNumValue(args[0]) : naNil();
}
static naRef streq(naContext c, naRef args)
static naRef streq(naContext c, naRef me, int argc, naRef* args)
{
int i;
naRef a = naVec_get(args, 0);
naRef b = naVec_get(args, 1);
if(!naIsString(a) || !naIsString(b)) return naNil();
if(naStr_len(a) != naStr_len(b)) return naNum(0);
for(i=0; i<naStr_len(a); i++)
if(naStr_data(a)[i] != naStr_data(b)[i])
return naNum(0);
return naNum(1);
return argc > 1 ? naNum(naStrEqual(args[0], args[1])) : naNil();
}
static naRef substr(naContext c, naRef args)
static naRef substr(naContext c, naRef me, int argc, naRef* args)
{
naRef src = naVec_get(args, 0);
naRef startR = naVec_get(args, 1);
naRef lenR = naVec_get(args, 2);
naRef src = argc > 1 ? args[0] : naNil();
naRef startR = argc > 1 ? args[1] : naNil();
naRef lenR = argc > 2 ? args[2] : naNil();
int start, len;
if(!naIsString(src)) return naNil();
startR = naNumValue(startR);
@@ -126,18 +122,39 @@ static naRef substr(naContext c, naRef args)
return naStr_substr(naNewString(c), src, start, len);
}
static naRef contains(naContext c, naRef args)
static naRef f_strc(naContext c, naRef me, int argc, naRef* args)
{
naRef hash = naVec_get(args, 0);
naRef key = naVec_get(args, 1);
int idx;
struct naStr* str = args[0].ref.ptr.str;
naRef idr = argc > 1 ? naNumValue(args[1]) : naNum(0);
if(argc < 2 || IS_NIL(idr) || !IS_STR(args[0]))
naRuntimeError(c, "bad arguments to strc");
idx = (int)naNumValue(idr).num;
if(idx > str->len) naRuntimeError(c, "strc index out of bounds");
return naNum(str->data[idx]);
}
// chr(num): returns a one-character string whose character code is
// num.  Raises a runtime error when no numeric argument is given.
static naRef f_chr(naContext c, naRef me, int argc, naRef* args)
{
    char chr[1];
    naRef cr = argc ? naNumValue(args[0]) : naNil();
    // Bug fix: the check is for a *number* (naNumValue failed), but the
    // old message claimed the argument was "not string".
    if(IS_NIL(cr)) naRuntimeError(c, "chr argument not number");
    chr[0] = (char)cr.num;
    return NEWSTR(c, chr, 1);
}
// contains(hash, key): naNum(1) if key is present in the hash,
// naNum(0) if not; nil when arguments are missing, nil, or the first
// is not a hash.
static naRef contains(naContext c, naRef me, int argc, naRef* args)
{
    naRef scratch;
    if(argc < 2 || naIsNil(args[0]) || naIsNil(args[1]))
        return naNil();
    if(!naIsHash(args[0]))
        return naNil();
    // The looked-up value itself is discarded; only presence matters.
    scratch = args[1];
    return naHash_get(args[0], args[1], &scratch) ? naNum(1) : naNum(0);
}
static naRef typeOf(naContext c, naRef args)
static naRef typeOf(naContext c, naRef me, int argc, naRef* args)
{
naRef r = naVec_get(args, 0);
naRef r = argc > 0 ? args[0] : naNil();
char* t = "unknown";
if(naIsNil(r)) t = "nil";
else if(naIsNum(r)) t = "scalar";
@@ -146,10 +163,258 @@ static naRef typeOf(naContext c, naRef args)
else if(naIsHash(r)) t = "hash";
else if(naIsFunc(r)) t = "func";
else if(naIsGhost(r)) t = "ghost";
r = naStr_fromdata(naNewString(c), t, StrLen(t));
r = NEWSTR(c, t, strlen(t));
return r;
}
// compile(src): parses and compiles the string src, returning a
// callable bound to the current context's namespace.  Returns nil for
// a non-string argument or on a parse error.
static naRef f_compile(naContext c, naRef me, int argc, naRef* args)
{
    int errLine;
    naRef script, code, fname;
    script = argc > 0 ? args[0] : naNil();
    if(!naIsString(script)) return naNil();
    // Synthetic "file name" used for error reporting / caller().
    fname = NEWSTR(c, "<compile>", 9);
    code = naParseCode(c, fname, 1,
                       naStr_data(script), naStr_len(script), &errLine);
    if(!naIsCode(code)) return naNil(); // FIXME: export error to caller...
    return naBindToContext(c, code);
}
// Function metacall API.  Allows user code to generate an arg vector
// at runtime and/or call function references on arbitrary objects:
// call(func, argvec?, me?, errvec?).
static naRef f_call(naContext c, naRef me, int argc, naRef* args)
{
    naContext subc;
    naRef callargs, callme, result;
    callargs = argc > 1 ? args[1] : naNil();
    callme = argc > 2 ? args[2] : naNil(); // Might be nil, that's OK
    // NOTE(review): args[0] is read without an argc >= 1 check --
    // presumably the interpreter guarantees at least one arg; confirm.
    if(!naIsFunc(args[0])) naRuntimeError(c, "call() on non-function");
    if(naIsNil(callargs)) callargs = naNewVector(c);
    else if(!naIsVector(callargs)) naRuntimeError(c, "call() args not vector");
    if(!naIsHash(callme)) callme = naNil();
    // Run the call in a child context linked to this one, so a die()
    // in the callee unwinds the child stack instead of ours.
    subc = naNewContext();
    subc->callParent = c;
    c->callChild = subc;
    result = naCall(subc, args[0], callargs, callme, naNil());
    c->callChild = 0;
    // If extra args were passed and the callee die()d, report the die
    // argument by appending it to the caller's last (vector) argument.
    if(argc > 2 && !IS_NIL(subc->dieArg))
        if(naIsVector(args[argc-1]))
            naVec_append(args[argc-1], subc->dieArg);
    naFreeContext(subc);
    return result;
}
// die(arg): aborts execution with a runtime error, stashing arg in
// the context so an enclosing call() can retrieve it via dieArg.
static naRef f_die(naContext c, naRef me, int argc, naRef* args)
{
    c->dieArg = argc > 0 ? args[0] : naNil();
    // NOTE(review): "__die__" looks like a sentinel message recognized
    // by the error machinery -- confirm.  naRuntimeError longjmps.
    naRuntimeError(c, "__die__");
    return naNil(); // never executes
}
// Wrapper around vsnprintf, iteratively increasing the buffer size
// until it fits.  Returned buffer should be freed by the caller.
char* dosprintf(char* f, ...)
{
    char* buf;
    va_list va;
    int n, len = 16;
    while(1) {
        buf = naAlloc(len);
        va_start(va, f);
        n = vsnprintf(buf, len, f, va);
        va_end(va);
        // Bug fix: the old test "vsnprintf(...) < len" accepted a
        // negative return as success.  MSVC's _vsnprintf returns -1 on
        // truncation (and C99 returns <0 on encoding error), so a
        // truncated, possibly unterminated buffer could be returned.
        // Only a non-negative count that fit in the buffer is success;
        // anything else retries with a doubled buffer.
        if(n >= 0 && n < len)
            return buf;
        naFree(buf);
        len *= 2;
    }
}
// Inspects a printf format string f, and finds the next "%..." format
// specifier.  Stores the start of the specifier in out, the length in
// len, and the type in type.  Returns a pointer to the remainder of
// the format string, or 0 if no format string was found.  Recognizes
// all of ANSI C's syntax except for the "length modifier" feature.
// Note: this does not validate the format character returned in
// "type".  That is the caller's job.
static char* nextFormat(naContext ctx, char* f, char** out, int* len, char* type)
{
    // Skip to the start of the format string
    while(*f && *f != '%') f++;
    if(!*f) return 0;
    *out = f++;
    // Flags: any of "-+ 0#" may follow the '%'.
    while(*f && (*f=='-' || *f=='+' || *f==' ' || *f=='0' || *f=='#')) f++;
    // Test for duplicate flags.  This is pure pedantry and could
    // be removed on all known platforms, but just to be safe...
    { char *p1, *p2;
      for(p1 = *out + 1; p1 < f; p1++)
          for(p2 = p1+1; p2 < f; p2++)
              if(*p1 == *p2)
                  naRuntimeError(ctx, "duplicate flag in format string"); }
    // Field width, optional '.', precision digits.
    while(*f && *f >= '0' && *f <= '9') f++;
    if(*f && *f == '.') f++;
    while(*f && *f >= '0' && *f <= '9') f++;
    // Hitting the terminator before a conversion character means the
    // specifier was cut short.
    if(!*f) naRuntimeError(ctx, "invalid format string");
    *type = *f++;
    *len = f - *out;
    return f;
}
#define ERR(m) naRuntimeError(ctx, m)
#define APPEND(r) result = naStr_concat(naNewString(ctx), result, r)
// sprintf(format, args...): C-style string formatting, consuming one
// Nasal value per "%" conversion.  Walks the format with nextFormat()
// and delegates each single conversion to dosprintf(), concatenating
// the pieces into the result string.
static naRef f_sprintf(naContext ctx, naRef me, int argc, naRef* args)
{
    char t, nultmp, *fstr, *next, *fout=0, *s;
    int flen, argn=1;
    naRef format, arg, result = naNewString(ctx);
    if(argc < 1) ERR("not enough arguments to sprintf");
    format = naStringValue(ctx, argc > 0 ? args[0] : naNil());
    if(naIsNil(format)) ERR("bad format string in sprintf");
    s = naStr_data(format);
    while((next = nextFormat(ctx, s, &fstr, &flen, &t))) {
        APPEND(NEWSTR(ctx, s, fstr-s)); // stuff before the format string
        // "%%" emits a literal percent and consumes no argument.
        if(flen == 2 && fstr[1] == '%') {
            APPEND(NEWSTR(ctx, "%", 1));
            s = next;
            continue;
        }
        if(argn >= argc) ERR("not enough arguments to sprintf");
        arg = args[argn++];
        // Temporarily terminate the specifier in place so it can be
        // handed to dosprintf() as a standalone format string;
        // restored below.
        nultmp = fstr[flen]; // sneaky nul termination...
        fstr[flen] = 0;
        if(t == 's') {
            arg = naStringValue(ctx, arg);
            if(naIsNil(arg)) fout = dosprintf(fstr, "nil");
            else fout = dosprintf(fstr, naStr_data(arg));
        } else {
            // Numeric conversions: cast to the C type the conversion
            // character expects.
            arg = naNumValue(arg);
            if(naIsNil(arg))
                fout = dosprintf(fstr, "nil");
            else if(t=='d' || t=='i' || t=='c')
                fout = dosprintf(fstr, (int)naNumValue(arg).num);
            else if(t=='o' || t=='u' || t=='x' || t=='X')
                fout = dosprintf(fstr, (unsigned int)naNumValue(arg).num);
            else if(t=='e' || t=='E' || t=='f' || t=='F' || t=='g' || t=='G')
                fout = dosprintf(fstr, naNumValue(arg).num);
            else
                ERR("invalid sprintf format type");
        }
        fstr[flen] = nultmp;
        APPEND(NEWSTR(ctx, fout, strlen(fout)));
        naFree(fout); // dosprintf() allocates; we own the buffer
        s = next;
    }
    // Trailing text after the last conversion.
    APPEND(NEWSTR(ctx, s, strlen(s)));
    return result;
}
// FIXME: handle ctx->callParent frames too!
// caller(depth): returns [locals, func, srcFile, line] for the stack
// frame 'depth' levels up, or nil when depth is past the bottom of
// the stack.  NOTE(review): depth 0 presumably denotes the function
// that invoked caller() -- confirm against the interpreter.
static naRef f_caller(naContext ctx, naRef me, int argc, naRef* args)
{
    int fidx;
    struct Frame* frame;
    naRef result, fr = argc ? naNumValue(args[0]) : naNil();
    if(IS_NIL(fr)) naRuntimeError(ctx, "non numeric argument to caller()");
    fidx = (int)fr.num;
    if(fidx > ctx->fTop - 1) return naNil();
    // Frames grow upward; index from the top of the stack.
    frame = &ctx->fStack[ctx->fTop - 1 - fidx];
    result = naNewVector(ctx);
    naVec_append(result, frame->locals);
    naVec_append(result, frame->func);
    naVec_append(result, frame->func.ref.ptr.func->code.ref.ptr.code->srcFile);
    naVec_append(result, naNum(naGetLine(ctx, fidx)));
    return result;
}
// closure(func, level): returns the namespace hash at the given depth
// in the function's closure chain, or nil if the chain is shorter
// than that.
static naRef f_closure(naContext ctx, naRef me, int argc, naRef* args)
{
    int i;
    naRef func, idx;
    struct naFunc* f;
    func = argc > 0 ? args[0] : naNil();
    idx = argc > 1 ? naNumValue(args[1]) : naNil();
    if(!IS_FUNC(func) || IS_NIL(idx))
        naRuntimeError(ctx, "bad arguments to closure()");
    i = (int)idx.num;
    f = func.ref.ptr.func;
    // Walk outward through enclosing closures until the requested
    // level (or the end of the chain) is reached.
    while(i > 0 && f) { i--; f = f->next.ref.ptr.func; }
    if(!f) return naNil();
    return f->namespace;
}
// Returns 1 if the first l bytes of a and b are identical, else 0.
// Uses the standard library's memcmp instead of a hand-rolled loop
// (<string.h> is already included above).
static int match(char* a, char* b, int l)
{
    return memcmp(a, b, l) == 0;
}
// Returns the index of the first occurrence of needle a (length al)
// within s (length sl), or -1 if not found.  An empty needle matches
// at index 0.  Uses memcmp for the window comparison (<string.h> is
// already included above).
static int find(char* a, int al, char* s, int sl)
{
    int i;
    if(al == 0) return 0;
    for(i=0; i<sl-al+1; i++)
        if(memcmp(a, s+i, al) == 0) return i;
    return -1;
}
// find(needle, haystack): index of the first occurrence of the needle
// string inside the haystack string, or -1 if absent.
static naRef f_find(naContext ctx, naRef me, int argc, naRef* args)
{
    // Bug fix: the error message said "split" -- a copy-paste from
    // f_split below -- which misreported which function failed.
    if(argc < 2 || !IS_STR(args[0]) || !IS_STR(args[1]))
        naRuntimeError(ctx, "bad/missing argument to find");
    return naNum(find(args[0].ref.ptr.str->data, args[0].ref.ptr.str->len,
                      args[1].ref.ptr.str->data, args[1].ref.ptr.str->len));
}
// split(delim, str): splits str at each occurrence of delim and
// returns a vector of the pieces.  A zero-length delimiter splits the
// string into its individual characters.
static naRef f_split(naContext ctx, naRef me, int argc, naRef* args)
{
    int sl, dl, i;
    char *s, *d, *s0;
    naRef result;
    if(argc < 2 || !IS_STR(args[0]) || !IS_STR(args[1]))
        naRuntimeError(ctx, "bad/missing argument to split");
    d = naStr_data(args[0]); dl = naStr_len(args[0]);
    s = naStr_data(args[1]); sl = naStr_len(args[1]);
    result = naNewVector(ctx);
    if(dl == 0) { // special case zero-length delimiter
        for(i=0; i<sl; i++) naVec_append(result, NEWSTR(ctx, s+i, 1));
        return result;
    }
    // s0 tracks the start of the current (not yet emitted) piece.
    s0 = s;
    for(i=0; i <= sl-dl; i++) {
        if(match(s+i, d, dl)) {
            naVec_append(result, NEWSTR(ctx, s0, s+i-s0));
            s0 = s + i + dl;
            i += dl - 1; // skip over the delimiter (loop adds 1 more)
        }
    }
    // Emit the final piece (possibly empty if str ends with delim).
    if(s0 - s <= sl) naVec_append(result, NEWSTR(ctx, s0, s+sl-s0));
    return result;
}
// A comparatively weak RNG, based on the C library's rand() function,
// which is usually not threadsafe and often of limited precision.
// Folding in five rand() calls guarantees a full double worth of
// precision even for 15 bit (Win32...) rand() implementations.  With
// a numeric argument, seeds the generator instead and returns nil.
static naRef f_rand(naContext ctx, naRef me, int argc, naRef* args)
{
    int k;
    double acc;
    if(argc > 0) {
        if(!IS_NUM(args[0])) naRuntimeError(ctx, "rand() seed not number");
        srand((unsigned int)args[0].num);
        return naNil();
    }
    acc = 0;
    for(k=0; k<5; k++)
        acc = (acc + rand()) * (1.0/(RAND_MAX+1.0));
    return naNum(acc);
}
struct func { char* name; naCFunction func; };
static struct func funcs[] = {
{ "size", size },
@@ -163,8 +428,19 @@ static struct func funcs[] = {
{ "num", num },
{ "streq", streq },
{ "substr", substr },
{ "strc", f_strc },
{ "chr", f_chr },
{ "contains", contains },
{ "typeof", typeOf },
{ "compile", f_compile },
{ "call", f_call },
{ "die", f_die },
{ "sprintf", f_sprintf },
{ "caller", f_caller },
{ "closure", f_closure },
{ "find", f_find },
{ "split", f_split },
{ "rand", f_rand },
};
naRef naStdLib(naContext c)
@@ -173,8 +449,8 @@ naRef naStdLib(naContext c)
int i, n = sizeof(funcs)/sizeof(struct func);
for(i=0; i<n; i++) {
naRef code = naNewCCode(c, funcs[i].func);
naRef name = naStr_fromdata(naNewString(c),
funcs[i].name, StrLen(funcs[i].name));
naRef name = NEWSTR(c, funcs[i].name, strlen(funcs[i].name));
name = naInternSymbol(name);
naHash_set(namespace, name, naNewFunc(c, code));
}
return namespace;

View File

@@ -7,55 +7,55 @@
#include "nasal.h"
static naRef f_sin(naContext c, naRef args)
static naRef f_sin(naContext c, naRef me, int argc, naRef* args)
{
naRef a = naNumValue(naVec_get(args, 0));
naRef a = naNumValue(argc > 0 ? args[0] : naNil());
if(naIsNil(a))
naRuntimeError(c, "non numeric argument to sin()");
a.num = sin(a.num);
return a;
}
static naRef f_cos(naContext c, naRef args)
static naRef f_cos(naContext c, naRef me, int argc, naRef* args)
{
naRef a = naNumValue(naVec_get(args, 0));
naRef a = naNumValue(argc > 0 ? args[0] : naNil());
if(naIsNil(a))
naRuntimeError(c, "non numeric argument to cos()");
a.num = cos(a.num);
return a;
}
static naRef f_exp(naContext c, naRef args)
static naRef f_exp(naContext c, naRef me, int argc, naRef* args)
{
naRef a = naNumValue(naVec_get(args, 0));
naRef a = naNumValue(argc > 0 ? args[0] : naNil());
if(naIsNil(a))
naRuntimeError(c, "non numeric argument to exp()");
a.num = exp(a.num);
return a;
}
static naRef f_ln(naContext c, naRef args)
static naRef f_ln(naContext c, naRef me, int argc, naRef* args)
{
naRef a = naNumValue(naVec_get(args, 0));
naRef a = naNumValue(argc > 0 ? args[0] : naNil());
if(naIsNil(a))
naRuntimeError(c, "non numeric argument to ln()");
a.num = log(a.num);
return a;
}
static naRef f_sqrt(naContext c, naRef args)
static naRef f_sqrt(naContext c, naRef me, int argc, naRef* args)
{
naRef a = naNumValue(naVec_get(args, 0));
naRef a = naNumValue(argc > 0 ? args[0] : naNil());
if(naIsNil(a))
naRuntimeError(c, "non numeric argument to sqrt()");
a.num = sqrt(a.num);
return a;
}
static naRef f_atan2(naContext c, naRef args)
static naRef f_atan2(naContext c, naRef me, int argc, naRef* args)
{
naRef a = naNumValue(naVec_get(args, 0));
naRef b = naNumValue(naVec_get(args, 1));
naRef a = naNumValue(argc > 0 ? args[0] : naNil());
naRef b = naNumValue(argc > 1 ? args[1] : naNil());
if(naIsNil(a) || naIsNil(b))
naRuntimeError(c, "non numeric argument to atan2()");
a.num = atan2(a.num, b.num);
@@ -87,6 +87,7 @@ naRef naMathLib(naContext c)
naHash_set(namespace, name, naNum(M_PI));
name = naStr_fromdata(naNewString(c), "e", 1);
name = naInternSymbol(name);
naHash_set(namespace, name, naNum(M_E));
return namespace;

View File

@@ -49,7 +49,10 @@ naRef naStringValue(naContext c, naRef r)
naRef naNew(struct Context* c, int type)
{
naRef result = naObj(type, naGC_get(&(c->pools[type])));
if(c->nfree[type] == 0)
c->free[type] = naGC_get(&globals->pools[type],
OBJ_CACHE_SZ, &c->nfree[type]);
naRef result = naObj(type, c->free[type][--c->nfree[type]]);
naVec_append(c->temps, result);
return result;
}
@@ -59,20 +62,21 @@ naRef naNewString(struct Context* c)
naRef s = naNew(c, T_STR);
s.ref.ptr.str->len = 0;
s.ref.ptr.str->data = 0;
s.ref.ptr.str->hashcode = 0;
return s;
}
naRef naNewVector(struct Context* c)
{
naRef r = naNew(c, T_VEC);
naVec_init(r);
r.ref.ptr.vec->rec = 0;
return r;
}
naRef naNewHash(struct Context* c)
{
naRef r = naNew(c, T_HASH);
naHash_init(r);
r.ref.ptr.hash->rec = 0;
return r;
}
@@ -92,18 +96,11 @@ naRef naNewFunc(struct Context* c, naRef code)
{
naRef func = naNew(c, T_FUNC);
func.ref.ptr.func->code = code;
func.ref.ptr.func->closure = naNil();
func.ref.ptr.func->namespace = naNil();
func.ref.ptr.func->next = naNil();
return func;
}
// Allocates a closure object pairing a namespace hash with a link to
// the next (enclosing) closure in the chain.
naRef naNewClosure(struct Context* c, naRef namespace, naRef next)
{
    naRef closure = naNew(c, T_CLOSURE);
    closure.ref.ptr.closure->namespace = namespace;
    closure.ref.ptr.closure->next = next;
    return closure;
}
naRef naNewGhost(naContext c, naGhostType* type, void* ptr)
{
naRef ghost = naNew(c, T_GHOST);
@@ -162,6 +159,19 @@ int naEqual(naRef a, naRef b)
return na == nb ? 1 : 0;
}
// Byte-wise string equality.  Returns 1 when both refs are strings of
// the same length with identical contents, 0 otherwise (including
// when either argument is not a string).
int naStrEqual(naRef a, naRef b)
{
    struct naStr *sa, *sb;
    int i;
    if(!IS_STR(a) || !IS_STR(b)) return 0;
    sa = a.ref.ptr.str;
    sb = b.ref.ptr.str;
    if(sa->len != sb->len) return 0;
    for(i=0; i<sa->len; i++)
        if(sa->data[i] != sb->data[i]) return 0;
    return 1;
}
int naTypeSize(int type)
{
switch(type) {
@@ -170,7 +180,6 @@ int naTypeSize(int type)
case T_HASH: return sizeof(struct naHash);
case T_CODE: return sizeof(struct naCode);
case T_FUNC: return sizeof(struct naFunc);
case T_CLOSURE: return sizeof(struct naClosure);
case T_CCODE: return sizeof(struct naCCode);
case T_GHOST: return sizeof(struct naGhost);
};

View File

@@ -62,7 +62,6 @@ typedef union {
struct naHash* hash;
struct naCode* code;
struct naFunc* func;
struct naClosure* closure;
struct naCCode* ccode;
struct naGhost* ghost;
} ptr;
@@ -75,10 +74,11 @@ typedef union {
typedef struct Context* naContext;
// The function signature for an extension function:
typedef naRef (*naCFunction)(naContext ctx, naRef args);
typedef naRef (*naCFunction)(naContext ctx, naRef me, int argc, naRef* args);
// All Nasal code runs under the watch of a naContext:
naContext naNewContext();
void naFreeContext(naContext c);
// Save this object in the context, preventing it (and objects
// referenced by it) from being garbage collected.
@@ -95,6 +95,10 @@ naRef naParseCode(naContext c, naRef srcFile, int firstLine,
// information from function objects.
naRef naBindFunction(naContext ctx, naRef code, naRef closure);
// Similar, but it binds to the current context's closure (i.e. the
// namespace at the top of the current call stack).
naRef naBindToContext(naContext ctx, naRef code);
// Call a code or function object with the specifed arguments "on" the
// specified object and using the specified hash for the local
// variables. Any of args, obj or locals may be nil.
@@ -146,6 +150,7 @@ naRef naNewCCode(naContext c, naCFunction fptr);
// Some useful conversion/comparison routines
int naEqual(naRef a, naRef b);
int naStrEqual(naRef a, naRef b);
int naTrue(naRef b);
naRef naNumValue(naRef n);
naRef naStringValue(naContext c, naRef n);
@@ -156,6 +161,7 @@ char* naStr_data(naRef s);
naRef naStr_fromdata(naRef dst, char* data, int len);
naRef naStr_concat(naRef dest, naRef s1, naRef s2);
naRef naStr_substr(naRef dest, naRef str, int start, int len);
naRef naInternSymbol(naRef sym);
// Vector utilities:
int naVec_size(naRef v);
@@ -183,6 +189,20 @@ naGhostType* naGhost_type(naRef ghost);
void* naGhost_ptr(naRef ghost);
int naIsGhost(naRef r);
// Acquires a "modification lock" on a context, allowing the C code to
// modify Nasal data without fear that such data may be "lost" by the
// garbage collector (the C stack is not examined in GC!). This
// disallows garbage collection until the current thread can be
// blocked. The lock should be acquired whenever modifications to
// Nasal objects are made. It need not be acquired when only read
// access is needed. It MUST NOT be acquired by naCFunction's, as
// those are called with the lock already held; acquiring two locks
// for the same thread will cause a deadlock when the GC is invoked.
// It should be UNLOCKED by naCFunction's when they are about to do
// any long term non-nasal processing and/or blocking I/O.
void naModLock();
void naModUnlock();
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -12,14 +12,16 @@ struct precedence {
int rule;
} PRECEDENCE[] = {
{ { TOK_SEMI, TOK_COMMA }, PREC_REVERSE },
{ { TOK_COLON }, PREC_BINARY },
{ { TOK_ELLIPSIS }, PREC_SUFFIX },
{ { TOK_RETURN, TOK_BREAK, TOK_CONTINUE }, PREC_PREFIX },
{ { TOK_ASSIGN }, PREC_REVERSE },
{ { TOK_COLON, TOK_QUESTION }, PREC_REVERSE },
{ { TOK_VAR }, PREC_PREFIX },
{ { TOK_OR }, PREC_BINARY },
{ { TOK_AND }, PREC_BINARY },
{ { TOK_EQ, TOK_NEQ }, PREC_BINARY },
{ { TOK_LT, TOK_LTE, TOK_GT, TOK_GTE }, PREC_BINARY },
{ { TOK_PLUS, TOK_MINUS, TOK_CAT }, PREC_REVERSE },
{ { TOK_PLUS, TOK_MINUS, TOK_CAT }, PREC_REVERSE },
{ { TOK_MUL, TOK_DIV }, PREC_BINARY },
{ { TOK_MINUS, TOK_NEG, TOK_NOT }, PREC_PREFIX },
{ { TOK_LPAR, TOK_LBRA }, PREC_SUFFIX },
@@ -210,7 +212,15 @@ static void fixBlockStructure(struct Parser* p, struct Token* start)
t = start;
while(t) {
switch(t->type) {
case TOK_ELSE: case TOK_FUNC:
case TOK_FUNC:
// Slurp an optional paren block containing an arglist, then
// fall through to parse the curlies...
if(t->next && t->next->type == TOK_LPAR) {
c = t->next;
addNewChild(t, c);
fixBlockStructure(p, c);
}
case TOK_ELSE: // and TOK_FUNC!
// These guys precede a single curly block
if(!t->next || t->next->type != TOK_LCURL) oops(p, t);
c = t->next;
@@ -276,6 +286,8 @@ static void fixBlockStructure(struct Parser* p, struct Token* start)
addSemi = 1;
break;
}
if(t->next && t->next->type == TOK_SEMI)
addSemi = 0; // don't bother if it's already there!
if(addSemi) {
struct Token* semi = emptyToken(p);
semi->type = TOK_SEMI;
@@ -519,7 +531,7 @@ naRef naParseCode(struct Context* c, naRef srcFile, int firstLine,
p.tree.lastChild = t;
// Generate code!
codeObj = naCodeGen(&p, &(p.tree));
codeObj = naCodeGen(&p, &(p.tree), 0);
// Clean up our mess
naParseDestroy(&p);

View File

@@ -14,7 +14,7 @@ enum {
TOK_ASSIGN, TOK_LT, TOK_LTE, TOK_EQ, TOK_NEQ, TOK_GT, TOK_GTE,
TOK_IF, TOK_ELSIF, TOK_ELSE, TOK_FOR, TOK_FOREACH, TOK_WHILE,
TOK_RETURN, TOK_BREAK, TOK_CONTINUE, TOK_FUNC, TOK_SYMBOL,
TOK_LITERAL, TOK_EMPTY, TOK_NIL
TOK_LITERAL, TOK_EMPTY, TOK_NIL, TOK_ELLIPSIS, TOK_QUESTION, TOK_VAR
};
struct Token {
@@ -58,7 +58,7 @@ struct Parser {
// Computed line number table for the lexer
int* lines;
int nLines;
struct CodeGenerator* cg;
};
@@ -66,10 +66,15 @@ struct CodeGenerator {
int lastLine;
// Accumulated byte code array
unsigned char* byteCode;
int nBytes;
unsigned short* byteCode;
int codesz;
int codeAlloced;
// Inst. -> line table, stores pairs of {ip, line}
unsigned short* lineIps;
int nLineIps; // number of pairs
int nextLineIp;
// Stack of "loop" frames for break/continue statements
struct {
int breakIP;
@@ -87,7 +92,7 @@ void naParseInit(struct Parser* p);
void* naParseAlloc(struct Parser* p, int bytes);
void naParseDestroy(struct Parser* p);
void naLex(struct Parser* p);
naRef naCodeGen(struct Parser* p, struct Token* tok);
naRef naCodeGen(struct Parser* p, struct Token* block, struct Token* arglist);
void naParse(struct Parser* p);

View File

@@ -8,11 +8,6 @@
// double.
#define DIGITS 16
// The minimum size we'll allocate for a string. Since a string
// structure is already 12 bytes, and each naRef that points to it is
// 8, there isn't much point in being stingy.
#define MINLEN 16
static int tonum(unsigned char* s, int len, double* result);
static int fromnum(double val, unsigned char* s);
@@ -30,16 +25,10 @@ char* naStr_data(naRef s)
static void setlen(struct naStr* s, int sz)
{
int currSz, waste;
sz += 1; // Allow for an extra nul terminator
currSz = s->len+1 < MINLEN ? MINLEN : s->len+1;
waste = currSz - sz; // how much extra if we don't reallocate?
if(s->data == 0 || waste < 0 || waste > MINLEN) {
naFree(s->data);
s->data = naAlloc(sz < MINLEN ? MINLEN : sz);
}
s->len = sz - 1;
s->data[s->len] = 0; // nul terminate
if(s->data) naFree(s->data);
s->len = sz;
s->data = naAlloc(sz+1);
s->data[sz] = 0; // nul terminate
}
naRef naStr_fromdata(naRef dst, char* data, int len)
@@ -110,10 +99,8 @@ int naStr_numeric(naRef str)
void naStr_gcclean(struct naStr* str)
{
if(str->len > MINLEN) {
naFree(str->data);
str->data = 0;
}
naFree(str->data);
str->data = 0;
str->len = 0;
}
@@ -203,8 +190,11 @@ static int tonum(unsigned char* s, int len, double* result)
if(i == 0) return 0;
// Read the exponent, if any
if(i < len && (s[i] == 'e' || s[i] == 'E'))
if(i < len && (s[i] == 'e' || s[i] == 'E')) {
int i0 = i+1;
i = readsigned(s, len, i+1, &exp);
if(i == i0) return 0; // Must have a number after the "e"
}
// compute the result
*result = sgn * (val + frac * decpow(-fraclen)) * decpow(exp);
@@ -281,7 +271,7 @@ static int fromnum(double val, unsigned char* s)
if(raw[i] != '0') break;
digs = i+1;
if(exp > 0 || exp < -(DIGITS+2)) {
if(exp > 0 || exp < -(DIGITS+3)) {
// Standard scientific notation
exp += DIGITS-1;
*ptr++ = raw[0];

View File

@@ -0,0 +1,57 @@
#ifndef _WIN32
#include <pthread.h>
#include "code.h"
// Allocate and initialize a fresh mutex, returned as an opaque handle
// for naLock()/naUnlock().  Memory comes from the interpreter's
// allocator (naAlloc); the handle is never freed in this module.
void* naNewLock()
{
    pthread_mutex_t* m = naAlloc(sizeof(pthread_mutex_t));
    pthread_mutex_init(m, 0);
    return (void*)m;
}
// Block until the mutex behind the opaque handle can be acquired.
void naLock(void* lock)
{
    pthread_mutex_t* m = (pthread_mutex_t*)lock;
    pthread_mutex_lock(m);
}
// Release a mutex previously acquired with naLock().
void naUnlock(void* lock)
{
    pthread_mutex_t* m = (pthread_mutex_t*)lock;
    pthread_mutex_unlock(m);
}
// Counting semaphore built from a mutex plus a condition variable.
struct naSem {
    pthread_mutex_t lock;  // guards count
    pthread_cond_t cvar;   // signaled when count becomes positive
    int count;
};

// Create a semaphore with an initial count of zero, returned as an
// opaque handle for naSemDown()/naSemUpAll().
void* naNewSem()
{
    struct naSem* s = naAlloc(sizeof(struct naSem));
    s->count = 0;
    pthread_mutex_init(&s->lock, 0);
    pthread_cond_init(&s->cvar, 0);
    return (void*)s;
}
// Decrement the semaphore, blocking while the count is zero or less.
// The while loop re-tests the predicate after each wakeup, which is
// the required guard against spurious pthread_cond_wait returns.
void naSemDown(void* sh)
{
    struct naSem* s = (struct naSem*)sh;
    pthread_mutex_lock(&s->lock);
    while(s->count <= 0)
        pthread_cond_wait(&s->cvar, &s->lock);
    --s->count;
    pthread_mutex_unlock(&s->lock);
}
// Set the semaphore count and wake every waiter.  Note that the count
// is overwritten (not incremented): the caller decides exactly how
// many naSemDown() calls may proceed after the broadcast.
void naSemUpAll(void* sh, int count)
{
    struct naSem* s = (struct naSem*)sh;
    pthread_mutex_lock(&s->lock);
    s->count = count;
    pthread_cond_broadcast(&s->cvar);
    pthread_mutex_unlock(&s->lock);
}
#endif

View File

@@ -0,0 +1,20 @@
#ifdef _WIN32
#include <stdlib.h>
#include <windows.h>
#define MAX_SEM_COUNT 1024 // What are the tradeoffs with this value?
// Allocate and initialize a critical section, returned as an opaque
// lock handle for naLock()/naUnlock().  Returns 0 if the allocation
// fails; the original passed an unchecked malloc() result straight to
// InitializeCriticalSection, which dereferences it (CERT MEM32-C).
void* naNewLock()
{
    LPCRITICAL_SECTION lock = malloc(sizeof(CRITICAL_SECTION));
    if(!lock) return 0;
    InitializeCriticalSection(lock);
    return lock;
}
void naLock(void* lock) { EnterCriticalSection((LPCRITICAL_SECTION)lock); }
void naUnlock(void* lock) { LeaveCriticalSection((LPCRITICAL_SECTION)lock); }
void* naNewSem() { return CreateSemaphore(0, 0, MAX_SEM_COUNT, 0); }
void naSemDown(void* sem) { WaitForSingleObject((HANDLE)sem, INFINITE); }
void naSemUpAll(void* sem, int count) { ReleaseSemaphore(sem, count, 0); }
#endif

View File

@@ -3,82 +3,88 @@
static void realloc(struct naVec* v)
{
int i, newsz = 1 + ((v->size*3)>>1);
naRef* na = naAlloc(sizeof(naRef) * newsz);
v->alloced = newsz;
for(i=0; i<v->size; i++)
na[i] = v->array[i];
naFree(v->array);
v->array = na;
}
void naVec_init(naRef vec)
{
struct naVec* v = vec.ref.ptr.vec;
v->array = 0;
v->size = 0;
v->alloced = 0;
struct VecRec* old = v->rec;
int i, oldsz = old ? old->size : 0, newsz = 1 + ((oldsz*3)>>1);
struct VecRec* vr = naAlloc(sizeof(struct VecRec) + sizeof(naRef) * newsz);
if(oldsz > newsz) oldsz = newsz; // race protection
vr->alloced = newsz;
vr->size = oldsz;
for(i=0; i<oldsz; i++)
vr->array[i] = old->array[i];
naGC_swapfree((void**)&(v->rec), vr);
}
void naVec_gcclean(struct naVec* v)
{
naFree(v->array);
v->size = 0;
v->alloced = 0;
v->array = 0;
naFree(v->rec);
v->rec = 0;
}
naRef naVec_get(naRef v, int i)
{
if(!IS_VEC(v)) return naNil();
if(i >= v.ref.ptr.vec->size) return naNil();
return v.ref.ptr.vec->array[i];
if(IS_VEC(v)) {
struct VecRec* r = v.ref.ptr.vec->rec;
if(r && i < r->size) return r->array[i];
}
return naNil();
}
void naVec_set(naRef vec, int i, naRef o)
{
struct naVec* v = vec.ref.ptr.vec;
if(!IS_VEC(vec) || i >= v->size) return;
v->array[i] = o;
if(IS_VEC(vec)) {
struct VecRec* r = vec.ref.ptr.vec->rec;
if(r && i >= r->size) return;
r->array[i] = o;
}
}
int naVec_size(naRef v)
{
if(!IS_VEC(v)) return 0;
return v.ref.ptr.vec->size;
if(IS_VEC(v)) {
struct VecRec* r = v.ref.ptr.vec->rec;
return r ? r->size : 0;
}
return 0;
}
int naVec_append(naRef vec, naRef o)
{
struct naVec* v = vec.ref.ptr.vec;
if(!IS_VEC(vec)) return 0;
if(v->size >= v->alloced)
realloc(v);
v->array[v->size] = o;
return v->size++;
if(IS_VEC(vec)) {
struct VecRec* r = vec.ref.ptr.vec->rec;
if(!r || r->size >= r->alloced) {
realloc(vec.ref.ptr.vec);
r = vec.ref.ptr.vec->rec;
}
r->array[r->size] = o;
return r->size++;
}
return 0;
}
void naVec_setsize(naRef vec, int sz)
{
int i;
struct naVec* v = vec.ref.ptr.vec;
naRef* na = naAlloc(sizeof(naRef) * sz);
struct VecRec* v = vec.ref.ptr.vec->rec;
struct VecRec* nv = naAlloc(sizeof(struct VecRec) + sizeof(naRef) * sz);
nv->size = sz;
nv->alloced = sz;
for(i=0; i<sz; i++)
na[i] = (i < v->size) ? v->array[i] : naNil();
naFree(v->array);
v->array = na;
v->size = sz;
v->alloced = sz;
nv->array[i] = (v && i < v->size) ? v->array[i] : naNil();
naFree(v);
vec.ref.ptr.vec->rec = nv;
}
naRef naVec_removelast(naRef vec)
{
naRef o;
struct naVec* v = vec.ref.ptr.vec;
if(!IS_VEC(vec) || v->size == 0) return naNil();
o = v->array[v->size - 1];
v->size--;
if(v->size < (v->alloced >> 1))
realloc(v);
return o;
if(IS_VEC(vec)) {
struct VecRec* v = vec.ref.ptr.vec->rec;
if(!v || v->size == 0) return naNil();
o = v->array[v->size - 1];
v->size--;
if(v->size < (v->alloced >> 1))
realloc(vec.ref.ptr.vec);
return o;
}
return naNil();
}