Compare commits


No commits in common. "5d6734be4af088ecb52051729dfdc4eb0933a60b" and "8aab5dd153e2ccc6b16ccd6400517673b4a45d89" have entirely different histories.

8 changed files with 40 additions and 362 deletions


@@ -1,2 +1,2 @@
all:
$(CC) -D_USE_MATH_DEFINES -I./ -O2 -fopenmp -flto -fwhole-program -g -std=c11 -o nua main.c parse.c vm.c lexer.c -lm -pthread
$(CC) -I./ -O2 -std=c11 -o nua main.c parse.c vm.c lexer.c -lm


@@ -21,5 +21,3 @@ Impotent is still work-in-progress:
Impotent requires C11 and an architecture with 8-byte atomic operations, but otherwise it is completely cross-platform.
Performance-wise, it's surprisingly competitive with PoC Lua, considering how quickly it was made up to the point of writing this README (~2 weeks). By far the worst bottleneck is the GC, since it requires all threads and their heaps to synchronize.
Certain Lua idioms become impossible under Impotent. For example the idiom of appending to tables (`t[#t + 1] = x`) isn't atomic, therefore `table.insert` should be used instead.
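
The caveat above maps directly onto the base-side ltable API that this compare deletes from table.h further down. A minimal sketch, assuming the base-side (5d6734be) headers and not taken from either commit: `#t` and the store compile into two separate table operations that each take and release the lock on their own, while `table.insert` holds one write lock across both steps.

/* Sketch only, against the base-side table.h; not code from either commit. */
#include "table.h"

/* Roughly what `t[#t + 1] = x` boils down to: the length lookup and the store
 * lock separately, so another thread can append between the two calls and one
 * of the writes lands on the same index. */
static void append_racy(LTable *t, LValue x) {
    size_t border = ltable_len(t);                    /* takes and releases the write lock */
    ltable_set(t, lvalue_from_int32(border + 1), x);  /* read-locks independently */
}

/* What the table.insert native does: ltable_insert recomputes the border and
 * stores while holding a single write lock, so the append is atomic. */
static void append_atomic(LTable *t, LValue x) {
    ltable_insert(t, x, 0);  /* the index argument is ignored and recomputed */
}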

lexer.c (11 changed lines)

@@ -99,9 +99,6 @@ vec_Token ltokenize(const char *buf, size_t len) {
} else if(buf[0] == '}') {
vec_Token_push(&tokens, (Token) {.type = TOK_SQUIGGLY_R});
buf++, len--;
} else if(buf[0] == '#') {
vec_Token_push(&tokens, (Token) {.type = TOK_SHARP});
buf++, len--;
} else if(len > 1 && buf[0] == '~' && buf[1] == '=') {
vec_Token_push(&tokens, (Token) {.type = TOK_NOT_EQUAL});
buf++, len--;
@@ -119,14 +116,6 @@ vec_Token ltokenize(const char *buf, size_t len) {
vec_Token_push(&tokens, (Token) {.text = strndup(buf, idlen), .type = TOK_NUMBER});
buf += idlen, len -= idlen;
} else if(buf[0] == '-' && (len == 1 || buf[1] != '-')) {
vec_Token_push(&tokens, (Token) {.type = TOK_MINUS});
buf++, len--;
} else if(len >= 2 && buf[0] == '-' && buf[1] == '-') {
while(*buf != '\n') {
buf++, len--;
}
row++;
} else if(buf[0] == '\'' || buf[0] == '\"') {
bool single = buf[0] == '\'';

main.c (85 changed lines)

@@ -5,102 +5,38 @@
#include"lexer.h"
#include"str.h"
#include"dump.h"
#include<stdatomic.h>
#include<math.h>
static size_t native_print(LVM *lvm, void *ud, size_t argn, set_LValueU *heap, LRegSet *regset) {
static size_t native_print(LVM *lvm, void *ud, size_t argn, LRegSet *regset) {
if(lvalue_tag(regset->regs[0]) == LTAG_STRING) {
LString *lstr = (void*) (regset->regs[0].u & ~LTAG_MASK);
printf("%.*s\n", (int) lstr->length, lstr->data);
} else if(lvalue_tag(regset->regs[0]) == LTAG_I32) {
printf("%i\n", lvalue_to_int32(regset->regs[0]));
} else if(lvalue_tag(regset->regs[0]) == LTAG_TABLE) {
printf("table: 0x%lx\n", (uintptr_t) (regset->regs[0].u & ~LTAG_MASK));
} else if(lvalue_tag(regset->regs[0]) == LTAG_FUNCTION) {
printf("function: 0x%lx\n", (uintptr_t) (regset->regs[0].u & ~LTAG_MASK));
} else if(regset->regs[0].u == LTAG_NIL) {
printf("nil\n");
} else if(regset->regs[0].u == LTAG_TRUE) {
printf("true\n");
} else if(regset->regs[0].u == LTAG_FALSE) {
printf("false\n");
} else if(!isnan(regset->regs[0].f)) {
printf("%f\n", regset->regs[0].f);
}
return 0;
}
static size_t table_insert(LVM *lvm, void *ud, size_t argn, set_LValueU *heap, LRegSet *regset) {
LTable *tbl = (LTable*) (regset->regs[0].u & ~LTAG_MASK);
ltable_insert(tbl, regset->regs[1], 0);
return 0;
}
// This function is intended for small-medium runtimes, since the caller thread's heap is not touchable during this call.
static size_t threads_parallel(LVM *lvm, void *ud, size_t argn, set_LValueU *heap, LRegSet *regset) {
size_t no = lvalue_to_int32(regset->regs[0]);
LFunc *func = (LFunc*) (regset->regs[1].u & ~LTAG_MASK);
atomic_fetch_sub(&lvm->active_thread_count, 1);
#pragma omp parallel for
for(size_t i = 0; i < no; i++) {
LRegSet regset = {};
lvm_reset_regs(&regset);
lvm_run(lvm, func, 0, &regset);
}
atomic_fetch_add(&lvm->active_thread_count, 1);
return 0;
}
static char* read_full_file(const char *fn) {
FILE *f = fn ? fopen(fn, "rb") : stdin;
FILE *f = fopen(fn, "rb");
fseek(f, 0, SEEK_END);
size_t filesize = ftell(f);
fseek(f, 0, SEEK_SET);
char *buf = malloc(filesize + 1);
fread(buf, 1, filesize, f);
buf[filesize] = '\0';
if(fn) fclose(f);
fclose(f);
return buf;
}
int main(int argc, char **argv) {
LTable *env = ltable_new(128);
ltable_set(env,
lvalue_from_string(lstring_newz("print")),
lvalue_from_func(lvm_func_from_native(native_print, NULL)));
LString *key = lstring_newz("print");
LFunc *func = lvm_func_from_native(native_print, NULL);
LTable *table = ltable_new(16);
{
ltable_set(table,
lvalue_from_string(lstring_newz("insert")),
lvalue_from_func(lvm_func_from_native(table_insert, NULL)));
}
ltable_set(env,
lvalue_from_string(lstring_newz("table")),
lvalue_from_table(table));
LTable *threads = ltable_new(16);
{
ltable_set(threads,
lvalue_from_string(lstring_newz("parallel")),
lvalue_from_func(lvm_func_from_native(threads_parallel, NULL)));
}
ltable_set(env,
lvalue_from_string(lstring_newz("threads")),
lvalue_from_table(threads));
LTable *math = ltable_new(16);
{
ltable_set(math, lvalue_from_string(lstring_newz("pi")), (LValue) {.f = 3.141592653589793238463});
}
ltable_set(env,
lvalue_from_string(lstring_newz("math")),
lvalue_from_table(math));
ltable_set(env, lvalue_from_string(key), lvalue_from_func(func));
//const char *bufs = "for i = 1, 10000 do print(i) if i % 3 == 0 then print(\"Fizz\") end if i % 5 == 0 then print(\"Buzz\") end end";
//const char *bufs = "local t = {a = 9} print(t.a)";
@@ -123,9 +59,12 @@ int main(int argc, char **argv) {
LVM lvm = {};
lvm_init(&lvm);
LRegSet regset = {.parent = NULL};
lvm_reset_regs(&regset);
lvm_run(&lvm, &unit->funcs[0], 0, &regset);
//#pragma omp parallel for
for(int i = 0; i < 1; i++) {
LRegSet regset = {.parent = NULL};
lvm_reset_regs(&regset);
lvm_run(&lvm, &unit->funcs[0], 0, &regset);
}
lvm_destroy(&lvm);
}
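
For orientation, adding another native follows the same pattern main() uses for `print` above. A minimal sketch against the 8aab5dd-side callback signature; the `native_greet` helper is hypothetical and not code from either commit.

/* Hypothetical native, mirroring native_print's shape on the 8aab5dd side. */
static size_t native_greet(LVM *lvm, void *ud, size_t argn, LRegSet *regset) {
    (void) lvm; (void) ud; (void) argn; (void) regset;
    puts("hello from a native function");
    return 0;  /* no return values, same as native_print */
}

/* ...and next to the existing "print" registration inside main(): */
ltable_set(env,
    lvalue_from_string(lstring_newz("greet")),
    lvalue_from_func(lvm_func_from_native(native_greet, NULL)));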

parse.c (137 changed lines)

@@ -92,12 +92,9 @@ typedef enum ExprKind {
EX_EQ,
EX_NEQ,
EX_TBL_LIT,
EX_FUNC_LIT,
EX_DOT,
EX_INDEX,
EX_CALL,
EX_STR,
EX_LENGTH,
} ExprKind;
typedef struct Expr {
ExprKind kind;
@@ -417,11 +414,7 @@ Expr *desc_subexp(Parser *P, int priority) {
} else if(priority == 3) {
Expr *e = NULL;
if(maybe(P, TOK_SHARP)) {
e = new_expr(0);
e->kind = EX_LENGTH;
e->A = desc_subexp(P, priority);
} else if(maybe(P, TOK_TRUE)) {
if(maybe(P, TOK_TRUE)) {
e = new_expr(0);
e->kind = EX_BOOL;
e->b = true;
@@ -456,25 +449,6 @@ Expr *desc_subexp(Parser *P, int priority) {
e = new_expr(0);
e->kind = EX_STR;
e->name = str;
} else if(maybe(P, TOK_FUNCTION)) {
e = new_expr(0);
e->kind = EX_FUNC_LIT;
e->table_first_token = P->i - 1;
size_t depth = 1;
while(1) {
Token t = get(P);
if(t.type == TOK_THEN || t.type == TOK_DO) {
depth++;
} else if(t.type == TOK_END) {
depth--;
if(depth == 0) {
break;
}
}
}
e->table_last_token = P->i - 1;
} else if(maybe(P, TOK_SQUIGGLY_L)) {
e = new_expr(0);
e->kind = EX_TBL_LIT;
@@ -497,7 +471,7 @@ Expr *desc_subexp(Parser *P, int priority) {
}
if(e) {
while(maybe(P, TOK_PAREN_L) || maybe(P, TOK_DOT) || maybe(P, TOK_SQUAREN_L)) {
while(maybe(P, TOK_PAREN_L) || maybe(P, TOK_DOT)) {
if(peek(P, -1).type == TOK_PAREN_L) {
Expr *call = new_expr(sizeof(Expr*) + sizeof(Expr*) * 32);
call->kind = EX_CALL;
@@ -519,20 +493,11 @@ Expr *desc_subexp(Parser *P, int priority) {
e = call;
} else if(peek(P, -1).type == TOK_DOT) {
Expr *dot = new_expr(0);
dot->kind = EX_DOT;
dot->kind = EX_INDEX;
dot->A = e;
dot->B_tok = expect(P, TOK_NAME);
e = dot;
} else if(peek(P, -1).type == TOK_SQUAREN_L) {
Expr *index = new_expr(0);
index->kind = EX_INDEX;
index->A = e;
index->B = desc_exp(P);
expect(P, TOK_SQUAREN_R);
e = index;
}
}
@@ -689,7 +654,7 @@ void emit_expr(Parser *P, int assigned_vreg, Expr *expr) {
for(size_t i = 1; i < expr->sub_count; i++) {
int av = find_vreg(P);
emit_expr(P, av, expr->subs[i]);
emit_expr(P, av, expr->subs[i++]);
args[(*arg_count)++] = av;
alloc_vreg(P, av);
}
@@ -702,66 +667,6 @@ void emit_expr(Parser *P, int assigned_vreg, Expr *expr) {
}
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_CALL, .a = vreg, .bc = abyss_insert(P, buf, 2 + *arg_count)});
} else if(expr->kind == EX_FUNC_LIT) {
size_t old_idx = P->i;
P->i = expr->table_first_token;
expect(P, TOK_FUNCTION);
expect(P, TOK_PAREN_L);
Chunk old_chunk = P->current_chunk;
P->current_chunk = (Chunk) {};
Scope *new_scope = calloc(1, sizeof(*new_scope));
new_scope->parent = P->scope;
P->scope = new_scope;
size_t arg_count = 0;
if(!maybe(P, TOK_PAREN_R)) {
while(1) {
expect(P, TOK_NAME);
P->i--;
ScopeItem *si = calloc(1, sizeof(*si));
si->name = expect(P, TOK_NAME);
si->vreg = arg_count++;
scope_set_direct(P->scope, si);
if(maybe(P, TOK_PAREN_R)) {
break;
} else {
expect(P, TOK_COMMA);
}
}
}
parse_chunk(P);
if(P->current_chunk.instrs.data[P->current_chunk.instrs.size - 1].opcode != L_RET) {
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_RET, .argb = {0}});
}
expect(P, TOK_END);
assert(P->unit_functions.size > 0);
LFunc lf = {};
lf.unit = P->unit_functions.data[0].unit;
lf.is_native = false;
lf.lua_instrs = P->current_chunk.instrs.data;
lf.env = P->environment;
vec_LFunc_push(&P->unit_functions, lf);
size_t function_idx = P->unit_functions.size - 1;
P->current_chunk = old_chunk;
scope_kill(P);
P->i = old_idx;
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_SETFUNC, .a = assigned_vreg, .bc = function_idx});
} else if(expr->kind == EX_TBL_LIT) {
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_SETTABLE, .a = assigned_vreg, .bc = 16});
@@ -770,8 +675,6 @@ void emit_expr(Parser *P, int assigned_vreg, Expr *expr) {
expect(P, TOK_SQUIGGLY_L);
alloc_vreg(P, assigned_vreg);
if(!maybe(P, TOK_SQUIGGLY_R)) {
while(1) {
int keyv = find_vreg(P);
@@ -807,10 +710,8 @@ void emit_expr(Parser *P, int assigned_vreg, Expr *expr) {
}
}
free_vreg(P, assigned_vreg);
P->i = old_idx;
} else if(expr->kind == EX_DOT) {
} else if(expr->kind == EX_INDEX) {
Token field = expr->B_tok;
emit_expr(P, assigned_vreg, expr->A);
@@ -827,19 +728,6 @@ void emit_expr(Parser *P, int assigned_vreg, Expr *expr) {
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_GETFIELD, .a = assigned_vreg, .b = assigned_vreg, .c = keyv});
free_vreg(P, assigned_vreg);
} else if(expr->kind == EX_INDEX) {
emit_expr(P, assigned_vreg, expr->A);
alloc_vreg(P, assigned_vreg);
int keyv = find_vreg(P);
emit_expr(P, keyv, expr->B);
free_vreg(P, assigned_vreg);
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_GETFIELD, .a = assigned_vreg, .b = assigned_vreg, .c = keyv});
} else if(expr->kind == EX_LENGTH) {
emit_expr(P, assigned_vreg, expr->A);
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_LEN, .a = assigned_vreg, .b = assigned_vreg, .c = 0});
} else {
assert(expr->kind == EX_ADD || expr->kind == EX_SUB || expr->kind == EX_MUL || expr->kind == EX_DIV || expr->kind == EX_IDIV || expr->kind == EX_MOD || expr->kind == EX_POW || expr->kind == EX_BAND || expr->kind == EX_BOR || expr->kind == EX_BXOR || expr->kind == EX_AND || expr->kind == EX_OR || expr->kind == EX_EQ || expr->kind == EX_NEQ);
@@ -908,7 +796,7 @@ bool parse_assignment(Parser *P) {
goto err;
}
if(lhs[lhsi - 1]->kind != EX_LOCAL && lhs[lhsi - 1]->kind != EX_GLOBAL && lhs[lhsi - 1]->kind != EX_DOT && lhs[lhsi - 1]->kind != EX_INDEX) {
if(lhs[lhsi - 1]->kind != EX_LOCAL && lhs[lhsi - 1]->kind != EX_GLOBAL && lhs[lhsi - 1]->kind != EX_INDEX) {
goto err;
}
@@ -953,19 +841,6 @@ bool parse_assignment(Parser *P) {
emit_expr(P, lhsv, lhs[i]->A);
alloc_vreg(P, lhsv);
int keyv = find_vreg(P);
assert(keyv != -1);
emit_expr(P, keyv, lhs[i]->B);
vec_LInst_push(&P->current_chunk.instrs, (LInst) {.opcode = L_SETFIELD, .a = lhsv, .b = keyv, .c = rhsv[i]});
free_vreg(P, lhsv);
} else if(lhs[i]->kind == EX_DOT) {
int lhsv = find_vreg(P);
assert(lhsv != -1);
emit_expr(P, lhsv, lhs[i]->A);
alloc_vreg(P, lhsv);
Token field = lhs[i]->B_tok;
size_t abyss_idx = abyss_insert(P, NULL, sizeof(uint16_t) + strlen(field.text));
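
For readers following the parse.c hunks: desc_subexp is a priority-driven recursive descent, where each priority level consumes its own operators and recurses into the next level for operands, and the trailing while loop folds postfix calls and field accesses onto the result. Below is a self-contained toy version of the priority scheme, not project code, reduced to integer literals and two binary levels.

#include <ctype.h>
#include <stdio.h>

static const char *p;  /* cursor over a tiny arithmetic expression */

static long parse_level(int priority) {
    if (priority == 3) {                  /* highest priority: literals only */
        long v = 0;
        while (isdigit((unsigned char) *p)) v = v * 10 + (*p++ - '0');
        return v;
    }
    long lhs = parse_level(priority + 1); /* operands come from the next level up */
    for (;;) {
        char op = *p;
        if (priority == 1 && (op == '+' || op == '-')) {
            p++;
            long rhs = parse_level(priority + 1);
            lhs = (op == '+') ? lhs + rhs : lhs - rhs;
        } else if (priority == 2 && (op == '*' || op == '/')) {
            p++;
            long rhs = parse_level(priority + 1);
            lhs = (op == '*') ? lhs * rhs : lhs / rhs;
        } else {
            return lhs;                   /* not our priority: hand back to the caller */
        }
    }
}

int main(void) {
    p = "2+3*4";
    printf("%ld\n", parse_level(1));      /* prints 14 */
    return 0;
}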

table.h (130 changed lines)

@@ -11,7 +11,6 @@
#include<assert.h>
#include"value.h"
#include"lrwl.h"
typedef struct LTableEntry {
LValue key;
@@ -25,8 +24,6 @@ typedef struct LTableBuckets {
typedef struct LTable {
bool ref;
LRWL lock;
size_t border_hint;
LTableBuckets *buckets;
} LTable;
@@ -38,7 +35,7 @@ static inline LTable *ltable_new(size_t capacity) {
tbl->buckets = calloc(1, sizeof(LTableBuckets) + sizeof(LTableEntry) * capacity);
tbl->buckets->capacity = capacity;
for(size_t i = 0; i < tbl->buckets->capacity; i++) {
for(int i = 0; i < tbl->buckets->capacity; i++) {
tbl->buckets->data[i] = (LTableEntry) {
.key = {.u = LTAG_NIL},
.val = {.u = LTAG_NIL},
@@ -48,42 +45,6 @@ static inline LTable *ltable_new(size_t capacity) {
return tbl;
}
static bool ltablebuckets_set(LTableBuckets*, LValue, LValue);
// ltable_expand MUST BE CALLED DURING A WRITE LOCK
static void ltable_expand(LTable *self) {
LTableBuckets *newb = calloc(1, sizeof(*newb) + sizeof(LTableEntry) * (self->buckets->capacity * 2));
newb->capacity = self->buckets->capacity * 2;
for(size_t i = 0; i < newb->capacity; i++) {
newb->data[i] = (LTableEntry) {
.key = {.u = LTAG_NIL},
.val = {.u = LTAG_NIL},
};
}
for(size_t i = 0; i < self->buckets->capacity; i++) {
LTableEntry *e = &self->buckets->data[i];
if(e->val.u == LTAG_NIL) {
// Non-existent or tombstone.
} else {
assert(e->key.u != LTAG_NIL);
if(!ltablebuckets_set(newb, e->key, e->val)) {
// Must expand again.
free(newb);
ltable_expand(self);
return;
}
}
}
LTableBuckets *oldb = self->buckets;
self->buckets = newb;
free(oldb);
}
static inline bool ltablebuckets_set(LTableBuckets *self, LValue key, LValue val) {
size_t idx = lvalue_hash(key);
@@ -114,43 +75,17 @@ static inline bool ltablebuckets_set(LTableBuckets *self, LValue key, LValue val
return true;
}
#define L_MAX_SEQUENCE 0x7FFFFFFF
static inline void ltable_set(LTable *self, LValue key, LValue val) {
if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
intmax_t idx = nearbyint(key.f);
key = lvalue_from_int32(idx);
}
// Yes, read lock. Setting itself is lock-free, but other operations may block us.
lrwl_read_lock(&self->lock);
bool success = ltablebuckets_set(self->buckets, key, val);
lrwl_read_unlock(&self->lock);
// Intuitively, it would seem like you should upgrade the RW-lock from read
// mode to write mode if you need to resize, but I'm pretty sure that's
// unnecessary? If we fail above, then nothing changes in the table.
// To the outside, it would look as though this thread happened to set the value a bit later.
// TODO: Am I correct?
if(!success) {
lrwl_write_lock(&self->lock);
ltable_expand(self);
ltablebuckets_set(self->buckets, key, val);
lrwl_write_unlock(&self->lock);
}
}
static inline void ltable_set_nolock(LTable *self, LValue key, LValue val) {
if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
intmax_t idx = nearbyint(key.f);
key = lvalue_from_int32(idx);
if(lvalue_tag(key) == LTAG_I32 && lvalue_to_int32(key) < 9007199254740993UL) {
key = lvalue_from_double(lvalue_to_int32(key));
}
if(!ltablebuckets_set(self->buckets, key, val)) {
ltable_expand(self);
assert(ltablebuckets_set(self->buckets, key, val));
assert(0 && "No table resizing");
}
}
/*static inline bool ltable_set_no_overwrite(LTable *tbl, LValue key, LValue val) {
static inline bool ltable_set_no_overwrite(LTable *tbl, LValue key, LValue val) {
LTableBuckets *self = tbl->buckets;
size_t idx = lvalue_hash(key);
@@ -170,7 +105,7 @@ static inline void ltable_set_nolock(LTable *self, LValue key, LValue val) {
idx++;
}
}*/
}
static inline LValue ltablebuckets_get(LTableBuckets *self, LValue key) {
size_t idx = lvalue_hash(key);
@@ -194,56 +129,9 @@ static inline LValue ltablebuckets_get(LTableBuckets *self, LValue key) {
}
static inline LValue ltable_get(LTable *self, LValue key) {
if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
intmax_t idx = nearbyint(key.f);
key = lvalue_from_int32(idx);
if(lvalue_tag(key) == LTAG_I32 && lvalue_to_int32(key) < 9007199254740993UL) {
key = lvalue_from_double(lvalue_to_int32(key));
}
lrwl_read_lock(&self->lock);
LValue ret = ltablebuckets_get(self->buckets, key);
lrwl_read_unlock(&self->lock);
return ret;
}
static inline LValue ltable_get_nolock(LTable *self, LValue key) {
if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
intmax_t idx = nearbyint(key.f);
key = lvalue_from_int32(idx);
}
LValue ret = ltablebuckets_get(self->buckets, key);
return ret;
}
static inline size_t ltable_len_nolock(LTable *self) {
size_t border = self->border_hint;
if(border == 0 || ltable_get_nolock(self, lvalue_from_int32(border)).u != LTAG_NIL) {
do {
border++;
} while(ltable_get_nolock(self, lvalue_from_int32(border)).u != LTAG_NIL);
border--;
} else if(ltable_get_nolock(self, lvalue_from_int32(border)).u == LTAG_NIL) {
do {
border--;
} while(ltable_get_nolock(self, lvalue_from_int32(border)).u == LTAG_NIL);
}
self->border_hint = border;
return border;
}
static inline size_t ltable_len(LTable *self) {
// As it turns out, finding the length cannot be atomic, and especially cannot be lock-free.
// Therefore we do a WRITE lock
lrwl_write_lock(&self->lock);
size_t ret = ltable_len_nolock(self);
lrwl_write_unlock(&self->lock);
return ret;
}
static inline void ltable_insert(LTable *self, LValue val, size_t index) {
lrwl_write_lock(&self->lock);
index = ltable_len_nolock(self) + 1;
ltable_set_nolock(self, lvalue_from_int32(index), val);
lrwl_write_unlock(&self->lock);
return ltablebuckets_get(self->buckets, key);
}
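
The comments in the removed ltable_set and ltable_len spell out the locking scheme: plain sets run under the shared (read) lock because a successful set never moves a bucket, and the exclusive (write) lock is reserved for operations that replace the bucket array or need a stable view of it. A generic, self-contained illustration of that split follows; it uses made-up types and a flat array rather than the project's hashed buckets, and error handling is omitted for brevity.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    pthread_rwlock_t lock;
    size_t capacity;
    atomic_size_t used;
    long *slots;          /* stand-in for the real bucket array */
} GrowArray;

static void grow_array_init(GrowArray *a, size_t cap) {
    pthread_rwlock_init(&a->lock, NULL);
    a->capacity = cap;
    atomic_init(&a->used, 0);
    a->slots = malloc(cap * sizeof *a->slots);
}

/* Safe under the shared lock: each caller claims a distinct index atomically,
 * and reports failure when full, analogous to ltablebuckets_set. */
static bool slots_push(GrowArray *a, long v) {
    size_t i = atomic_fetch_add(&a->used, 1);
    if (i >= a->capacity) { atomic_fetch_sub(&a->used, 1); return false; }
    a->slots[i] = v;
    return true;
}

static void grow_array_push(GrowArray *a, long v) {
    pthread_rwlock_rdlock(&a->lock);   /* shared: nothing is moved on success */
    bool ok = slots_push(a, v);
    pthread_rwlock_unlock(&a->lock);
    if (!ok) {                         /* full: grow exclusively, then retry */
        pthread_rwlock_wrlock(&a->lock);
        a->capacity *= 2;
        a->slots = realloc(a->slots, a->capacity * sizeof *a->slots);
        slots_push(a, v);
        pthread_rwlock_unlock(&a->lock);
    }
}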

vm.c (21 changed lines)

@@ -7,9 +7,9 @@
#include<math.h>
#include<malloc.h>
size_t lvm_call(LVM *L, LFunc *func, size_t arg_count, set_LValueU *heap, LRegSet *regset) {
static size_t lvm_run_internal(LVM *L, LFunc *func, size_t arg_count, set_LValueU *heap, LRegSet *regset) {
if(func->is_native) {
return func->native_func(L, func->ud, arg_count, heap, regset);
return func->native_func(L, func->ud, arg_count, regset);
}
static const void *dispatch_table[] = {
@@ -38,7 +38,6 @@ size_t lvm_call(LVM *L, LFunc *func, size_t arg_count, set_LValueU *heap, LRegSe
[L_COND_NEQ] = &&do_cond_neq,
[L_SETFIELD] = &&do_setfield,
[L_GETFIELD] = &&do_getfield,
[L_LEN] = &&do_len,
};
LUnit *unit = func->unit;
@@ -249,7 +248,7 @@ do_call:;
for(int i = 0; i < arg_count; i++) {
regset2.regs[i] = regset->regs[args[i]];
}
size_t returned_count = lvm_call(L, (LFunc*) (regset->regs[inst->a].u & ~LTAG_MASK), arg_count, heap, &regset2);
size_t returned_count = lvm_run_internal(L, (LFunc*) (regset->regs[inst->a].u & ~LTAG_MASK), arg_count, heap, &regset2);
if(returned_count) {
// TODO: more than 1 return
@@ -287,7 +286,7 @@ do_setfield:;
goto err;
}
if(regset->regs[inst->b].u == LTAG_NIL) {
if(lvalue_tag(regset->regs[inst->b]) == LTAG_NIL) {
goto err;
}
@@ -309,14 +308,6 @@ do_getfield:;
}
DISPATCH();
do_len:
if(lvalue_tag(regset->regs[inst->b]) == LTAG_STRING) {
regset->regs[inst->a] = lvalue_from_int32(((LString*) (regset->regs[inst->b].u & ~LTAG_MASK))->length);
} else if(lvalue_tag(regset->regs[inst->b]) == LTAG_TABLE) {
regset->regs[inst->a] = lvalue_from_int32(ltable_len((LTable*) (regset->regs[inst->b].u & ~LTAG_MASK)));
} else goto err;
DISPATCH();
err:;
puts("Error");
do_ret:;
@@ -329,7 +320,7 @@ size_t lvm_run(LVM *L, LFunc *func, size_t arg_count, LRegSet *regset) {
atomic_fetch_add(&L->active_thread_count, 1);
size_t ret = lvm_call(L, func, arg_count, &heap, regset);
size_t ret = lvm_run_internal(L, func, arg_count, &heap, regset);
mtx_lock(&L->dead_heap_mut);
for(c_each(i, set_LValueU, heap)) {
@@ -409,7 +400,7 @@ static void gc_mark(LValue v) {
LTable *tbl = gco;
tbl->ref = true;
for(size_t i = 0; i < tbl->buckets->capacity; i++) {
for(size_t i = 0; tbl->buckets->capacity; i++) {
LTableEntry e = tbl->buckets->data[i];
gc_mark(e.key);
gc_mark(e.val);
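
The dispatch_table / DISPATCH() machinery edited in these hunks is GCC's labels-as-values ("computed goto") dispatch: each opcode handler jumps straight to the next instruction's label instead of looping back through a switch. Below is a minimal, self-contained example of the technique, not project code; it requires GCC or Clang.

#include <stdio.h>

enum { OP_INC, OP_PRINT, OP_HALT };

static long run(const unsigned char *code) {
    /* One label address per opcode, in the spirit of lvm's dispatch_table. */
    static const void *dispatch[] = {
        [OP_INC]   = &&do_inc,
        [OP_PRINT] = &&do_print,
        [OP_HALT]  = &&do_halt,
    };
    long acc = 0;
    #define DISPATCH() goto *dispatch[*code++]   /* fetch next opcode, jump to its handler */
    DISPATCH();
do_inc:   acc++;                DISPATCH();
do_print: printf("%ld\n", acc); DISPATCH();
do_halt:  return acc;
    #undef DISPATCH
}

int main(void) {
    const unsigned char prog[] = { OP_INC, OP_INC, OP_PRINT, OP_HALT };
    return run(prog) == 2 ? 0 : 1;   /* prints "2" */
}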

vm.h (14 changed lines)

@@ -42,7 +42,6 @@ typedef enum {
L_SETFIELD,
L_GETFIELD,
L_SETINT32,
L_LEN,
} LOp;
typedef union __attribute__((packed)) {
@@ -75,12 +74,7 @@ typedef struct LRegSet {
LValue regs[256];
} LRegSet;
#define i_header
#define T set_LValueU, uint64_t
#include"stc/hashset.h"
#undef i_header
typedef size_t(*LFuncCallback)(struct LVM*, void *ud, size_t argn, set_LValueU *heap, LRegSet *regset);
typedef size_t(*LFuncCallback)(struct LVM*, void *ud, size_t argn, LRegSet *regset);
typedef struct LFunc {
struct LUnit *unit;
bool is_native;
@@ -105,6 +99,11 @@ typedef struct LUnit {
LFunc *funcs;
} LUnit;
#define i_header
#define T set_LValueU, uint64_t
#include"stc/hashset.h"
#undef i_header
typedef struct LThreadPrivates {
set_LValueU *heap;
LRegSet *regset;
@@ -131,7 +130,6 @@ typedef struct LVM {
set_LValueU dead_heap;
} LVM;
size_t lvm_call(LVM *L, LFunc *func, size_t arg_count, set_LValueU *heap, LRegSet *regset);
size_t lvm_run(LVM *L, LFunc *func, size_t arg_count, LRegSet *regset);
LFunc *lvm_func_from_native(LFuncCallback, void *ud);