table expansion

This commit is contained in:
Mid 2025-09-05 21:15:02 +03:00
parent a29b250f30
commit 8cd83188ad

130
table.h
View File

@@ -11,6 +11,7 @@
#include<assert.h>
#include"value.h"
#include"lrwl.h"
typedef struct LTableEntry {
LValue key;
@@ -24,6 +25,8 @@ typedef struct LTableBuckets {
// A hash table with open-addressed buckets, guarded by a reader/writer lock.
typedef struct LTable {
bool ref; // NOTE(review): presumably marks reference/shared ownership — confirm against allocator
LRWL lock; // reader/writer lock guarding `buckets` (writers expand, readers probe)
size_t border_hint; // cached sequence border, maintained by ltable_len_nolock
LTableBuckets *buckets; // current bucket array; replaced wholesale on expansion
} LTable;
@@ -35,7 +38,7 @@ static inline LTable *ltable_new(size_t capacity) {
tbl->buckets = calloc(1, sizeof(LTableBuckets) + sizeof(LTableEntry) * capacity);
tbl->buckets->capacity = capacity;
for(int i = 0; i < tbl->buckets->capacity; i++) {
for(size_t i = 0; i < tbl->buckets->capacity; i++) {
tbl->buckets->data[i] = (LTableEntry) {
.key = {.u = LTAG_NIL},
.val = {.u = LTAG_NIL},
@@ -45,6 +48,42 @@ static inline LTable *ltable_new(size_t capacity) {
return tbl;
}
static bool ltablebuckets_set(LTableBuckets*, LValue, LValue);
// ltable_expand MUST BE CALLED DURING A WRITE LOCK
//
// Grows the bucket array (at least doubling) and rehashes every live entry
// into it. The old array is freed only after the new one is fully built, so
// on allocation failure the table is left untouched.
static void ltable_expand(LTable *self) {
    size_t newcap = self->buckets->capacity * 2;
    for(;;) {
        LTableBuckets *newb = calloc(1, sizeof(*newb) + sizeof(LTableEntry) * newcap);
        if(newb == NULL) {
            // Original code dereferenced the NULL result (UB); fail loudly instead.
            abort();
        }
        newb->capacity = newcap;
        for(size_t i = 0; i < newcap; i++) {
            newb->data[i] = (LTableEntry) {
                .key = {.u = LTAG_NIL},
                .val = {.u = LTAG_NIL},
            };
        }
        bool ok = true;
        for(size_t i = 0; ok && i < self->buckets->capacity; i++) {
            LTableEntry *e = &self->buckets->data[i];
            if(e->val.u == LTAG_NIL) {
                // Non-existent or tombstone — dropped during rehash.
                continue;
            }
            assert(e->key.u != LTAG_NIL);
            ok = ltablebuckets_set(newb, e->key, e->val);
        }
        if(ok) {
            LTableBuckets *oldb = self->buckets;
            self->buckets = newb;
            free(oldb);
            return;
        }
        // Rehash didn't fit. The original recursed, which re-read the OLD
        // capacity and retried at the SAME doubled size — a potential
        // infinite loop. Grow further and try again instead.
        free(newb);
        newcap *= 2;
    }
}
static inline bool ltablebuckets_set(LTableBuckets *self, LValue key, LValue val) {
size_t idx = lvalue_hash(key);
@@ -75,17 +114,43 @@ static inline bool ltablebuckets_set(LTableBuckets *self, LValue key, LValue val
return true;
}
#define L_MAX_SEQUENCE 0x7FFFFFFF
// Set key -> val, taking locks as needed and expanding the table on demand.
// (The diff rendering had left the pre-change I32 normalization lines
// interleaved here; only the FLOAT canonicalization is current.)
static inline void ltable_set(LTable *self, LValue key, LValue val) {
    // Canonicalize integral float keys to int32 so t[1.0] and t[1] alias.
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }
    // Yes, read lock. Setting itself is lock-free, but other operations may block us.
    lrwl_read_lock(&self->lock);
    bool success = ltablebuckets_set(self->buckets, key, val);
    lrwl_read_unlock(&self->lock);
    // A failed set changes nothing in the table, so dropping the read lock
    // and re-trying under the write lock is safe: to the outside it looks as
    // though this thread simply set the value a bit later.
    if(!success) {
        lrwl_write_lock(&self->lock);
        // Another writer may already have expanded while we waited for the
        // write lock — retry before growing, and keep growing until it fits.
        while(!ltablebuckets_set(self->buckets, key, val)) {
            ltable_expand(self);
        }
        lrwl_write_unlock(&self->lock);
    }
}
// Set key -> val without taking the lock. Callers must guarantee exclusive
// access (or call only before the table is shared).
static inline void ltable_set_nolock(LTable *self, LValue key, LValue val) {
    // Canonicalize integral float keys to int32 (mirrors ltable_set).
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }
    if(!ltablebuckets_set(self->buckets, key, val)) {
        // Debug builds trap: the nolock path is not expected to need resizing.
        assert(0 && "No table resizing");
        // Release (NDEBUG) builds fall through and expand anyway. The retry
        // insert must NOT live inside assert() — the original wrote
        // assert(ltablebuckets_set(...)), whose side effect disappears
        // entirely under NDEBUG, silently dropping the store.
        ltable_expand(self);
        bool ok = ltablebuckets_set(self->buckets, key, val);
        assert(ok);
        (void)ok;
    }
}
static inline bool ltable_set_no_overwrite(LTable *tbl, LValue key, LValue val) {
/*static inline bool ltable_set_no_overwrite(LTable *tbl, LValue key, LValue val) {
LTableBuckets *self = tbl->buckets;
size_t idx = lvalue_hash(key);
@@ -105,7 +170,7 @@ static inline bool ltable_set_no_overwrite(LTable *tbl, LValue key, LValue val)
idx++;
}
}
}*/
static inline LValue ltablebuckets_get(LTableBuckets *self, LValue key) {
size_t idx = lvalue_hash(key);
@@ -129,9 +194,56 @@ static inline LValue ltablebuckets_get(LTableBuckets *self, LValue key) {
}
// Look up key under the read lock. Returns the NIL value when absent.
// (The diff rendering had left the pre-change I32 normalization and the old
// unlocked return interleaved here; this is the current, locked version.)
static inline LValue ltable_get(LTable *self, LValue key) {
    // Canonicalize integral float keys to int32 (mirrors ltable_set).
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }
    lrwl_read_lock(&self->lock);
    LValue ret = ltablebuckets_get(self->buckets, key);
    lrwl_read_unlock(&self->lock);
    return ret;
}
// Lock-free lookup for callers that already hold the lock (or own the table).
// Returns the NIL value when the key is absent.
static inline LValue ltable_get_nolock(LTable *self, LValue key) {
    // Canonicalize integral float keys to int32 so t[1.0] and t[1] alias.
    if(lvalue_tag(key) == LTAG_FLOAT) {
        double f = key.f;
        if(nearbyint(f) == f && fabs(f) <= L_MAX_SEQUENCE) {
            intmax_t idx = nearbyint(f);
            key = lvalue_from_int32(idx);
        }
    }
    return ltablebuckets_get(self->buckets, key);
}
// Find a "border" of the table's sequence part (an n such that t[n] is
// non-nil and t[n+1] is nil, or 0 if t[1] is nil), starting the search from
// the cached border_hint, and refresh the hint. Caller must hold the lock.
static inline size_t ltable_len_nolock(LTable *self) {
    size_t border = self->border_hint;
    if(border == 0 || ltable_get_nolock(self, lvalue_from_int32(border)).u != LTAG_NIL) {
        // Hint is at (or below) the border — walk upward while entries exist.
        while(ltable_get_nolock(self, lvalue_from_int32(border + 1)).u != LTAG_NIL) {
            border++;
        }
    } else {
        // Hint overshoots (t[border] is nil) — walk downward to a live entry.
        // The `border > 0` guard is essential: the original decremented an
        // unsigned size_t unconditionally, underflowing on an empty table.
        while(border > 0 && ltable_get_nolock(self, lvalue_from_int32(border)).u == LTAG_NIL) {
            border--;
        }
    }
    self->border_hint = border;
    return border;
}
// Length of the table's sequence part. Finding the border both reads the
// table and updates border_hint, so it cannot be lock-free — take the
// WRITE lock for the whole operation.
static inline size_t ltable_len(LTable *self) {
    lrwl_write_lock(&self->lock);
    const size_t len = ltable_len_nolock(self);
    lrwl_write_unlock(&self->lock);
    return len;
}
// Append val after the current end of the table's sequence part.
// NOTE(review): the caller-supplied `index` is unconditionally overwritten
// with len+1 below, so this is an append, not a positional insert — confirm
// whether ignoring the parameter is intentional or the positional case is
// simply unimplemented.
static inline void ltable_insert(LTable *self, LValue val, size_t index) {
lrwl_write_lock(&self->lock);
// Recompute the end under the write lock so concurrent appends don't collide.
index = ltable_len_nolock(self) + 1;
ltable_set_nolock(self, lvalue_from_int32(index), val);
lrwl_write_unlock(&self->lock);
}