#pragma once

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdatomic.h>
#include <assert.h>
#include <math.h>

#include "value.h"
#include "lrwl.h"
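
// Overview: LTable is an open-addressed hash table with linear probing over
// LValue key/value pairs (see value.h). A slot whose key is LTAG_NIL has never
// been claimed; a slot whose value is LTAG_NIL is absent or a tombstone.
// Point gets and sets take the LRWL in read mode (the probing itself is
// lock-free), while resizing and length computation take it in write mode.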

typedef struct LTableEntry {
    LValue key;
    LValue val;
} LTableEntry;

typedef struct LTableBuckets {
    size_t capacity;
    LTableEntry data[];
} LTableBuckets;

typedef struct LTable {
    bool ref;
    LRWL lock;
    size_t border_hint;
    LTableBuckets *buckets;
} LTable;

static inline LTable *ltable_new(size_t capacity) {
    // The capacity must be a power of two so that masking with capacity - 1
    // works as a modulo during probing.
    assert(capacity >= 8 && (capacity & (capacity - 1)) == 0);

    LTable *tbl = calloc(1, sizeof(*tbl));
    tbl->ref = false;
    tbl->buckets = calloc(1, sizeof(LTableBuckets) + sizeof(LTableEntry) * capacity);
    tbl->buckets->capacity = capacity;

    for(size_t i = 0; i < tbl->buckets->capacity; i++) {
        tbl->buckets->data[i] = (LTableEntry) {
            .key = {.u = LTAG_NIL},
            .val = {.u = LTAG_NIL},
        };
    }

    return tbl;
}

static inline bool ltablebuckets_set(LTableBuckets*, LValue, LValue);

// ltable_expand MUST BE CALLED WHILE HOLDING THE WRITE LOCK.
static void ltable_expand(LTable *self) {
    // Keep doubling the capacity until every surviving entry rehashes within
    // the probe limit; retrying at the same size would only fail the same way.
    for(size_t new_capacity = self->buckets->capacity * 2;; new_capacity *= 2) {
        LTableBuckets *newb = calloc(1, sizeof(*newb) + sizeof(LTableEntry) * new_capacity);
        newb->capacity = new_capacity;

        for(size_t i = 0; i < newb->capacity; i++) {
            newb->data[i] = (LTableEntry) {
                .key = {.u = LTAG_NIL},
                .val = {.u = LTAG_NIL},
            };
        }

        bool ok = true;
        for(size_t i = 0; ok && i < self->buckets->capacity; i++) {
            LTableEntry *e = &self->buckets->data[i];

            if(e->val.u == LTAG_NIL) {
                // Non-existent or tombstone.
                continue;
            }

            assert(e->key.u != LTAG_NIL);
            ok = ltablebuckets_set(newb, e->key, e->val);
        }

        if(!ok) {
            // Must expand again.
            free(newb);
            continue;
        }

        LTableBuckets *oldb = self->buckets;
        self->buckets = newb;
        free(oldb);
        return;
    }
}
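
// Slots are claimed by CAS-ing the key from LTAG_NIL to the new key; the value
// is then published with a separate atomic store. A concurrent reader can
// therefore observe a claimed key whose value is still LTAG_NIL and will simply
// treat the entry as nil until the store lands.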
static inline bool ltablebuckets_set(LTableBuckets *self, LValue key, LValue val) {
    size_t idx = lvalue_hash(key);

    // Allow roughly 1.5 * log2(capacity) linear probes before giving up and
    // asking the caller to grow the table.
    int probe_limit = __builtin_ctz(self->capacity);
    probe_limit += probe_limit >> 1;

    LTableEntry *current = self->data;

    while(1) {
        idx &= self->capacity - 1;

        LValue prevKey = {.u = LTAG_NIL};
        atomic_compare_exchange_strong(&current[idx].key.u, &prevKey.u, key.u);

        if(prevKey.u == LTAG_NIL || lvalue_eq(prevKey, key)) {
            // Either we claimed an empty slot or the slot already holds this key.
            atomic_store(&current[idx].val.u, val.u);
            break;
        }

        idx++;

        probe_limit--;
        if(probe_limit == 0) {
            return false;
        }
    }

    return true;
}

// Integral float keys with magnitude up to this bound are normalized to int32 keys.
#define L_MAX_SEQUENCE 0x7FFFFFFF

static inline void ltable_set(LTable *self, LValue key, LValue val) {
    // Normalize integral float keys to int32 keys so that t[1] and t[1.0]
    // address the same slot.
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }

    // Yes, a read lock. Setting itself is lock-free, but other operations may block us.
    lrwl_read_lock(&self->lock);
    bool success = ltablebuckets_set(self->buckets, key, val);
    lrwl_read_unlock(&self->lock);

    // Intuitively, it would seem like you should upgrade the RW-lock from read
    // mode to write mode if you need to resize, but I'm pretty sure that's
    // unnecessary: if the set above fails, nothing in the table has changed, so
    // to the outside it just looks as though this thread set the value a bit later.
    // TODO: Am I correct?
    if(!success) {
        lrwl_write_lock(&self->lock);
        do {
            ltable_expand(self);
        } while(!ltablebuckets_set(self->buckets, key, val));
        lrwl_write_unlock(&self->lock);
    }
}

static inline void ltable_set_nolock(LTable *self, LValue key, LValue val) {
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }

    // Expand and retry until the set succeeds; the store must happen even when
    // NDEBUG disables assertions.
    while(!ltablebuckets_set(self->buckets, key, val)) {
        ltable_expand(self);
    }
}

/*static inline bool ltable_set_no_overwrite(LTable *tbl, LValue key, LValue val) {
    LTableBuckets *self = tbl->buckets;

    size_t idx = lvalue_hash(key);
    LTableEntry *current = self->data;
    while(1) {
        idx &= self->capacity - 1;

        LValue prevKey = {.u = LTAG_NIL};
        atomic_compare_exchange_strong(&current[idx].key.u, &prevKey.u, key.u);

        if(prevKey.u == LTAG_NIL) {
            atomic_store(&current[idx].val.u, val.u);
            return true;
        } else if(lvalue_eq(prevKey, key)) {
            return false;
        }

        idx++;
    }
}*/

static inline LValue ltablebuckets_get(LTableBuckets *self, LValue key) {
    size_t idx = lvalue_hash(key);

    size_t tries = self->capacity;

    while(1) {
        idx &= self->capacity - 1;

        LValue foundKey;
        foundKey.u = atomic_load(&self->data[idx].key.u);

        if(lvalue_eq(foundKey, key)) {
            return (LValue) {.u = atomic_load(&self->data[idx].val.u)};
        } else if(--tries == 0) {
            return (LValue) {.u = LTAG_NIL};
        }

        idx++;
    }
}

static inline LValue ltable_get(LTable *self, LValue key) {
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }

    lrwl_read_lock(&self->lock);
    LValue ret = ltablebuckets_get(self->buckets, key);
    lrwl_read_unlock(&self->lock);

    return ret;
}

static inline LValue ltable_get_nolock(LTable *self, LValue key) {
    if(lvalue_tag(key) == LTAG_FLOAT && nearbyint(key.f) == key.f && fabs(key.f) <= L_MAX_SEQUENCE) {
        intmax_t idx = nearbyint(key.f);
        key = lvalue_from_int32(idx);
    }

    return ltablebuckets_get(self->buckets, key);
}
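
// A "border" here is an index n such that t[n] is non-nil and t[n + 1] is nil
// (with n == 0 meaning t[1] is nil), which is what ltable_len reports. The
// border_hint caches the last result so the scan usually starts close to the
// answer.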
static inline size_t ltable_len_nolock(LTable *self) {
    size_t border = self->border_hint;
    if(border == 0 || ltable_get_nolock(self, lvalue_from_int32(border)).u != LTAG_NIL) {
        // The hint sits inside the sequence (or at 0): scan upwards past the
        // last non-nil index, then step back onto it.
        do {
            border++;
        } while(ltable_get_nolock(self, lvalue_from_int32(border)).u != LTAG_NIL);
        border--;
    } else {
        // The hint sits past the end of the sequence: scan downwards to the
        // first non-nil index (or to 0 if the sequence is empty).
        do {
            border--;
        } while(border > 0 && ltable_get_nolock(self, lvalue_from_int32(border)).u == LTAG_NIL);
    }
    self->border_hint = border;
    return border;
}

static inline size_t ltable_len(LTable *self) {
    // As it turns out, finding the length cannot be atomic, and especially
    // cannot be lock-free. Therefore we take a WRITE lock: ltable_len_nolock
    // also updates border_hint, so concurrent operations must be kept out.
    lrwl_write_lock(&self->lock);
    size_t ret = ltable_len_nolock(self);
    lrwl_write_unlock(&self->lock);
    return ret;
}

static inline void ltable_insert(LTable *self, LValue val, size_t index) {
    // NOTE: the index argument is currently ignored; the value is always
    // appended at border + 1.
    lrwl_write_lock(&self->lock);
    index = ltable_len_nolock(self) + 1;
    ltable_set_nolock(self, lvalue_from_int32(index), val);
    lrwl_write_unlock(&self->lock);
}
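
// A minimal single-threaded usage sketch (kept as a comment), assuming value.h
// provides lvalue_from_int32, lvalue_eq and LTAG_NIL as used above:
//
//     LTable *t = ltable_new(8);
//     ltable_set(t, lvalue_from_int32(1), lvalue_from_int32(10));
//     ltable_set(t, lvalue_from_int32(2), lvalue_from_int32(20));
//
//     LValue v = ltable_get(t, lvalue_from_int32(2));
//     assert(lvalue_eq(v, lvalue_from_int32(20)));
//     assert(ltable_len(t) == 2);
//
//     if(ltable_get(t, lvalue_from_int32(3)).u == LTAG_NIL) {
//         // Key 3 was never set.
//     }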