Commit 957947bc7ad by Armin Rigo

hg merge hashtable

#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <pthread.h>
#include <semaphore.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
#include <sys/wait.h>

#include "stmgc.h"
#define NUMTHREADS 4

typedef TLPREFIX struct node_s node_t;
typedef TLPREFIX struct dict_s dict_t;

struct node_s {
    struct object_s header;
    int typeid;
    intptr_t freevalue;
};

struct dict_s {
    struct node_s hdr;
    stm_hashtable_t *hashtable;
};

#define TID_NODE       0x01234567
#define TID_DICT       0x56789ABC
#define TID_DICTENTRY  0x6789ABCD

static sem_t done;
__thread stm_thread_local_t stm_thread_local;

// global and per-thread-data
time_t default_seed;
dict_t *global_dict;

struct thread_data {
    unsigned int thread_seed;
};
__thread struct thread_data td;
ssize_t stmcb_size_rounded_up(struct object_s *ob)
{
    if (((struct node_s*)ob)->typeid == TID_NODE)
        return sizeof(struct node_s);
    if (((struct node_s*)ob)->typeid == TID_DICT)
        return sizeof(struct dict_s);
    if (((struct node_s*)ob)->typeid == TID_DICTENTRY)
        return sizeof(struct stm_hashtable_entry_s);
    abort();
}

void stmcb_trace(struct object_s *obj, void visit(object_t **))
{
    struct node_s *n;
    n = (struct node_s*)obj;
    if (n->typeid == TID_NODE) {
        return;
    }
    if (n->typeid == TID_DICT) {
        stm_hashtable_tracefn(((struct dict_s *)n)->hashtable, visit);
        return;
    }
    if (n->typeid == TID_DICTENTRY) {
        object_t **ref = &((struct stm_hashtable_entry_s *)obj)->object;
        visit(ref);
        return;
    }
    abort();
}

void stmcb_commit_soon() {}

long stmcb_obj_supports_cards(struct object_s *obj)
{
    return 0;
}

void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
                       uintptr_t start, uintptr_t stop) {
    abort();
}

void stmcb_get_card_base_itemsize(struct object_s *obj,
                                  uintptr_t offset_itemsize[2]) {
    abort();
}
int get_rand(int max)
{
    if (max == 0)
        return 0;
    return (int)(rand_r(&td.thread_seed) % (unsigned int)max);
}

void populate_hashtable(int keymin, int keymax)
{
    int i;
    int diff = get_rand(keymax - keymin);
    for (i = 0; i < keymax - keymin; i++) {
        int key = keymin + i + diff;
        if (key >= keymax)
            key -= (keymax - keymin);
        object_t *o = stm_allocate(sizeof(struct node_s));
        ((node_t *)o)->typeid = TID_NODE;
        ((node_t *)o)->freevalue = key;
        assert(global_dict->hdr.freevalue == 42);
        stm_hashtable_write((object_t *)global_dict, global_dict->hashtable,
                            key, o, &stm_thread_local);
    }
}

void setup_thread(void)
{
    memset(&td, 0, sizeof(struct thread_data));
    td.thread_seed = default_seed++;
}
void *demo_random(void *arg)
{
    int threadnum = (uintptr_t)arg;
    int status;
    rewind_jmp_buf rjbuf;

    stm_register_thread_local(&stm_thread_local);
    stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
    setup_thread();

    volatile int start_count = 0;

    stm_start_transaction(&stm_thread_local);
    ++start_count;
    assert(start_count == 1);  // all the writes that follow must not conflict
    populate_hashtable(1291 * threadnum, 1291 * (threadnum + 1));
    stm_commit_transaction();

    stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
    stm_unregister_thread_local(&stm_thread_local);
    status = sem_post(&done); assert(status == 0);
    return NULL;
}

void newthread(void*(*func)(void*), void *arg)
{
    pthread_t th;
    int status = pthread_create(&th, NULL, func, arg);
    if (status != 0)
        abort();
    pthread_detach(th);
    printf("started new thread\n");
}
void setup_globals(void)
{
    stm_hashtable_t *my_hashtable = stm_hashtable_create();
    struct dict_s new_templ = {
        .hdr = {
            .typeid = TID_DICT,
            .freevalue = 42,
        },
        .hashtable = my_hashtable,
    };

    stm_start_inevitable_transaction(&stm_thread_local);
    global_dict = (dict_t *)stm_setup_prebuilt(
                      (object_t *)(uintptr_t)&new_templ);
    assert(global_dict->hashtable);
    stm_commit_transaction();
}
int main(void)
{
    int i, status;
    rewind_jmp_buf rjbuf;

    stm_hashtable_entry_userdata = TID_DICTENTRY;

    /* pick a random seed from the time in seconds.
       A bit pointless for now... because the interleaving of the
       threads is really random. */
    default_seed = time(NULL);
    printf("running with seed=%lld\n", (long long)default_seed);

    status = sem_init(&done, 0, 0);
    assert(status == 0);

    stm_setup();
    stm_register_thread_local(&stm_thread_local);
    stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
    setup_globals();

    for (i = 0; i < NUMTHREADS; i++) {
        newthread(demo_random, (void *)(uintptr_t)i);
    }
    for (i = 0; i < NUMTHREADS; i++) {
        status = sem_wait(&done);
        assert(status == 0);
        printf("thread finished\n");
    }

    printf("Test OK!\n");

    stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
    stm_unregister_thread_local(&stm_thread_local);
    stm_teardown();
    return 0;
}
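
The demo only ever writes into the hashtable. A minimal companion sketch such as the following could read the entries back through stm_hashtable_read(); it is not part of the demo above, reuses the demo's globals, and assumes it is called inside a transaction:

/* sketch only: check that every key written by populate_hashtable()
   maps back to a node carrying that key in 'freevalue' */
void check_hashtable(int keymin, int keymax)
{
    int key;
    stm_read((object_t *)global_dict);        /* read barrier on the dict */
    for (key = keymin; key < keymax; key++) {
        object_t *o = stm_hashtable_read((object_t *)global_dict,
                                         global_dict->hashtable, key);
        if (o != NULL) {
            stm_read(o);                       /* read barrier on the node */
            assert(((node_t *)o)->typeid == TID_NODE);
            assert(((node_t *)o)->freevalue == key);
        }
    }
}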
......@@ -372,6 +372,7 @@ static void _stm_start_transaction(stm_thread_local_t *tl)
    assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows));
    assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0]));
    assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1]));
    assert(list_is_empty(STM_PSEGMENT->young_objects_with_light_finalizers));
    assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL);
    assert(STM_PSEGMENT->large_overflow_objects == NULL);
    assert(STM_PSEGMENT->finalizers == NULL);
......@@ -972,6 +973,8 @@ static void abort_data_structures_from_segment_num(int segment_num)
(int)pseg->transaction_state);
}
abort_finalizers(pseg);
/* throw away the content of the nursery */
long bytes_in_nursery = throw_away_nursery(pseg);
......@@ -1060,8 +1063,6 @@ static stm_thread_local_t *abort_with_mutex_no_longjmp(void)
/* invoke the callbacks */
invoke_and_clear_user_callbacks(1); /* for abort */
abort_finalizers();
if (is_abort(STM_SEGMENT->nursery_end)) {
/* done aborting */
STM_SEGMENT->nursery_end = pause_signalled ? NSE_SIGPAUSE
......
......@@ -58,28 +58,73 @@ static void _commit_finalizers(void)
STM_PSEGMENT->finalizers = NULL;
}
static void _abort_finalizers(void)
static void abort_finalizers(struct stm_priv_segment_info_s *pseg)
{
/* like _commit_finalizers(), but forget everything from the
current transaction */
if (STM_PSEGMENT->finalizers->run_finalizers != NULL) {
if (STM_PSEGMENT->finalizers->running_next != NULL) {
*STM_PSEGMENT->finalizers->running_next = (uintptr_t)-1;
if (pseg->finalizers != NULL) {
if (pseg->finalizers->run_finalizers != NULL) {
if (pseg->finalizers->running_next != NULL) {
*pseg->finalizers->running_next = (uintptr_t)-1;
}
list_free(pseg->finalizers->run_finalizers);
}
list_free(STM_PSEGMENT->finalizers->run_finalizers);
list_free(pseg->finalizers->objects_with_finalizers);
free(pseg->finalizers);
pseg->finalizers = NULL;
}
list_free(STM_PSEGMENT->finalizers->objects_with_finalizers);
free(STM_PSEGMENT->finalizers);
STM_PSEGMENT->finalizers = NULL;
    /* call the light finalizers for objects that are about to
       be forgotten from the current transaction */
    char *old_gs_register = STM_SEGMENT->segment_base;
    bool must_fix_gs = old_gs_register != pseg->pub.segment_base;

    struct list_s *lst = pseg->young_objects_with_light_finalizers;
    long i, count = list_count(lst);
    if (count > 0) {
        for (i = 0; i < count; i++) {
            object_t *obj = (object_t *)list_item(lst, i);
            assert(_is_young(obj));
            if (must_fix_gs) {
                set_gs_register(pseg->pub.segment_base);
                must_fix_gs = false;
            }
            stmcb_light_finalizer(obj);
        }
        list_clear(lst);
    }

    /* also deals with overflow objects: they are at the tail of
       old_objects_with_light_finalizers (this list is kept in order
       and we cannot add any already-committed object) */
    lst = pseg->old_objects_with_light_finalizers;
    count = list_count(lst);
    while (count > 0) {
        object_t *obj = (object_t *)list_item(lst, --count);
        if (!IS_OVERFLOW_OBJ(pseg, obj))
            break;
        lst->count = count;

        if (must_fix_gs) {
            set_gs_register(pseg->pub.segment_base);
            must_fix_gs = false;
        }
        stmcb_light_finalizer(obj);
    }

    if (STM_SEGMENT->segment_base != old_gs_register)
        set_gs_register(old_gs_register);
}
void stm_enable_light_finalizer(object_t *obj)
{
if (_is_young(obj))
if (_is_young(obj)) {
LIST_APPEND(STM_PSEGMENT->young_objects_with_light_finalizers, obj);
else
}
else {
assert(_is_from_same_transaction(obj));
LIST_APPEND(STM_PSEGMENT->old_objects_with_light_finalizers, obj);
}
}
object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up)
......@@ -108,7 +153,7 @@ static void deal_with_young_objects_with_finalizers(void)
struct list_s *lst = STM_PSEGMENT->young_objects_with_light_finalizers;
long i, count = list_count(lst);
for (i = 0; i < count; i++) {
object_t* obj = (object_t *)list_item(lst, i);
object_t *obj = (object_t *)list_item(lst, i);
assert(_is_young(obj));
object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj;
......@@ -138,7 +183,7 @@ static void deal_with_old_objects_with_finalizers(void)
long i, count = list_count(lst);
lst->count = 0;
for (i = 0; i < count; i++) {
object_t* obj = (object_t *)list_item(lst, i);
object_t *obj = (object_t *)list_item(lst, i);
if (!mark_visited_test(obj)) {
/* not marked: object dies */
/* we're calling the light finalizer in the same
......@@ -345,6 +390,24 @@ static void deal_with_objects_with_finalizers(void)
LIST_FREE(_finalizer_emptystack);
}
static void mark_visit_from_finalizer1(char *base, struct finalizers_s *f)
{
    if (f != NULL && f->run_finalizers != NULL) {
        LIST_FOREACH_R(f->run_finalizers, object_t * /*item*/,
                       mark_visit_object(item, base));
    }
}

static void mark_visit_from_finalizer_pending(void)
{
    long j;
    for (j = 1; j <= NB_SEGMENTS; j++) {
        struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
        mark_visit_from_finalizer1(pseg->pub.segment_base, pseg->finalizers);
    }
    mark_visit_from_finalizer1(stm_object_pages, &g_finalizers);
}

static void _execute_finalizers(struct finalizers_s *f)
{
    if (f->run_finalizers == NULL)
......
......@@ -6,6 +6,7 @@ struct finalizers_s {
uintptr_t *running_next;
};
static void mark_visit_from_finalizer_pending(void);
static void deal_with_young_objects_with_finalizers(void);
static void deal_with_old_objects_with_finalizers(void);
static void deal_with_objects_with_finalizers(void);
......@@ -14,18 +15,13 @@ static void setup_finalizer(void);
static void teardown_finalizer(void);
static void _commit_finalizers(void);
static void _abort_finalizers(void);
static void abort_finalizers(struct stm_priv_segment_info_s *);
#define commit_finalizers() do { \
if (STM_PSEGMENT->finalizers != NULL) \
_commit_finalizers(); \
} while (0)
#define abort_finalizers() do { \
if (STM_PSEGMENT->finalizers != NULL) \
_abort_finalizers(); \
} while (0)
/* regular finalizers (objs from already-committed transactions) */
static struct finalizers_s g_finalizers;
......
......@@ -201,9 +201,6 @@ static void forksupport_child(void)
just release these locks early */
s_mutex_unlock();
/* Open a new profiling file, if any */
forksupport_open_new_profiling_file();
/* Move the copy of the mmap over the old one, overwriting it
and thus freeing the old mapping in this process
*/
......
......@@ -344,6 +344,8 @@ static inline void mark_record_trace(object_t **pobj)
LIST_APPEND(mark_objects_to_trace, obj);
}
#define TRACE_FOR_MAJOR_COLLECTION (&mark_record_trace)
static void mark_trace(object_t *obj, char *segment_base)
{
assert(list_is_empty(mark_objects_to_trace));
......@@ -352,7 +354,7 @@ static void mark_trace(object_t *obj, char *segment_base)
/* trace into the object (the version from 'segment_base') */
struct object_s *realobj =
(struct object_s *)REAL_ADDRESS(segment_base, obj);
stmcb_trace(realobj, &mark_record_trace);
stmcb_trace(realobj, TRACE_FOR_MAJOR_COLLECTION);
if (list_is_empty(mark_objects_to_trace))
break;
......@@ -629,6 +631,7 @@ static void major_collection_now_at_safe_point(void)
mark_visit_from_modified_objects();
mark_visit_from_markers();
mark_visit_from_roots();
mark_visit_from_finalizer_pending();
LIST_FREE(mark_objects_to_trace);
/* finalizer support: will mark as WL_VISITED all objects with a
......
......@@ -44,6 +44,10 @@ static inline bool _is_young(object_t *obj)
tree_contains(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj));
}
static inline bool _is_from_same_transaction(object_t *obj) {
return _is_young(obj) || IS_OVERFLOW_OBJ(STM_PSEGMENT, obj);
}
long stm_can_move(object_t *obj)
{
/* 'long' return value to avoid using 'bool' in the public interface */
......@@ -329,6 +333,7 @@ static void _trace_card_object(object_t *obj)
}
#define TRACE_FOR_MINOR_COLLECTION (&minor_trace_if_young)
static inline void _collect_now(object_t *obj)
{
......@@ -342,7 +347,7 @@ static inline void _collect_now(object_t *obj)
outside the nursery, possibly forcing nursery objects out and
adding them to 'objects_pointing_to_nursery' as well. */
char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
stmcb_trace((struct object_s *)realobj, &minor_trace_if_young);
stmcb_trace((struct object_s *)realobj, TRACE_FOR_MINOR_COLLECTION);
obj->stm_flags |= GCFLAG_WRITE_BARRIER;
}
......
......@@ -74,7 +74,13 @@ static bool close_timing_log(void)
return false;
}
static void forksupport_open_new_profiling_file(void)
static void prof_forksupport_prepare(void)
{
if (profiling_file != NULL)
fflush(profiling_file);
}
static void prof_forksupport_child(void)
{
if (close_timing_log() && profiling_basefn != NULL) {
char filename[1024];
......@@ -98,6 +104,15 @@ int stm_set_timing_log(const char *profiling_file_name,
expand_marker = default_expand_marker;
profiling_expand_marker = expand_marker;
static bool fork_support_ready = false;
if (!fork_support_ready) {
int res = pthread_atfork(prof_forksupport_prepare,
NULL, prof_forksupport_child);
if (res != 0)
stm_fatalerror("pthread_atfork() failed: %m");
fork_support_ready = true;
}
if (!open_timing_log(profiling_file_name))
return -1;
......
static void forksupport_open_new_profiling_file(void);
......@@ -15,7 +15,6 @@
#include "stm/fprintcolor.h"
#include "stm/weakref.h"
#include "stm/marker.h"
#include "stm/prof.h"
#include "stm/finalizer.h"
#include "stm/misc.c"
......@@ -39,3 +38,4 @@
#include "stm/prof.c"
#include "stm/rewind_setjmp.c"
#include "stm/finalizer.c"
#include "stm/hashtable.c"
......@@ -508,7 +508,7 @@ int stm_set_timing_log(const char *profiling_file_name,
/* Support for light finalizers. This is a simple version of
finalizers that guarantees not to do anything fancy, like not
resurrecting objects. */
void (*stmcb_light_finalizer)(object_t *);
extern void (*stmcb_light_finalizer)(object_t *);
void stm_enable_light_finalizer(object_t *);
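/* Hypothetical usage sketch (not part of this patch): a light finalizer
   that only releases a non-GC resource owned by the object.  'wrapper_s'
   and 'wrapper_light_finalizer' are made-up names; the callback must stay
   "light", i.e. it must not allocate or resurrect anything.

       typedef TLPREFIX struct wrapper_s wrapper_t;
       struct wrapper_s { struct object_s hdr; char *raw_buffer; };

       void wrapper_light_finalizer(object_t *obj)
       {
           free(((wrapper_t *)obj)->raw_buffer);
       }

       ...
       stmcb_light_finalizer = &wrapper_light_finalizer;
       object_t *w = stm_allocate(sizeof(struct wrapper_s));
       ((wrapper_t *)w)->raw_buffer = malloc(100);
       stm_enable_light_finalizer(w);   // in the transaction that made 'w'
*/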
/* Support for regular finalizers. Unreachable objects with
......@@ -525,9 +525,34 @@ void stm_enable_light_finalizer(object_t *);
transaction. For older objects, the finalizer is called from a
random thread between regular transactions, in a new custom
transaction. */
void (*stmcb_finalizer)(object_t *);
extern void (*stmcb_finalizer)(object_t *);
object_t *stm_allocate_with_finalizer(ssize_t size_rounded_up);
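/* Hypothetical usage sketch (not part of this patch): a regular finalizer
   is requested per-object at allocation time; 'my_finalizer' is a made-up
   callback.  Unlike the light variant it runs in its own transaction and
   may do arbitrary work, including resurrecting 'obj'.

       void my_finalizer(object_t *obj) { ... }

       stmcb_finalizer = &my_finalizer;
       object_t *p = stm_allocate_with_finalizer(sizeof(struct node_s));
                                      // size of the demo's node type
*/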
/* Hashtables. Keys are 64-bit unsigned integers, values are
'object_t *'. Note that the type 'stm_hashtable_t' is not an
object type at all; you need to allocate and free it explicitly.
If you want to embed the hashtable inside an 'object_t' you
probably need a light finalizer to do the freeing. */
typedef struct stm_hashtable_s stm_hashtable_t;
typedef TLPREFIX struct stm_hashtable_entry_s stm_hashtable_entry_t;
stm_hashtable_t *stm_hashtable_create(void);
void stm_hashtable_free(stm_hashtable_t *);
stm_hashtable_entry_t *stm_hashtable_lookup(object_t *, stm_hashtable_t *,
uintptr_t key);
object_t *stm_hashtable_read(object_t *, stm_hashtable_t *, uintptr_t key);
void stm_hashtable_write(object_t *, stm_hashtable_t *, uintptr_t key,
object_t *nvalue, stm_thread_local_t *);
extern uint32_t stm_hashtable_entry_userdata;
void stm_hashtable_tracefn(stm_hashtable_t *, void (object_t **));
struct stm_hashtable_entry_s {
    struct object_s header;
    uint32_t userdata;
    uintptr_t index;
    object_t *object;
};
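/* Hypothetical usage sketch (not part of this patch), reusing the types of
   the demo at the top of this commit: the stm_hashtable_t is a raw
   structure hanging off a GC object, so a light finalizer is the natural
   place to call stm_hashtable_free() when the owner dies.

       dict_t *d = (dict_t *)stm_allocate(sizeof(struct dict_s));
       d->hdr.typeid = TID_DICT;
       d->hashtable = stm_hashtable_create();
       stm_enable_light_finalizer((object_t *)d);

       stm_hashtable_write((object_t *)d, d->hashtable, key, value,
                           &stm_thread_local);
       object_t *v = stm_hashtable_read((object_t *)d, d->hashtable, key);

   stmcb_trace() for such an object must call stm_hashtable_tracefn(), as
   the demo's TID_DICT case does; the demo itself keeps its dict prebuilt
   and never frees it, so the light-finalizer part above is only what the
   comment on stm_hashtable_create() suggests.  'key' and 'value' are
   placeholders. */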
/* ==================== END ==================== */
#endif
......@@ -165,6 +165,19 @@ void (*stmcb_light_finalizer)(object_t *);
void stm_enable_light_finalizer(object_t *);
void (*stmcb_finalizer)(object_t *);
typedef struct stm_hashtable_s stm_hashtable_t;
stm_hashtable_t *stm_hashtable_create(void);
void stm_hashtable_free(stm_hashtable_t *);
bool _check_hashtable_read(object_t *, stm_hashtable_t *, uintptr_t key);
object_t *hashtable_read_result;
bool _check_hashtable_write(object_t *, stm_hashtable_t *, uintptr_t key,
object_t *nvalue, stm_thread_local_t *tl);
uint32_t stm_hashtable_entry_userdata;
void stm_hashtable_tracefn(stm_hashtable_t *, void (object_t **));
void _set_hashtable(object_t *obj, stm_hashtable_t *h);
stm_hashtable_t *_get_hashtable(object_t *obj);
""")
......@@ -240,6 +253,19 @@ bool _check_become_globally_unique_transaction(stm_thread_local_t *tl) {
CHECKED(stm_become_globally_unique_transaction(tl, "TESTGUT"));
}
object_t *hashtable_read_result;

bool _check_hashtable_read(object_t *hobj, stm_hashtable_t *h, uintptr_t key)
{
    CHECKED(hashtable_read_result = stm_hashtable_read(hobj, h, key));
}

bool _check_hashtable_write(object_t *hobj, stm_hashtable_t *h, uintptr_t key,
                            object_t *nvalue, stm_thread_local_t *tl)
{
    CHECKED(stm_hashtable_write(hobj, h, key, nvalue, tl));
}
#undef CHECKED
......@@ -268,6 +294,20 @@ object_t * _get_weakref(object_t *obj)
return *WEAKREF_PTR(obj, size);
}
void _set_hashtable(object_t *obj, stm_hashtable_t *h)
{
    stm_char *field_addr = ((stm_char*)obj);
    field_addr += SIZEOF_MYOBJ;    /* header */
    *(stm_hashtable_t *TLPREFIX *)field_addr = h;
}

stm_hashtable_t *_get_hashtable(object_t *obj)
{
    stm_char *field_addr = ((stm_char*)obj);
    field_addr += SIZEOF_MYOBJ;    /* header */
    return *(stm_hashtable_t *TLPREFIX *)field_addr;
}
void _set_ptr(object_t *obj, int n, object_t *v)
{
long nrefs = (long)((myobj_t*)obj)->type_id - 421420;
......@@ -296,7 +336,14 @@ object_t * _get_ptr(object_t *obj, int n)
ssize_t stmcb_size_rounded_up(struct object_s *obj)
{
    struct myobj_s *myobj = (struct myobj_s*)obj;
    assert(myobj->type_id != 0);
    if (myobj->type_id < 421420) {
        if (myobj->type_id == 421419) { /* hashtable */
            return sizeof(struct myobj_s) + 1 * sizeof(void*);
        }
        if (myobj->type_id == 421418) { /* hashtable entry */
            return sizeof(struct stm_hashtable_entry_s);
        }
        /* basic case: tid equals 42 plus the size of the object */
        assert(myobj->type_id >= 42 + sizeof(struct myobj_s));
        assert((myobj->type_id - 42) >= 16);
......@@ -316,6 +363,17 @@ void stmcb_trace(struct object_s *obj, void visit(object_t **))
{
    int i;
    struct myobj_s *myobj = (struct myobj_s*)obj;
    if (myobj->type_id == 421419) {
        /* hashtable */
        stm_hashtable_t *h = *((stm_hashtable_t **)(myobj + 1));
        stm_hashtable_tracefn(h, visit);
        return;
    }
    if (myobj->type_id == 421418) {
        /* hashtable entry */
        object_t **ref = &((struct stm_hashtable_entry_s *)myobj)->object;
        visit(ref);
    }
    if (myobj->type_id < 421420) {
        /* basic case: no references */
        return;
......@@ -334,6 +392,8 @@ void stmcb_trace_cards(struct object_s *obj, void visit(object_t **),
{
    int i;
    struct myobj_s *myobj = (struct myobj_s*)obj;
    assert(myobj->type_id != 421419);
    assert(myobj->type_id != 421418);
    if (myobj->type_id < 421420) {
        /* basic case: no references */
        return;
......@@ -404,6 +464,7 @@ GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER
CARD_SIZE = lib._STM_CARD_SIZE # 16b at least
NB_SEGMENTS = lib.STM_NB_SEGMENTS
FAST_ALLOC = lib._STM_FAST_ALLOC
lib.stm_hashtable_entry_userdata = 421418
class Conflict(Exception):
    pass
......@@ -441,6 +502,18 @@ def stm_allocate_weakref(point_to_obj, size=None):
    lib._set_weakref(o, point_to_obj)
    return o

def stm_allocate_hashtable():
    o = lib.stm_allocate(16)
    tid = 421419
    lib._set_type_id(o, tid)
    h = lib.stm_hashtable_create()
    lib._set_hashtable(o, h)
    return o

def get_hashtable(o):
    assert lib._get_type_id(o) == 421419
    return lib._get_hashtable(o)

def stm_get_weakref(o):
    return lib._get_weakref(o)
......@@ -558,7 +631,6 @@ def old_objects_with_cards():
SHADOWSTACK_LENGTH = 1000
_keepalive = weakref.WeakKeyDictionary()
......
......@@ -9,6 +9,7 @@ class TestLightFinalizer(BaseTest):
        #
        @ffi.callback("void(object_t *)")
        def light_finalizer(obj):
            assert stm_get_obj_size(obj) == 48
            segnum = lib.current_segment_num()
            tlnum = '?'
            for n, tl in enumerate(self.tls):
......@@ -20,6 +21,10 @@ class TestLightFinalizer(BaseTest):
        lib.stmcb_light_finalizer = light_finalizer
        self._light_finalizer_keepalive = light_finalizer

    def teardown_method(self, meth):
        lib.stmcb_light_finalizer = ffi.NULL
        BaseTest.teardown_method(self, meth)

    def expect_finalized(self, objs, from_tlnum=None):
        assert [obj for (obj, tlnum) in self.light_finalizers_called] == objs
        if from_tlnum is not None:
......@@ -49,6 +54,15 @@ class TestLightFinalizer(BaseTest):
        self.commit_transaction()
        self.expect_finalized([])

    def test_young_light_finalizer_aborts(self):
        self.start_transaction()
        lp1 = stm_allocate(48)
        lib.stm_enable_light_finalizer(lp1)
        self.expect_finalized([])
        self.abort_transaction()
        self.start_transaction()
        self.expect_finalized([lp1], from_tlnum=0)

    def test_old_light_finalizer(self):
        self.start_transaction()
        lp1 = stm_allocate(48)
......@@ -99,15 +113,47 @@ class TestLightFinalizer(BaseTest):
        stm_major_collect()
        self.expect_finalized([lp1], from_tlnum=1)

    def test_old_light_finalizer_aborts(self):
        self.start_transaction()
        lp1 = stm_allocate(48)
        lib.stm_enable_light_finalizer(lp1)
        self.push_root(lp1)
        self.commit_transaction()
        #
        self.start_transaction()
        self.expect_finalized([])
        self.abort_transaction()
        self.expect_finalized([])

    def test_overflow_light_finalizer_aborts(self):
        self.start_transaction()
        lp1 = stm_allocate(48)
        lib.stm_enable_lig