from rpython.rtyper.lltypesystem.llmemory import raw_malloc, raw_free
from rpython.rtyper.lltypesystem.llmemory import raw_memcopy, raw_memclear
from rpython.rtyper.lltypesystem.llmemory import NULL, raw_malloc_usage
from rpython.memory.support import get_address_stack, get_address_deque
from rpython.memory.support import AddressDict
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, rffi, llgroup
from rpython.rlib.objectmodel import free_non_gc_object
from rpython.rlib.debug import ll_assert, have_debug_prints
from rpython.rlib.debug import debug_print, debug_start, debug_stop
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT
from rpython.memory.gc.base import MovingGCBase, ARRAY_TYPEID_MAP,\
     TYPEID_MAP

import sys, os

first_gcflag = 1 << (LONG_BIT//2)
GCFLAG_FORWARDED = first_gcflag
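# Note: the lower half of the header word 'tid' holds the type id (see
# combine() and get_type_id() below), which is why the GC flags start at
# bit LONG_BIT//2.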
# GCFLAG_EXTERNAL is set on objects not living in the semispace:
# either immortal objects or (for HybridGC) externally raw_malloc'ed
GCFLAG_EXTERNAL = first_gcflag << 1
GCFLAG_FINALIZATION_ORDERING = first_gcflag << 2

_GCFLAG_HASH_BASE = first_gcflag << 3
GCFLAG_HASHMASK = _GCFLAG_HASH_BASE * 0x3   # also consumes 'first_gcflag << 4'
# the two bits in GCFLAG_HASHMASK can have one of the following values:
#   - nobody ever asked for the hash of the object
GC_HASH_NOTTAKEN   = _GCFLAG_HASH_BASE * 0x0
#   - someone asked, and we gave the address of the object
GC_HASH_TAKEN_ADDR = _GCFLAG_HASH_BASE * 0x1
#   - someone asked, and we gave the address plus 'nursery_hash_base'
GC_HASH_TAKEN_NURS = _GCFLAG_HASH_BASE * 0x2
#   - we have our own extra field to store the hash
GC_HASH_HASFIELD   = _GCFLAG_HASH_BASE * 0x3
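# Lifecycle of these bits for an object in the semispace: it starts as
# GC_HASH_NOTTAKEN; the first identityhash() call sets GC_HASH_TAKEN_ADDR
# (or GC_HASH_TAKEN_NURS, for the nursery objects of generation.py); the
# next copy then stores the hash value in an extra word appended after the
# object and switches to GC_HASH_HASFIELD (see _make_a_copy_with_tid()).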

GCFLAG_EXTRA = first_gcflag << 5    # for RPython abuse only

memoryError = MemoryError()

class SemiSpaceGC(MovingGCBase):
    _alloc_flavor_ = "raw"
    inline_simple_malloc = True
    inline_simple_malloc_varsize = True
    malloc_zero_filled = True
    first_unused_gcflag = first_gcflag << 6
    gcflag_extra = GCFLAG_EXTRA

    HDR = lltype.Struct('header', ('tid', lltype.Signed))   # XXX or rffi.INT?
    typeid_is_in_field = 'tid'
    FORWARDSTUB = lltype.GcStruct('forwarding_stub',
                                  ('forw', llmemory.Address))
    FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB)

    object_minimal_size = llmemory.sizeof(FORWARDSTUB)
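    # (an object must be at least this large so that, when it is moved,
    # it can be overwritten in place by a FORWARDSTUB; see
    # set_forwarding_address())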

    # the following values override the default arguments of __init__ when
    # translating to a real backend.
    TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust

    def __init__(self, config, space_size=4096, max_space_size=sys.maxint//2+1,
                 **kwds):
        self.param_space_size = space_size
        self.param_max_space_size = max_space_size
        MovingGCBase.__init__(self, config, **kwds)

    def setup(self):
        #self.total_collection_time = 0.0
        self.total_collection_count = 0

        self.space_size = self.param_space_size
        self.max_space_size = self.param_max_space_size
        self.red_zone = 0

        #self.program_start_time = time.time()
        self.tospace = llarena.arena_malloc(self.space_size, True)
        ll_assert(bool(self.tospace), "couldn't allocate tospace")
        self.top_of_space = self.tospace + self.space_size
        self.fromspace = llarena.arena_malloc(self.space_size, True)
        ll_assert(bool(self.fromspace), "couldn't allocate fromspace")
        self.free = self.tospace
        MovingGCBase.setup(self)
        self.objects_with_finalizers = self.AddressDeque()
        self.objects_with_light_finalizers = self.AddressStack()
        self.objects_with_weakrefs = self.AddressStack()

    def _teardown(self):
        debug_print("Teardown")
        llarena.arena_free(self.fromspace)
        llarena.arena_free(self.tospace)

    # This class only defines the malloc_{fixed,var}size_clear() methods
    # because the spaces are filled with zeroes in advance.

    def malloc_fixedsize_clear(self, typeid16, size,
                               has_finalizer=False,
                               is_finalizer_light=False,
                               contains_weakptr=False):
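        # Bump-pointer allocation: carve 'totalsize' bytes off self.free;
        # if the request does not fit in the remaining tospace, fall back
        # to obtain_free_space(), which may collect.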
        size_gc_header = self.gcheaderbuilder.size_gc_header
        totalsize = size_gc_header + size
        result = self.free
        if raw_malloc_usage(totalsize) > self.top_of_space - result:
            result = self.obtain_free_space(totalsize)
        llarena.arena_reserve(result, totalsize)
        self.init_gc_object(result, typeid16)
        self.free = result + totalsize
        #if is_finalizer_light:
        #    self.objects_with_light_finalizers.append(result + size_gc_header)
        #else:
        if has_finalizer:
            from rpython.rtyper.lltypesystem import rffi
            self.objects_with_finalizers.append(result + size_gc_header)
            self.objects_with_finalizers.append(rffi.cast(llmemory.Address, -1))
        if contains_weakptr:
            self.objects_with_weakrefs.append(result + size_gc_header)
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)

    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                             offset_to_length):
        size_gc_header = self.gcheaderbuilder.size_gc_header
        nonvarsize = size_gc_header + size
        try:
            varsize = ovfcheck(itemsize * length)
            totalsize = ovfcheck(nonvarsize + varsize)
        except OverflowError:
            raise memoryError
        result = self.free
        if raw_malloc_usage(totalsize) > self.top_of_space - result:
            result = self.obtain_free_space(totalsize)
        llarena.arena_reserve(result, totalsize)
        self.init_gc_object(result, typeid16)
        (result + size_gc_header + offset_to_length).signed[0] = length
        self.free = result + llarena.round_up_for_allocation(totalsize)
        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)

    def shrink_array(self, addr, smallerlength):
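        # Shrinking in place only works for objects inside the semispace;
        # external (e.g. raw_malloc'ed) objects are left alone and the
        # caller gets False back.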
        size_gc_header = self.gcheaderbuilder.size_gc_header
        if self._is_in_the_space(addr - size_gc_header):
            typeid = self.get_type_id(addr)
            totalsmallersize = (
                size_gc_header + self.fixed_size(typeid) +
                self.varsize_item_sizes(typeid) * smallerlength)
            llarena.arena_shrink_obj(addr - size_gc_header, totalsmallersize)
            #
            offset_to_length = self.varsize_offset_to_length(typeid)
            (addr + offset_to_length).signed[0] = smallerlength
            return True
        else:
            return False

    def register_finalizer(self, fq_index, gcobj):
        from rpython.rtyper.lltypesystem import rffi
        obj = llmemory.cast_ptr_to_adr(gcobj)
        fq_index = rffi.cast(llmemory.Address, fq_index)
        self.objects_with_finalizers.append(obj)
        self.objects_with_finalizers.append(fq_index)

    def obtain_free_space(self, needed):
        # a bit of tweaking to maximize the performance and minimize the
        # amount of code in an inlined version of malloc_fixedsize_clear()
        if not self.try_obtain_free_space(needed):
            raise memoryError
        return self.free
    obtain_free_space._dont_inline_ = True

    def try_obtain_free_space(self, needed):
        # XXX for bonus points do big objects differently
        needed = raw_malloc_usage(needed)
        if (self.red_zone >= 2 and self.space_size < self.max_space_size and
            self.double_space_size()):
            pass    # collect was done during double_space_size()
        else:
            self.semispace_collect()
        missing = needed - (self.top_of_space - self.free)
        if missing <= 0:
            return True      # success
        else:
            # first check if the object could possibly fit
            proposed_size = self.space_size
            while missing > 0:
                if proposed_size >= self.max_space_size:
                    return False    # no way
                missing -= proposed_size
                proposed_size *= 2
            # For address space fragmentation reasons, we double the space
            # size possibly several times, moving the objects at each step,
            # instead of going directly for the final size.  We assume that
            # it's a rare case anyway.
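            # (for example, with space_size == 4MB and missing == 6MB:
            # 6 - 4 = 2, then 2 - 8 < 0, so proposed_size ends up 16MB)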
            while self.space_size < proposed_size:
                if not self.double_space_size():
                    return False
            ll_assert(needed <= self.top_of_space - self.free,
                         "double_space_size() failed to do its job")
            return True

    def double_space_size(self):
        self.red_zone = 0
        old_fromspace = self.fromspace
        newsize = self.space_size * 2
        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            return False    # out of memory
        llarena.arena_free(old_fromspace)
        self.fromspace = newspace
        # now self.tospace contains the existing objects and
        # self.fromspace is the freshly allocated bigger space

        self.semispace_collect(size_changing=True)
        self.top_of_space = self.tospace + newsize
        # now self.tospace is the freshly allocated bigger space,
        # and self.fromspace is the old smaller space, now empty
        llarena.arena_free(self.fromspace)

        newspace = llarena.arena_malloc(newsize, True)
        if not newspace:
            # Complex failure case: we have in self.tospace a big chunk
            # of memory, and the two smaller original spaces are already gone.
            # Unsure if it's worth these efforts, but we can artificially
            # split self.tospace in two again...
            self.max_space_size = self.space_size    # don't try to grow again,
            #              because doing arena_free(self.fromspace) would crash
            self.fromspace = self.tospace + self.space_size
            self.top_of_space = self.fromspace
            ll_assert(self.free <= self.top_of_space,
                         "unexpected growth of GC space usage during collect")
            return False     # out of memory

        self.fromspace = newspace
        self.space_size = newsize
        return True    # success

    def set_max_heap_size(self, size):
        # Set the maximum semispace size.
        # The size is rounded down to the next power of two.  Also, this is
        # the size of one semispace only, so actual usage can be the double
        # during a collection.  Finally, note that this will never shrink
        # an already-allocated heap.
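        # For example, set_max_heap_size(10*1024*1024) leaves
        # max_space_size at 8MB (2**23), the largest power of two not
        # above the requested size.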
        if size < 1:
            size = 1     # actually, the minimum is 8MB in default translations
        self.max_space_size = sys.maxint//2+1
        while self.max_space_size > size:
            self.max_space_size >>= 1

    @classmethod
    def JIT_minimal_size_in_nursery(cls):
        return cls.object_minimal_size

    def collect(self, gen=0):
        self.debug_check_consistency()
        self.semispace_collect()
        # the indirection is required by the fact that collect() is referred
        # to by the gc transformer, and the default argument would crash
        # (this is also a hook for the HybridGC)

    def semispace_collect(self, size_changing=False):
        debug_start("gc-collect")
        debug_print()
        debug_print(".----------- Full collection ------------------")
        start_usage = self.free - self.tospace
        debug_print("| used before collection:          ",
                    start_usage, "bytes")
        #start_time = time.time()
        #llop.debug_print(lltype.Void, 'semispace_collect', int(size_changing))

        # Switch the spaces.  We copy everything over to the empty space
        # (self.fromspace at the beginning of the collection), and clear the old
        # one (self.tospace at the beginning).  Their purposes will be reversed
        # for the next collection.
        tospace = self.fromspace
        fromspace = self.tospace
        self.fromspace = fromspace
        self.tospace = tospace
        self.top_of_space = tospace + self.space_size
        scan = self.free = tospace
        self.starting_full_collect()
        self.collect_roots()
        self.copy_pending_finalizers(self.copy)
        scan = self.scan_copied(scan)
        if self.objects_with_light_finalizers.non_empty():
            self.deal_with_objects_with_light_finalizers()
        if self.objects_with_finalizers.non_empty():
            scan = self.deal_with_objects_with_finalizers(scan)
        if self.objects_with_weakrefs.non_empty():
            self.invalidate_weakrefs()
        self.update_objects_with_id()
        self.finished_full_collect()
        self.debug_check_consistency()
        if not size_changing:
            llarena.arena_reset(fromspace, self.space_size, True)
            self.record_red_zone()
            self.execute_finalizers()
        #llop.debug_print(lltype.Void, 'collected', self.space_size, size_changing, self.top_of_space - self.free)
        if have_debug_prints():
            #end_time = time.time()
            #elapsed_time = end_time - start_time
            #self.total_collection_time += elapsed_time
            self.total_collection_count += 1
            #total_program_time = end_time - self.program_start_time
            end_usage = self.free - self.tospace
            debug_print("| used after collection:           ",
                        end_usage, "bytes")
            debug_print("| freed:                           ",
                        start_usage - end_usage, "bytes")
            debug_print("| size of each semispace:          ",
                        self.space_size, "bytes")
            debug_print("| fraction of semispace now used:  ",
                        end_usage * 100.0 / self.space_size, "%")
            #ct = self.total_collection_time
            cc = self.total_collection_count
            debug_print("| number of semispace_collects:    ",
                        cc)
            #debug_print("|                         i.e.:    ",
            #            cc / total_program_time, "per second")
            #debug_print("| total time in semispace_collect: ",
            #            ct, "seconds")
            #debug_print("|                            i.e.: ",
            #            ct * 100.0 / total_program_time, "%")
            debug_print("`----------------------------------------------")
        debug_stop("gc-collect")

    def starting_full_collect(self):
        pass    # hook for the HybridGC

    def finished_full_collect(self):
        pass    # hook for the HybridGC

    def record_red_zone(self):
        # red zone: if the space is more than 80% full, the next collection
        # should double its size.  If it is more than 66% full twice in a row,
        # then it should double its size too.  (XXX adjust)
        # The goal is to avoid many repeated collections that don't free a lot
        # of memory each, if the size of the live object set is just below the
        # size of the space.
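        # (here 'more than 80% full' shows up as free_after_collection
        # dropping below space_size // 5, and 'more than 66% full' as
        # dropping below space_size // 3)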
        free_after_collection = self.top_of_space - self.free
        if free_after_collection > self.space_size // 3:
            self.red_zone = 0
        else:
            self.red_zone += 1
            if free_after_collection < self.space_size // 5:
                self.red_zone += 1

    def get_size_incl_hash(self, obj):
        size = self.get_size(obj)
        hdr = self.header(obj)
        if (hdr.tid & GCFLAG_HASHMASK) == GC_HASH_HASFIELD:
            size += llmemory.sizeof(lltype.Signed)
        return size

    def scan_copied(self, scan):
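        # Cheney-style scan: objects between 'scan' and 'self.free' have
        # been copied but not yet traced; tracing them may copy more
        # objects (advancing self.free), so loop until the pointers meet.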
        while scan < self.free:
            curr = scan + self.size_gc_header()
            self.trace_and_copy(curr)
            scan += self.size_gc_header() + self.get_size_incl_hash(curr)
        return scan

    def collect_roots(self):
        self.root_walker.walk_roots(
            SemiSpaceGC._collect_root,  # stack roots
            SemiSpaceGC._collect_root,  # static in prebuilt non-gc structures
            SemiSpaceGC._collect_root)  # static in prebuilt gc objects

    def _collect_root(self, root):
        root.address[0] = self.copy(root.address[0])

    def copy(self, obj):
        if self.DEBUG:
            self.debug_check_can_copy(obj)
        if self.is_forwarded(obj):
            #llop.debug_print(lltype.Void, obj, "already copied to", self.get_forwarding_address(obj))
            return self.get_forwarding_address(obj)
        else:
            objsize = self.get_size(obj)
            newobj = self.make_a_copy(obj, objsize)
            #llop.debug_print(lltype.Void, obj, "copied to", newobj,
            #                 "tid", self.header(obj).tid,
            #                 "size", totalsize)
            self.set_forwarding_address(obj, newobj, objsize)
            return newobj

    def _get_object_hash(self, obj, objsize, tid):
        # Returns the hash of the object, which must not be GC_HASH_NOTTAKEN.
        gc_hash = tid & GCFLAG_HASHMASK
        if gc_hash == GC_HASH_HASFIELD:
            obj = llarena.getfakearenaaddress(obj)
            return (obj + objsize).signed[0]
        elif gc_hash == GC_HASH_TAKEN_ADDR:
            return llmemory.cast_adr_to_int(obj)
        elif gc_hash == GC_HASH_TAKEN_NURS:
            return self._compute_current_nursery_hash(obj)
        else:
            assert 0, "gc_hash == GC_HASH_NOTTAKEN"

    def _make_a_copy_with_tid(self, obj, objsize, tid):
        totalsize = self.size_gc_header() + objsize
        newaddr = self.free
        llarena.arena_reserve(newaddr, totalsize)
        raw_memcopy(obj - self.size_gc_header(), newaddr, totalsize)
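        # If a hash was ever taken, keep it stable across the move: store
        # the hash value in an extra word after the copy and mark the
        # copy as GC_HASH_HASFIELD.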
        if tid & GCFLAG_HASHMASK:
            hash = self._get_object_hash(obj, objsize, tid)
            llarena.arena_reserve(newaddr + totalsize,
                                  llmemory.sizeof(lltype.Signed))
            (newaddr + totalsize).signed[0] = hash
            tid |= GC_HASH_HASFIELD
            totalsize += llmemory.sizeof(lltype.Signed)
        self.free += totalsize
        newhdr = llmemory.cast_adr_to_ptr(newaddr, lltype.Ptr(self.HDR))
        newhdr.tid = tid
        newobj = newaddr + self.size_gc_header()
        return newobj

    def make_a_copy(self, obj, objsize):
        tid = self.header(obj).tid
        return self._make_a_copy_with_tid(obj, objsize, tid)

    def trace_and_copy(self, obj):
        self.trace(obj, self._trace_copy, None)

    def _trace_copy(self, pointer, ignored):
        pointer.address[0] = self.copy(pointer.address[0])

    def surviving(self, obj):
        # To use during a collection.  Check if the object is currently
        # marked as surviving the collection.  This is equivalent to
        # self.is_forwarded() for all objects except the nonmoving objects
        # created by the HybridGC subclass.  In all cases, if an object
        # survives, self.get_forwarding_address() returns its new address.
        return self.is_forwarded(obj)

    def is_forwarded(self, obj):
        return self.header(obj).tid & GCFLAG_FORWARDED != 0
        # note: all prebuilt objects also have this flag set

    def get_forwarding_address(self, obj):
        tid = self.header(obj).tid
        if tid & GCFLAG_EXTERNAL:
            self.visit_external_object(obj)
            return obj      # external or prebuilt objects are "forwarded"
                            # to themselves
        else:
            stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
            return stub.forw

    def visit_external_object(self, obj):
        pass    # hook for the HybridGC

    def get_possibly_forwarded_type_id(self, obj):
        tid = self.header(obj).tid
        if self.is_forwarded(obj) and not (tid & GCFLAG_EXTERNAL):
            obj = self.get_forwarding_address(obj)
        return self.get_type_id(obj)

    def set_forwarding_address(self, obj, newobj, objsize):
        # To mark an object as forwarded, we set the GCFLAG_FORWARDED and
        # overwrite the object with a FORWARDSTUB.  Doing so is a bit
        # long-winded on llarena, but it all melts down to two memory
        # writes after translation to C.
        size_gc_header = self.size_gc_header()
        stubsize = llmemory.sizeof(self.FORWARDSTUB)
        tid = self.header(obj).tid
        ll_assert(tid & GCFLAG_EXTERNAL == 0,  "unexpected GCFLAG_EXTERNAL")
        ll_assert(tid & GCFLAG_FORWARDED == 0, "unexpected GCFLAG_FORWARDED")
        # replace the object at 'obj' with a FORWARDSTUB.
        hdraddr = obj - size_gc_header
        llarena.arena_reset(hdraddr, size_gc_header + objsize, False)
        llarena.arena_reserve(hdraddr, size_gc_header + stubsize)
        hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(self.HDR))
        hdr.tid = tid | GCFLAG_FORWARDED
        stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
        stub.forw = newobj

    def combine(self, typeid16, flags):
        return llop.combine_ushort(lltype.Signed, typeid16, flags)

    def get_type_id(self, addr):
        tid = self.header(addr).tid
        ll_assert(tid & (GCFLAG_FORWARDED|GCFLAG_EXTERNAL) != GCFLAG_FORWARDED,
                  "get_type_id on forwarded obj")
        # Non-prebuilt forwarded objects are overwritten with a FORWARDSTUB.
        # Although calling get_type_id() on a forwarded object works by itself,
        # we catch it as an error because it's likely that what is then
        # done with the typeid is bogus.
        return llop.extract_ushort(llgroup.HALFWORD, tid)

    def init_gc_object(self, addr, typeid16, flags=0):
        hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
        hdr.tid = self.combine(typeid16, flags)

    def init_gc_object_immortal(self, addr, typeid16, flags=0):
        hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
        flags |= GCFLAG_EXTERNAL | GCFLAG_FORWARDED | GC_HASH_TAKEN_ADDR
        hdr.tid = self.combine(typeid16, flags)
        # immortal objects always have GCFLAG_FORWARDED set;
        # see get_forwarding_address().

    def deal_with_objects_with_light_finalizers(self):
        """ This is a much simpler version of dealing with finalizers
        and an optimization - we can reasonably assume that those finalizers
        don't do anything fancy and *just* call them. Among other things
        they won't resurrect objects
        """
        new_objects = self.AddressStack()
        while self.objects_with_light_finalizers.non_empty():
            obj = self.objects_with_light_finalizers.pop()
            if self.surviving(obj):
                new_objects.append(self.get_forwarding_address(obj))
            else:
                self.call_destructor(obj)
        self.objects_with_light_finalizers.delete()
        self.objects_with_light_finalizers = new_objects

    def deal_with_objects_with_finalizers(self, scan):
        # walk over list of objects with finalizers
        # if it is not copied, add it to the list of to-be-called finalizers
        # and copy it, to make the finalizer runnable
        # We try to run the finalizers in a "reasonable" order, like
        # CPython does.  The details of this algorithm are in
        # pypy/doc/discussion/finalizer-order.txt.
        new_with_finalizer = self.AddressDeque()
        marked = self.AddressDeque()
        pending = self.AddressStack()
        self.tmpstack = self.AddressStack()
        while self.objects_with_finalizers.non_empty():
            x = self.objects_with_finalizers.popleft()
            fq_nr = self.objects_with_finalizers.popleft()
            ll_assert(self._finalization_state(x) != 1, 
                      "bad finalization state 1")
            if self.surviving(x):
                new_with_finalizer.append(self.get_forwarding_address(x))
                new_with_finalizer.append(fq_nr)
                continue
            marked.append(x)
            marked.append(fq_nr)
            pending.append(x)
            while pending.non_empty():
                y = pending.pop()
                state = self._finalization_state(y)
                if state == 0:
                    self._bump_finalization_state_from_0_to_1(y)
                    self.trace(y, self._append_if_nonnull, pending)
                elif state == 2:
                    self._recursively_bump_finalization_state_from_2_to_3(y)
            scan = self._recursively_bump_finalization_state_from_1_to_2(
                       x, scan)

        while marked.non_empty():
            x = marked.popleft()
            fq_nr = marked.popleft()
            state = self._finalization_state(x)
            ll_assert(state >= 2, "unexpected finalization state < 2")
            newx = self.get_forwarding_address(x)
            if state == 2:
                from rpython.rtyper.lltypesystem import rffi
                fq_index = rffi.cast(lltype.Signed, fq_nr)
                self.mark_finalizer_to_run(fq_index, newx)
                # we must also fix the state from 2 to 3 here, otherwise
                # we leave the GCFLAG_FINALIZATION_ORDERING bit behind
                # which will confuse the next collection
                self._recursively_bump_finalization_state_from_2_to_3(x)
            else:
                new_with_finalizer.append(newx)
                new_with_finalizer.append(fq_nr)

        self.tmpstack.delete()
        pending.delete()
        marked.delete()
        self.objects_with_finalizers.delete()
        self.objects_with_finalizers = new_with_finalizer
        return scan

    def _append_if_nonnull(pointer, stack):
        stack.append(pointer.address[0])
    _append_if_nonnull = staticmethod(_append_if_nonnull)

    def _finalization_state(self, obj):
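        # State encoding (see finalizer-order.txt): 0 = not surviving,
        # ORDERING flag clear; 1 = not surviving, flag set; 2 = surviving,
        # flag set; 3 = surviving, flag clear.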
        if self.surviving(obj):
            newobj = self.get_forwarding_address(obj)
            hdr = self.header(newobj)
            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:
                return 2
            else:
                return 3
        else:
            hdr = self.header(obj)
            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:
                return 1
            else:
                return 0

    def _bump_finalization_state_from_0_to_1(self, obj):
        ll_assert(self._finalization_state(obj) == 0,
                  "unexpected finalization state != 0")
        hdr = self.header(obj)
        hdr.tid |= GCFLAG_FINALIZATION_ORDERING

    def _recursively_bump_finalization_state_from_2_to_3(self, obj):
        ll_assert(self._finalization_state(obj) == 2,
                  "unexpected finalization state != 2")
        newobj = self.get_forwarding_address(obj)
        pending = self.tmpstack
        ll_assert(not pending.non_empty(), "tmpstack not empty")
        pending.append(newobj)
        while pending.non_empty():
            y = pending.pop()
            hdr = self.header(y)
            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:     # state 2 ?
                hdr.tid &= ~GCFLAG_FINALIZATION_ORDERING   # change to state 3
                self.trace(y, self._append_if_nonnull, pending)

    def _recursively_bump_finalization_state_from_1_to_2(self, obj, scan):
        # recursively convert objects from state 1 to state 2.
        # Note that copy() copies all bits, including the
        # GCFLAG_FINALIZATION_ORDERING.  The mapping between
        # state numbers and the presence of this bit was designed
        # for the following to work :-)
        self.copy(obj)
        return self.scan_copied(scan)

    def invalidate_weakrefs(self):
        # walk over list of objects that contain weakrefs
        # if the object it references survives then update the weakref
        # otherwise invalidate the weakref
        new_with_weakref = self.AddressStack()
        while self.objects_with_weakrefs.non_empty():
            obj = self.objects_with_weakrefs.pop()
            if not self.surviving(obj):
                continue # weakref itself dies
            obj = self.get_forwarding_address(obj)
            offset = self.weakpointer_offset(self.get_type_id(obj))
            pointing_to = (obj + offset).address[0]
            # XXX I think that pointing_to cannot be NULL here
            if pointing_to:
                if self.surviving(pointing_to):
                    (obj + offset).address[0] = self.get_forwarding_address(
                        pointing_to)
                    new_with_weakref.append(obj)
                else:
                    (obj + offset).address[0] = NULL
        self.objects_with_weakrefs.delete()
        self.objects_with_weakrefs = new_with_weakref

    def _is_external(self, obj):
        return (self.header(obj).tid & GCFLAG_EXTERNAL) != 0

    def _is_in_the_space(self, obj):
        return self.tospace <= obj < self.free

    def debug_check_object(self, obj):
        """Check the invariants about 'obj' that should be true
        between collections."""
        tid = self.header(obj).tid
        if tid & GCFLAG_EXTERNAL:
            ll_assert(tid & GCFLAG_FORWARDED != 0, "bug: external+!forwarded")
            ll_assert(not (self.tospace <= obj < self.free),
                      "external flag but object inside the semispaces")
        else:
            ll_assert(not (tid & GCFLAG_FORWARDED), "bug: !external+forwarded")
            ll_assert(self.tospace <= obj < self.free,
                      "!external flag but object outside the semispaces")
        ll_assert(not (tid & GCFLAG_FINALIZATION_ORDERING),
                  "unexpected GCFLAG_FINALIZATION_ORDERING")

    def debug_check_can_copy(self, obj):
        ll_assert(not (self.tospace <= obj < self.free),
                  "copy() on already-copied object")

    STATISTICS_NUMBERS = 0

    def is_in_nursery(self, addr):
        # overridden in generation.py.
        return False

    def _compute_current_nursery_hash(self, obj):
        # overridden in generation.py.
        raise AssertionError("should not be called")

    def identityhash(self, gcobj):
        # The following loop should run at most twice.
        while 1:
            obj = llmemory.cast_ptr_to_adr(gcobj)
            hdr = self.header(obj)
            if hdr.tid & GCFLAG_HASHMASK:
                break
            # It's the first time we ask for a hash, and it's not an
            # external object.  Shrink the top of space by the extra
            # hash word that will be needed after a collect.
            shrunk_top = self.top_of_space - llmemory.sizeof(lltype.Signed)
            if shrunk_top < self.free:
                # Cannot shrink!  Do a collection, asking for at least
                # one word of free space, and try again.  May raise
                # MemoryError.  Obscure: not called directly, but
                # across an llop, to make sure that there is the
                # correct push_roots/pop_roots around the call...
                llop.gc_obtain_free_space(llmemory.Address,
                                          llmemory.sizeof(lltype.Signed))
                continue
            else:
                # Now we can have side-effects: lower the top of space
                # and set one of the GC_HASH_TAKEN_xxx flags.
                self.top_of_space = shrunk_top
                if self.is_in_nursery(obj):
                    hdr.tid |= GC_HASH_TAKEN_NURS
                else:
                    hdr.tid |= GC_HASH_TAKEN_ADDR
                break
        # Now we can return the result
        objsize = self.get_size(obj)
        return self._get_object_hash(obj, objsize, hdr.tid)

    def track_heap_parent(self, obj, parent):
        addr = obj.address[0]
        parent_idx = llop.get_member_index(lltype.Signed,
                                           self.get_type_id(parent))
        idx = llop.get_member_index(lltype.Signed, self.get_type_id(addr))
        self._ll_typeid_map[parent_idx].links[idx] += 1
        self.track_heap(addr)

    def track_heap(self, adr):
        if self._tracked_dict.contains(adr):
            return
        self._tracked_dict.add(adr)
        idx = llop.get_member_index(lltype.Signed, self.get_type_id(adr))
        self._ll_typeid_map[idx].count += 1
        totsize = self.get_size(adr) + self.size_gc_header()
        self._ll_typeid_map[idx].size += llmemory.raw_malloc_usage(totsize)
        self.trace(adr, self.track_heap_parent, adr)

    @staticmethod
    def _track_heap_root(obj, self):
        self.track_heap(obj)

    def heap_stats(self):
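        # Builds a table indexed by type id: for each type, how many
        # instances are alive and their total size, plus (via
        # track_heap_parent()) how often instances of one type point to
        # instances of another.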
        self._tracked_dict = self.AddressDict()
        max_tid = self.root_walker.gcdata.max_type_id
        ll_typeid_map = lltype.malloc(ARRAY_TYPEID_MAP, max_tid, zero=True)
        for i in range(max_tid):
            ll_typeid_map[i] = lltype.malloc(TYPEID_MAP, max_tid, zero=True)
        self._ll_typeid_map = ll_typeid_map
        self._tracked_dict.add(llmemory.cast_ptr_to_adr(ll_typeid_map))
        i = 0
        while i < max_tid:
            self._tracked_dict.add(llmemory.cast_ptr_to_adr(ll_typeid_map[i]))
            i += 1
        self.enumerate_all_roots(SemiSpaceGC._track_heap_root, self)
        self._ll_typeid_map = lltype.nullptr(ARRAY_TYPEID_MAP)
        self._tracked_dict.delete()
        return ll_typeid_map