// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_
#include <cmath>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/base/atomic-utils.h"
#include "src/base/enum-set.h"
#include "src/base/platform/condition-variable.h"
#include "src/builtins/accessors.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
#include "src/objects/string-table.h"
#include "src/objects/visitors.h"
#include "src/roots/roots.h"
#include "src/utils/allocation.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
namespace debug {
using OutOfMemoryCallback = void (*)(void* data);
} // namespace debug
namespace internal {
namespace heap {
class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
namespace third_party_heap {
class Heap;
}  // namespace third_party_heap
class IncrementalMarking;
class BackingStore;
class JSArrayBuffer;
class JSPromise;
class NativeContext;
using v8::MemoryPressureLevel;
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferSweeper;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class GlobalSafepoint;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class Isolate;
class JSFinalizationRegistry;
class LocalEmbedderHeapTracer;
class LocalHeap;
class MemoryAllocator;
class MemoryChunk;
class MemoryMeasurement;
class MemoryReducer;
class MinorMarkCompactCollector;
class ObjectIterator;
class ObjectStats;
class OffThreadHeap;
class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
class SharedReadOnlySpace;
class Space;
class StressScavengeObserver;
class TimedHistogram;
class WeakObjectRetainer;
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};
enum class ClearRecordedSlots { kYes, kNo };
enum class InvalidateRecordedSlots { kYes, kNo };
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
enum class AllocationOrigin {
kGeneratedCode = 0,
kRuntime = 1,
kGC = 2,
kFirstAllocationOrigin = kGeneratedCode,
kLastAllocationOrigin = kGC,
kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
};
enum class GarbageCollectionReason {
kUnknown = 0,
kAllocationFailure = 1,
kAllocationLimit = 2,
kContextDisposal = 3,
kCountersExtension = 4,
kDebugger = 5,
kDeserializer = 6,
kExternalMemoryPressure = 7,
kFinalizeMarkingViaStackGuard = 8,
kFinalizeMarkingViaTask = 9,
kFullHashtable = 10,
kHeapProfiler = 11,
kTask = 12,
kLastResort = 13,
kLowMemoryNotification = 14,
kMakeHeapIterable = 15,
kMemoryPressure = 16,
kMemoryReducer = 17,
kRuntime = 18,
kSamplingProfiler = 19,
kSnapshotCreator = 20,
kTesting = 21,
kExternalFinalize = 22,
kGlobalAllocationLimit = 23,
kMeasureMemory = 24
// If you add new items here, then update the incremental_marking_reason,
// mark_compact_reason, and scavenge_reason counters in counters.h.
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
enum class YoungGenerationHandling {
kRegularScavenge = 0,
kFastPromotionDuringScavenge = 1,
// Histogram::InspectConstructionArguments in chromium requires us to have at
// least three buckets.
kUnusedBucket = 2,
// If you add new items here, then update the young_generation_handling in
// counters.h.
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
};
enum class GCIdleTimeAction : uint8_t;
enum class SkipRoot {
kExternalStringTable,
kGlobalHandles,
kOldGeneration,
kStack,
kUnserializable,
kWeak
};
class AllocationResult {
public:
static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
return AllocationResult(space);
}
// Implicit constructor from Object.
AllocationResult(Object object) // NOLINT
: object_(object) {
// AllocationResults can't return Smis, which are used to represent
// failure and the space to retry in.
CHECK(!object.IsSmi());
}
AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
inline bool IsRetry() { return object_.IsSmi(); }
inline HeapObject ToObjectChecked();
inline AllocationSpace RetrySpace();
template <typename T>
bool To(T* obj) {
if (IsRetry()) return false;
*obj = T::cast(object_);
return true;
}
private:
explicit AllocationResult(AllocationSpace space)
: object_(Smi::FromInt(static_cast<int>(space))) {}
Object object_;
};
STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
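// Usage sketch (editorial illustration; `AllocateSomething` is a hypothetical
// stand-in for any allocator returning an AllocationResult):
//
//   AllocationResult result = AllocateSomething(size_in_bytes);
//   HeapObject object;
//   if (!result.To(&object)) {
//     DCHECK(result.IsRetry());
//     // result.RetrySpace() names the space whose GC may free room to retry.
//   }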
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
int size;
int count;
void Clear() {
comment = nullptr;
size = 0;
count = 0;
}
// Must be small, since an iteration is used for lookup.
static const int kMaxComments = 64;
};
#endif
using EphemeronRememberedSet =
std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
Object::Hasher>;
class Heap {
public:
// Stores ephemeron entries where the EphemeronHashTable is in old-space,
// and the key of the entry is in new-space. Such keys do not appear in the
// usual OLD_TO_NEW remembered set.
EphemeronRememberedSet ephemeron_remembered_set_;
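// Illustration (editorial, hypothetical values): after a new-space key is
// written into entry 3 of an old-space EphemeronHashTable `table`, this map
// conceptually holds ephemeron_remembered_set_[table] == {3}, letting the
// scavenger revisit just that entry instead of rescanning the whole table.
// Entries are recorded via RecordEphemeronKeyWrite, declared below.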
enum FindMementoMode { kForRuntime, kForGC };
enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
enum HeapState {
NOT_IN_GC,
SCAVENGE,
MARK_COMPACT,
MINOR_MARK_COMPACT,
TEAR_DOWN
};
// Emits GC events for DevTools timeline.
class DevToolsTraceEventScope {
public:
DevToolsTraceEventScope(Heap* heap, const char* event_name,
const char* event_type);
~DevToolsTraceEventScope();
private:
Heap* heap_;
const char* event_name_;
};
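// Usage sketch (editorial illustration; the event strings are hypothetical):
//
//   {
//     DevToolsTraceEventScope scope(heap, "MajorGC", "atomic pause");
//     // ... run the GC phase; begin/end events bracket this block.
//   }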
using PretenuringFeedbackMap =
std::unordered_map<AllocationSite, size_t, Object::Hasher>;
// Taking this mutex prevents the GC from entering a phase that relocates
// object references.
base::Mutex* relocation_mutex() { return &relocation_mutex_; }
// Support for context snapshots. After calling this we have a linear
// space to write objects in each space.
struct Chunk {
uint32_t size;
Address start;
Address end;
};
using Reservation = std::vector<Chunk>;
#if V8_OS_ANDROID
// Don't apply the pointer multiplier on Android since it has no swap space
// and should instead adapt its heap size based on available physical memory.
static const int kPointerMultiplier = 1;
static const int kHeapLimitMultiplier = 1;
#else
static const int kPointerMultiplier = kTaggedSize / 4;
// The heap limit needs to be computed based on the system pointer size
// because we want a pointer-compressed heap to have a larger limit than an
// ordinary 32-bit heap, which is constrained by the 2 GB virtual address
// space.
static const int kHeapLimitMultiplier = kSystemPointerSize / 4;
#endif
static const size_t kMaxInitialOldGenerationSize =
256 * MB * kHeapLimitMultiplier;
// These constants control heap configuration based on the physical memory.
static constexpr size_t kPhysicalMemoryToOldGenerationRatio = 4;
// Young generation size is the same for compressed heaps and 32-bit heaps.
static constexpr size_t kOldGenerationToSemiSpaceRatio =
128 * kHeapLimitMultiplier / kPointerMultiplier;
static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory =
256 * kHeapLimitMultiplier / kPointerMultiplier;
static constexpr size_t kOldGenerationLowMemory =
128 * MB * kHeapLimitMultiplier;
static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio = 1;
static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
static constexpr size_t kMaxSemiSpaceSize = 8192 * KB * kPointerMultiplier;
STATIC_ASSERT(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
STATIC_ASSERT(kMaxSemiSpaceSize % (1 << kPageSizeBits) == 0);
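// Worked example (editorial; assumes a 64-bit, non-Android build without
// pointer compression, i.e. kTaggedSize == 8 and kSystemPointerSize == 8):
//   kPointerMultiplier   == 8 / 4 == 2
//   kHeapLimitMultiplier == 8 / 4 == 2
//   kMinSemiSpaceSize    == 512 KB * 2 == 1 MB
//   kMaxSemiSpaceSize    == 8192 KB * 2 == 16 MB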
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
// GCs that are forced, either through testing configurations (requiring
// --expose-gc) or through DevTools (using LowMemoryNotification).
static const int kForcedGC = 2;
// The minimum size of a HeapObject on the heap.
static const int kMinObjectSizeInTaggedWords = 2;
static const int kMinPromotedPercentForFastPromotionMode = 90;
STATIC_ASSERT(static_cast<int>(RootIndex::kUndefinedValue) ==
Internals::kUndefinedValueRootIndex);
STATIC_ASSERT(static_cast<int>(RootIndex::kTheHoleValue) ==
Internals::kTheHoleValueRootIndex);
STATIC_ASSERT(static_cast<int>(RootIndex::kNullValue) ==
Internals::kNullValueRootIndex);
STATIC_ASSERT(static_cast<int>(RootIndex::kTrueValue) ==
Internals::kTrueValueRootIndex);
STATIC_ASSERT(static_cast<int>(RootIndex::kFalseValue) ==
Internals::kFalseValueRootIndex);
STATIC_ASSERT(static_cast<int>(RootIndex::kempty_string) ==
Internals::kEmptyStringRootIndex);
// Calculates the maximum amount of filler that could be required by the
// given alignment.
V8_EXPORT_PRIVATE static int GetMaximumFillToAlign(
AllocationAlignment alignment);
// Calculates the actual amount of filler required for a given address at the
// given alignment.
V8_EXPORT_PRIVATE static int GetFillToAlign(Address address,
AllocationAlignment alignment);
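// Worked example (editorial): with kTaggedSize == 4, a kDoubleAligned
// allocation at an address ending in 0x4 needs a 4-byte filler to reach
// 8-byte alignment, while an already aligned address needs none; the
// maximum filler for that alignment is therefore one tagged word.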
// Returns the size of the initial area of a code-range, which is marked
// writable and reserved to contain unwind information.
static size_t GetCodeRangeReservedAreaSize();
[[noreturn]] void FatalProcessOutOfMemory(const char* location);
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
// Zapping is needed for heap verification and is always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
return true;
#else
#ifdef VERIFY_HEAP
return FLAG_verify_heap;
#else
return false;
#endif
#endif
}
// Helper function to get the bytecode flushing mode based on the flags. This
// is required because it is not safe to access flags in the concurrent marker.
static inline BytecodeFlushMode GetBytecodeFlushMode() {
if (FLAG_stress_flush_bytecode) {
return BytecodeFlushMode::kStressFlushBytecode;
} else if (FLAG_flush_bytecode) {
return BytecodeFlushMode::kFlushBytecode;
}
return BytecodeFlushMode::kDoNotFlushBytecode;
}
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
}
static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
}
static inline GarbageCollector YoungGenerationCollector() {
#if ENABLE_MINOR_MC
return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
#else
return SCAVENGER;
#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
switch (collector) {
case SCAVENGER:
return "Scavenger";
case MARK_COMPACTOR:
return "Mark-Compact";
case MINOR_MARK_COMPACTOR:
return "Minor Mark-Compact";
}
return "Unknown collector";
}
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
// Executes generational and/or marking write barrier for a [start, end) range
// of non-weak slots inside |object|.
template <typename TSlot>
V8_EXPORT_PRIVATE void WriteBarrierForRange(HeapObject object, TSlot start,
TSlot end);
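// Usage sketch (editorial): after storing tagged values into the slots
// [start, end) of `object` with barriers suppressed, a caller would issue
//
//   heap->WriteBarrierForRange(object, start_slot, end_slot);
//
// so that the generational and marking barriers observe every written slot.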
V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
V8_EXPORT_PRIVATE static void MarkingBarrierForArrayBufferExtensionSlow(
HeapObject object, ArrayBufferExtension* extension);
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(
EphemeronHashTable table, Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
Code host, RelocInfo* rinfo, HeapObject value);
V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
HeapObject value);
static void MarkingBarrierForArrayBufferExtension(
JSArrayBuffer object, ArrayBufferExtension* extension);
V8_EXPORT_PRIVATE static void MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject host, HeapObject descriptor_array,
int number_of_own_descriptors);
V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject object);
// Notifies the heap that it is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
void NotifyBootstrapComplete();
void NotifyOldGenerationExpansion();
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
ObjectSlot src_slot, int len, WriteBarrierMode mode);
// Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges must not overlap.
template <typename TSlot>
void CopyRange(HeapObject dst_object, TSlot dst_slot, TSlot src_slot, int len,
WriteBarrierMode mode);
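// Usage sketch (editorial): both helpers combine the element copy with the
// required write barriers. Use MoveRange when the ranges may overlap
// (memmove semantics) and CopyRange when they are known to be disjoint
// (memcpy semantics), e.g.
//
//   heap->CopyRange(dst_array, dst_slot, src_slot, len,
//                   UPDATE_WRITE_BARRIER);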
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
// pass ClearRecordedSlots::kNo. Clears memory if clearing slots.
V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
Address addr, int size, ClearRecordedSlots clear_slots_mode);
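// Usage sketch (editorial): punching a gap inside a live page, e.g. when
// shrinking an object in place where slots may have been recorded:
//
//   heap->CreateFillerObjectAt(object_end - bytes_to_trim, bytes_to_trim,
//                              ClearRecordedSlots::kYes);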
template <typename T>
void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
bool CanMoveObjectStart(HeapObject object);
bool IsImmovable(HeapObject object);
V8_EXPORT_PRIVATE static bool IsLargeObject(HeapObject object);
// This method supports the deserialization allocator. All allocations
// are word-aligned. The method should never fail to allocate since the
// total space requirements of the deserializer are known at build time.
inline Address DeserializerAllocate(AllocationType type, int size_in_bytes);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj,
int elements_to_trim);
// Trim the given array from the right.
V8_EXPORT_PRIVATE void RightTrimFixedArray(FixedArrayBase obj,
int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
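// Usage sketch (editorial): dropping the first element of a singly
// referenced array relocates the object start,
//
//   array = FixedArray::cast(heap->LeftTrimFixedArray(array, 1));
//
// whereas RightTrimFixedArray(array, n) shrinks the array in place from the
// end and carries no single-reference restriction.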
// Converts the given boolean condition to a JavaScript boolean value.
inline Oddball ToBoolean(bool condition);
// Notify the heap that a context has been disposed.
V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);
void set_native_contexts_list(Object object) {
native_contexts_list_ = object;
}
Object native_contexts_list() const { return native_contexts_list_; }
void set_allocation_sites_list(Object object) {
allocation_sites_list_ = object;
}
Object allocation_sites_list() { return allocation_sites_list_; }
void set_dirty_js_finalization_registries_list(Object object) {
dirty_js_finalization_registries_list_ = object;
}
Object dirty_js_finalization_registries_list() {
return dirty_js_finalization_registries_list_;
}
void set_dirty_js_finalization_registries_list_tail(Object object) {
dirty_js_finalization_registries_list_tail_ = object;
}
Object dirty_js_finalization_registries_list_tail() {
return dirty_js_finalization_registries_list_tail_;
}
// Used in CreateAllocationSiteStub and the (de)serializer.
Address allocation_sites_list_address() {
return reinterpret_cast<Address>(&allocation_sites_list_);
}
// Traverses all the allocation sites in the list [following nested_site and
// weak_next] and calls the visitor for each site.
void ForeachAllocationSite(
Object list, const std::function<void(AllocationSite)>& visitor);
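// Usage sketch (editorial):
//
//   heap->ForeachAllocationSite(
//       heap->allocation_sites_list(),
//       [](AllocationSite site) { /* inspect or reset the site */ });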
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
// Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
bool AllowedToBeMigrated(Map map, HeapObject object, AllocationSpace dest);
void CheckHandleCount();
// Number of "runtime allocations" done so far.
uint32_t allocations_count() { return allocations_count_; }
// Print short heap statistics.
void PrintShortHeapStatistics();
// Print statistics of freelists of old_space:
// with FLAG_trace_gc_freelists: summary of each FreeListCategory.
// with FLAG_trace_gc_freelists_verbose: also prints the statistics of each
// FreeListCategory of each page.
void PrintFreeListsStats();
// Dump heap statistics in JSON format.
void DumpJSONHeapStatistics(std::stringstream& stream);
bool write_protect_code_memory() const { return write_protect_code_memory_; }
uintptr_t code_space_memory_modification_scope_depth() {
return code_space_memory_modification_scope_depth_;
}
void increment_code_space_memory_modification_scope_depth() {
code_space_memory_modification_scope_depth_++;
}
void decrement_code_space_memory_modification_scope_depth() {
code_space_memory_modification_scope_depth_--;
}
void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(HeapObject object);
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
void EnableUnprotectedMemoryChunksRegistry() {
unprotected_memory_chunks_registry_enabled_ = true;
}
void DisableUnprotectedMemoryChunksRegistry() {
unprotected_memory_chunks_registry_enabled_ = false;
}
bool unprotected_memory_chunks_registry_enabled() {
return unprotected_memory_chunks_registry_enabled_;
}
inline HeapState gc_state() { return gc_state_; }
void SetGCState(HeapState state);
bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
// If an object has an AllocationMemento trailing it, return it, otherwise
// return a null AllocationMemento.
template <FindMementoMode mode>
inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
void RequestAndWaitForCollection();
//
// Support for the API.
//
void CreateApiObjects();
// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
bool IdleNotification(int idle_time_in_ms);
V8_EXPORT_PRIVATE void MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked);
void CheckMemoryPressure();
V8_EXPORT_PRIVATE void AddNearHeapLimitCallback(v8::NearHeapLimitCallback,
void* data);
V8_EXPORT_PRIVATE void RemoveNearHeapLimitCallback(
v8::NearHeapLimitCallback callback, size_t heap_limit);
V8_EXPORT_PRIVATE void AutomaticallyRestoreInitialHeapLimit(
double threshold_percent);
void AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension);
GlobalSafepoint* safepoint() { return safepoint_.get(); }
V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
bool MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
v8::MeasureMemoryExecution execution);
std::unique_ptr<v8::MeasureMemoryDelegate> MeasureMemoryDelegate(
Handle<NativeContext> context, Handle<JSPromise> promise,
v8::MeasureMemoryMode mode);
// Checks the new space expansion criteria and expands semispaces if they
// were hit.
void CheckNewSpaceExpansionCriteria();
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
// An object should be promoted if the object has survived a
// scavenge operation.
inline bool ShouldBePromoted(Address old_address);
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
inline int NextScriptId();
inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();
void SetSerializedObjects(FixedArray objects);
void SetSerializedGlobalProxySizes(FixedArray sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
int64_t external_memory_hard_limit() { return max_old_generation_size_ / 2; }
V8_INLINE int64_t external_memory();
V8_INLINE void update_external_memory(int64_t delta);
V8_INLINE void update_external_memory_concurrently_freed(uintptr_t freed);
V8_INLINE void account_external_memory_concurrently_freed();
V8_EXPORT_PRIVATE size_t YoungArrayBufferBytes();
V8_EXPORT_PRIVATE size_t OldArrayBufferBytes();
size_t backing_store_bytes() const { return backing_store_bytes_; }
void CompactWeakArrayLists(AllocationType allocation);
V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
Handle<Map> map);
// This event is triggered after successful allocation of a new object made
// by the runtime. Allocations of target space for object evacuation do not
// trigger the event. In order to track ALL allocations one must turn off
// FLAG_inline_new.
inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
// This event is triggered after an object is moved to a new place.
void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
inline bool CanAllocateInReadOnlySpace();
bool deserialization_complete() const { return deserialization_complete_; }
bool HasLowAllocationRate();
bool HasHighFragmentation();
bool HasHighFragmentation(size_t used, size_t committed);
void ActivateMemoryReducerIfNeeded();
V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();
bool HighMemoryPressure() {
return memory_pressure_level_ != MemoryPressureLevel::kNone;
}
void RestoreHeapLimit(size_t heap_limit) {
// Do not set the limit lower than the live size + some slack.
size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
max_old_generation_size_ =
Min(max_old_generation_size_, Max(heap_limit, min_limit));
}
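// Worked example (editorial, hypothetical sizes): with SizeOfObjects() ==
// 400 MB, min_limit == 400 MB + 100 MB == 500 MB, so a requested heap_limit
// of 256 MB is raised to 500 MB before the clamp against
// max_old_generation_size_ is applied.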
// ===========================================================================
// Initialization. ===========================================================
// ===========================================================================
void ConfigureHeap(const v8::ResourceConstraints& constraints);
void ConfigureHeapDefault();
// Prepares the heap, setting up for deserialization.
void SetUp();
// Sets read-only heap and space.
void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);
// Sets up the heap memory without creating any objects.
void SetUpSpaces();
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
// Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
bool CreateHeapObjects();
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
// Sets the TearDown state, so no new GC tasks get posted.
void StartTearDown();
// Destroys all memory allocated by the heap.
void TearDown();
// Returns whether SetUp has been called.
bool HasBeenSetUp();
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
inline Address NewSpaceTop();
NewSpace* new_space() { return new_space_; }
OldSpace* old_space() { return old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
OldLargeObjectSpace* lo_space() { return lo_space_; }
CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
// Returns name of the space.
V8_EXPORT_PRIVATE static const char* GetSpaceName(AllocationSpace space);
// ===========================================================================
// Getters to other components. ==============================================
// ===========================================================================
GCTracer* tracer() { return tracer_.get(); }
MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
}
MinorMarkCompactCollector* minor_mark_compact_collector() {
return minor_mark_compact_collector_;
}
ArrayBufferCollector* array_buffer_collector() {
return array_buffer_collector_.get();
}
ArrayBufferSweeper* array_buffer_sweeper() {
return array_buffer_sweeper_.get();
}
const base::AddressRegion& code_range();
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
// Shortcut to the roots table stored in the Isolate.
V8_INLINE RootsTable& roots_table();
// Heap root getters.
#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
V8_INLINE void SetRootScriptList(Object value);
V8_INLINE void SetRootStringTable(StringTable value);
V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object value);
V8_INLINE void SetMessageListeners(TemplateList value);
V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);
void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
void UnregisterStrongRoots(FullObjectSlot start);
void SetBuiltinsConstantsTable(FixedArray cache);
void SetDetachedContexts(WeakArrayList detached_contexts);
// A full copy of the interpreter entry trampoline, used as a template to
// create copies of the builtin at runtime. The copies are used to create
// better profiling information for ticks in bytecode execution. Note that
// this is always a copy of the full builtin, i.e. not the off-heap
// trampoline.
// See also: FLAG_interpreted_frames_native_stack.
void SetInterpreterEntryTrampolineForProfiling(Code code);
void EnqueueDirtyJSFinalizationRegistry(
JSFinalizationRegistry finalization_registry,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
MaybeHandle<JSFinalizationRegistry> DequeueDirtyJSFinalizationRegistry();
// Called from Heap::NotifyContextDisposed to remove all
// FinalizationRegistries with {context} from the dirty list when the context
// e.g. navigates away or is detached. If the dirty list is empty afterwards,
// the cleanup task is aborted if needed.
void RemoveDirtyFinalizationRegistriesOnContext(NativeContext context);
inline bool HasDirtyJSFinalizationRegistries();
void PostFinalizationRegistryCleanupTaskIfNeeded();
void set_is_finalization_registry_cleanup_task_posted(bool posted) {
is_finalization_registry_cleanup_task_posted_ = posted;
}
bool is_finalization_registry_cleanup_task_posted() {
return is_finalization_registry_cleanup_task_posted_;
}
V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
void ClearKeptObjects();
// ===========================================================================
// Inline allocation. ========================================================
// ===========================================================================
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
// Switch whether inline bump-pointer allocation should be used.
V8_EXPORT_PRIVATE void EnableInlineAllocation();
V8_EXPORT_PRIVATE void DisableInlineAllocation();
// ===========================================================================
// Methods triggering GCs. ===================================================
// ===========================================================================
// Performs garbage collection operation.
// Returns whether there is a chance that another major GC could
// collect more garbage.
V8_EXPORT_PRIVATE bool CollectGarbage(
AllocationSpace space, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs a full garbage collection.
V8_EXPORT_PRIVATE void CollectAllGarbage(
int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Last hope GC, should try to squeeze as much as possible.
V8_EXPORT_PRIVATE void CollectAllAvailableGarbage(
GarbageCollectionReason gc_reason);
// Precise garbage collection that potentially finalizes already running
// incremental marking before performing an atomic garbage collection.
// Only use if absolutely necessary or in tests to avoid floating garbage!
V8_EXPORT_PRIVATE void PreciseCollectAllGarbage(
int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
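// Usage sketch (editorial): a memory-pressure path might request
//
//   CollectAllGarbage(kReduceMemoryFootprintMask,
//                     GarbageCollectionReason::kMemoryPressure);
//
// while a test that must not observe floating garbage would instead call
// PreciseCollectAllGarbage with the same arguments.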
// Reports an external memory pressure event; either performs a major GC or
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();
using GetExternallyAllocatedMemoryInBytesCallback =
v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback;
void SetGetExternallyAllocatedMemoryInBytesCallback(
GetExternallyAllocatedMemoryInBytesCallback callback) {
external_memory_callback_ = callback;
}
// Invoked when GC was requested via the stack guard.
void HandleGCRequest();
// ===========================================================================
// Builtins. =================================================================
// ===========================================================================
V8_EXPORT_PRIVATE Code builtin(int index);
Address builtin_address(int index);
void set_builtin(int index, Code builtin);
// ===========================================================================
// Iterators. ================================================================
// ===========================================================================
// None of these methods iterate over the read-only roots. To do this use
// ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
// garbage collection and is usually only performed as part of
// (de)serialization or heap verification.
// Iterates over the strong roots and the weak roots.
void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
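// Usage sketch (editorial): a visitor that cannot handle stack-held or
// unserializable roots might be invoked as
//
//   heap->IterateRoots(visitor, base::EnumSet<SkipRoot>{
//                                   SkipRoot::kStack,
//                                   SkipRoot::kUnserializable});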
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
// Iterates over weak string tables.
void IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateWeakGlobalHandles(RootVisitor* v);
void IterateBuiltins(RootVisitor* v);
void IterateStackRoots(RootVisitor* v);
// ===========================================================================
// Store buffer API. =========================================================
// ===========================================================================
// Used to query incremental marking status in generated code.
Address* IsMarkingFlagAddress() {
return reinterpret_cast<Address*>(&is_marking_flag_);
}
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
V8_EXPORT_PRIVATE Address* store_buffer_top_address();
static intptr_t store_buffer_mask_constant();
static Address store_buffer_overflow_function_address();
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);
#ifdef DEBUG
void VerifyClearedSlot(HeapObject object, ObjectSlot slot);
void VerifySlotRangeHasNoRecordedSlots(Address start, Address end);
#endif
// ===========================================================================
// Incremental marking API. ==================================================
// ===========================================================================
int GCFlagsForIncrementalMarking() {
return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
: kNoGCFlags;
}
// Starts incremental marking and ensures that the idle time handler can
// perform incremental steps.
V8_EXPORT_PRIVATE void StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
// Starts incremental marking assuming incremental marking is currently
// stopped.
V8_EXPORT_PRIVATE void StartIncrementalMarking(
int gc_flags, GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
void StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
void StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
// Synchronously finalizes incremental marking.
V8_EXPORT_PRIVATE void FinalizeIncrementalMarkingAtomically(
GarbageCollectionReason gc_reason);
void RegisterDeserializedObjectsForBlackAllocation(