1/*
2 * Copyright (C) 2003-2019 Apple Inc. All rights reserved.
3 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */
20
21#include "config.h"
22#include "Heap.h"
23
24#include "BlockDirectoryInlines.h"
25#include "BuiltinExecutables.h"
26#include "CodeBlock.h"
27#include "CodeBlockSetInlines.h"
28#include "CollectingScope.h"
29#include "ConservativeRoots.h"
30#include "DFGWorklistInlines.h"
31#include "EdenGCActivityCallback.h"
32#include "Exception.h"
33#include "FullGCActivityCallback.h"
34#include "GCActivityCallback.h"
35#include "GCIncomingRefCountedSetInlines.h"
36#include "GCSegmentedArrayInlines.h"
37#include "GCTypeMap.h"
38#include "HasOwnPropertyCache.h"
39#include "HeapHelperPool.h"
40#include "HeapIterationScope.h"
41#include "HeapProfiler.h"
42#include "HeapSnapshot.h"
43#include "HeapVerifier.h"
44#include "IncrementalSweeper.h"
45#include "InferredValueInlines.h"
46#include "Interpreter.h"
47#include "IsoCellSetInlines.h"
48#include "JITStubRoutineSet.h"
49#include "JITWorklist.h"
50#include "JSCInlines.h"
51#include "JSGlobalObject.h"
52#include "JSLock.h"
53#include "JSVirtualMachineInternal.h"
54#include "JSWeakMap.h"
55#include "JSWeakSet.h"
56#include "JSWebAssemblyCodeBlock.h"
57#include "MachineStackMarker.h"
58#include "MarkStackMergingConstraint.h"
59#include "MarkedSpaceInlines.h"
60#include "MarkingConstraintSet.h"
61#include "PreventCollectionScope.h"
62#include "SamplingProfiler.h"
63#include "ShadowChicken.h"
64#include "SpaceTimeMutatorScheduler.h"
65#include "StochasticSpaceTimeMutatorScheduler.h"
66#include "StopIfNecessaryTimer.h"
67#include "SubspaceInlines.h"
68#include "SuperSampler.h"
69#include "SweepingScope.h"
70#include "SynchronousStopTheWorldMutatorScheduler.h"
71#include "TypeProfiler.h"
72#include "TypeProfilerLog.h"
73#include "UnlinkedCodeBlock.h"
74#include "VM.h"
75#include "VisitCounter.h"
76#include "WasmMemory.h"
77#include "WeakMapImplInlines.h"
78#include "WeakSetInlines.h"
79#include <algorithm>
80#include <wtf/ListDump.h>
81#include <wtf/MainThread.h>
82#include <wtf/ParallelVectorIterator.h>
83#include <wtf/ProcessID.h>
84#include <wtf/RAMSize.h>
85#include <wtf/SimpleStats.h>
86#include <wtf/Threading.h>
87
88#if PLATFORM(IOS_FAMILY)
89#include <bmalloc/bmalloc.h>
90#endif
91
92#if USE(FOUNDATION)
93#include <wtf/spi/cocoa/objcSPI.h>
94#endif
95
96#if USE(GLIB)
97#include "JSCGLibWrapperObject.h"
98#endif
99
100namespace JSC {
101
102namespace {
103
104bool verboseStop = false;
105
106double maxPauseMS(double thisPauseMS)
107{
108 static double maxPauseMS;
109 maxPauseMS = std::max(thisPauseMS, maxPauseMS);
110 return maxPauseMS;
111}
112
113size_t minHeapSize(HeapType heapType, size_t ramSize)
114{
115 if (heapType == LargeHeap) {
116 double result = std::min(
117 static_cast<double>(Options::largeHeapSize()),
118 ramSize * Options::smallHeapRAMFraction());
119 return static_cast<size_t>(result);
120 }
121 return Options::smallHeapSize();
122}
123
124size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
125{
126 if (VM::isInMiniMode())
127 return Options::miniVMHeapGrowthFactor() * heapSize;
128
129#if PLATFORM(IOS_FAMILY)
130 size_t memoryFootprint = bmalloc::api::memoryFootprint();
131 if (memoryFootprint < ramSize * Options::smallHeapRAMFraction())
132 return Options::smallHeapGrowthFactor() * heapSize;
133 if (memoryFootprint < ramSize * Options::mediumHeapRAMFraction())
134 return Options::mediumHeapGrowthFactor() * heapSize;
135#else
136 if (heapSize < ramSize * Options::smallHeapRAMFraction())
137 return Options::smallHeapGrowthFactor() * heapSize;
138 if (heapSize < ramSize * Options::mediumHeapRAMFraction())
139 return Options::mediumHeapGrowthFactor() * heapSize;
140#endif
141 return Options::largeHeapGrowthFactor() * heapSize;
142}
143
144bool isValidSharedInstanceThreadState(VM* vm)
145{
146 return vm->currentThreadIsHoldingAPILock();
147}
148
149bool isValidThreadState(VM* vm)
150{
151 if (vm->atomicStringTable() != Thread::current().atomicStringTable())
152 return false;
153
154 if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
155 return false;
156
157 return true;
158}
159
160void recordType(VM& vm, TypeCountSet& set, JSCell* cell)
161{
162 const char* typeName = "[unknown]";
163 const ClassInfo* info = cell->classInfo(vm);
164 if (info && info->className)
165 typeName = info->className;
166 set.add(typeName);
167}
168
169bool measurePhaseTiming()
170{
171 return false;
172}
173
174HashMap<const char*, GCTypeMap<SimpleStats>>& timingStats()
175{
176 static HashMap<const char*, GCTypeMap<SimpleStats>>* result;
177 static std::once_flag once;
178 std::call_once(
179 once,
180 [] {
181 result = new HashMap<const char*, GCTypeMap<SimpleStats>>();
182 });
183 return *result;
184}
185
186SimpleStats& timingStats(const char* name, CollectionScope scope)
187{
188 return timingStats().add(name, GCTypeMap<SimpleStats>()).iterator->value[scope];
189}
190
191class TimingScope {
192public:
193 TimingScope(Optional<CollectionScope> scope, const char* name)
194 : m_scope(scope)
195 , m_name(name)
196 {
197 if (measurePhaseTiming())
198 m_before = MonotonicTime::now();
199 }
200
201 TimingScope(Heap& heap, const char* name)
202 : TimingScope(heap.collectionScope(), name)
203 {
204 }
205
206 void setScope(Optional<CollectionScope> scope)
207 {
208 m_scope = scope;
209 }
210
211 void setScope(Heap& heap)
212 {
213 setScope(heap.collectionScope());
214 }
215
216 ~TimingScope()
217 {
218 if (measurePhaseTiming()) {
219 MonotonicTime after = MonotonicTime::now();
220 Seconds timing = after - m_before;
221 SimpleStats& stats = timingStats(m_name, *m_scope);
222 stats.add(timing.milliseconds());
223 dataLog("[GC:", *m_scope, "] ", m_name, " took: ", timing.milliseconds(), "ms (average ", stats.mean(), "ms).\n");
224 }
225 }
226private:
227 Optional<CollectionScope> m_scope;
228 MonotonicTime m_before;
229 const char* m_name;
230};
231
232} // anonymous namespace
233
234class Heap::HeapThread : public AutomaticThread {
235public:
236 HeapThread(const AbstractLocker& locker, Heap& heap)
237 : AutomaticThread(locker, heap.m_threadLock, heap.m_threadCondition.copyRef())
238 , m_heap(heap)
239 {
240 }
241
242 const char* name() const override
243 {
244 return "JSC Heap Collector Thread";
245 }
246
247protected:
248 PollResult poll(const AbstractLocker& locker) override
249 {
250 if (m_heap.m_threadShouldStop) {
251 m_heap.notifyThreadStopping(locker);
252 return PollResult::Stop;
253 }
254 if (m_heap.shouldCollectInCollectorThread(locker))
255 return PollResult::Work;
256 return PollResult::Wait;
257 }
258
259 WorkResult work() override
260 {
261 m_heap.collectInCollectorThread();
262 return WorkResult::Continue;
263 }
264
265 void threadDidStart() override
266 {
267 Thread::registerGCThread(GCThreadType::Main);
268 }
269
270private:
271 Heap& m_heap;
272};
273
274Heap::Heap(VM* vm, HeapType heapType)
275 : m_heapType(heapType)
276 , m_ramSize(Options::forceRAMSize() ? Options::forceRAMSize() : ramSize())
277 , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
278 , m_maxEdenSize(m_minBytesPerCycle)
279 , m_maxHeapSize(m_minBytesPerCycle)
280 , m_objectSpace(this)
281 , m_machineThreads(std::make_unique<MachineThreads>())
282 , m_collectorSlotVisitor(std::make_unique<SlotVisitor>(*this, "C"))
283 , m_mutatorSlotVisitor(std::make_unique<SlotVisitor>(*this, "M"))
284 , m_mutatorMarkStack(std::make_unique<MarkStackArray>())
285 , m_raceMarkStack(std::make_unique<MarkStackArray>())
286 , m_constraintSet(std::make_unique<MarkingConstraintSet>(*this))
287 , m_handleSet(vm)
288 , m_codeBlocks(std::make_unique<CodeBlockSet>())
289 , m_jitStubRoutines(std::make_unique<JITStubRoutineSet>())
290 , m_vm(vm)
291 // We seed with 10ms so that GCActivityCallback::didAllocate doesn't continuously
292 // schedule the timer if we've never done a collection.
293 , m_fullActivityCallback(GCActivityCallback::tryCreateFullTimer(this))
294 , m_edenActivityCallback(GCActivityCallback::tryCreateEdenTimer(this))
295 , m_sweeper(adoptRef(*new IncrementalSweeper(this)))
296 , m_stopIfNecessaryTimer(adoptRef(*new StopIfNecessaryTimer(vm)))
297 , m_sharedCollectorMarkStack(std::make_unique<MarkStackArray>())
298 , m_sharedMutatorMarkStack(std::make_unique<MarkStackArray>())
299 , m_helperClient(&heapHelperPool())
300 , m_threadLock(Box<Lock>::create())
301 , m_threadCondition(AutomaticThreadCondition::create())
302{
303 m_worldState.store(0);
304
305 if (Options::useConcurrentGC()) {
306 if (Options::useStochasticMutatorScheduler())
307 m_scheduler = std::make_unique<StochasticSpaceTimeMutatorScheduler>(*this);
308 else
309 m_scheduler = std::make_unique<SpaceTimeMutatorScheduler>(*this);
310 } else {
311 // We simulate turning off concurrent GC by making the scheduler say that the world
312 // should always be stopped when the collector is running.
313 m_scheduler = std::make_unique<SynchronousStopTheWorldMutatorScheduler>();
314 }
315
316 if (Options::verifyHeap())
317 m_verifier = std::make_unique<HeapVerifier>(this, Options::numberOfGCCyclesToRecordForVerification());
318
319 m_collectorSlotVisitor->optimizeForStoppedMutator();
320
321 // When memory is critical, allow allocating 25% of the amount above the critical threshold before collecting.
322 size_t memoryAboveCriticalThreshold = static_cast<size_t>(static_cast<double>(m_ramSize) * (1.0 - Options::criticalGCMemoryThreshold()));
323 m_maxEdenSizeWhenCritical = memoryAboveCriticalThreshold / 4;
324
325 LockHolder locker(*m_threadLock);
326 m_thread = adoptRef(new HeapThread(locker, *this));
327}
328
329Heap::~Heap()
330{
331 forEachSlotVisitor(
332 [&] (SlotVisitor& visitor) {
333 visitor.clearMarkStacks();
334 });
335 m_mutatorMarkStack->clear();
336 m_raceMarkStack->clear();
337
338 for (WeakBlock* block : m_logicallyEmptyWeakBlocks)
339 WeakBlock::destroy(*this, block);
340}
341
342bool Heap::isPagedOut(MonotonicTime deadline)
343{
344 return m_objectSpace.isPagedOut(deadline);
345}
346
347void Heap::dumpHeapStatisticsAtVMDestruction()
348{
349 unsigned counter = 0;
350 m_objectSpace.forEachBlock([&] (MarkedBlock::Handle* block) {
351 unsigned live = 0;
352 block->forEachCell([&] (HeapCell* cell, HeapCell::Kind) {
353 if (cell->isLive())
354 live++;
355 return IterationStatus::Continue;
356 });
357 dataLogLn("[", counter++, "] ", block->cellSize(), ", ", live, " / ", block->cellsPerBlock(), " ", static_cast<double>(live) / block->cellsPerBlock() * 100, "% ", block->attributes(), " ", block->subspace()->name());
358 block->forEachCell([&] (HeapCell* heapCell, HeapCell::Kind kind) {
359 if (heapCell->isLive() && kind == HeapCell::Kind::JSCell) {
360 auto* cell = static_cast<JSCell*>(heapCell);
361 if (cell->isObject())
362 dataLogLn(" ", JSValue((JSObject*)cell));
363 else
364 dataLogLn(" ", *cell);
365 }
366 return IterationStatus::Continue;
367 });
368 });
369}
370
371// The VM is being destroyed and the collector will never run again.
372// Run all pending finalizers now because we won't get another chance.
373void Heap::lastChanceToFinalize()
374{
375 MonotonicTime before;
376 if (Options::logGC()) {
377 before = MonotonicTime::now();
378 dataLog("[GC<", RawPointer(this), ">: shutdown ");
379 }
380
381 m_isShuttingDown = true;
382
383 RELEASE_ASSERT(!m_vm->entryScope);
384 RELEASE_ASSERT(m_mutatorState == MutatorState::Running);
385
386 if (m_collectContinuouslyThread) {
387 {
388 LockHolder locker(m_collectContinuouslyLock);
389 m_shouldStopCollectingContinuously = true;
390 m_collectContinuouslyCondition.notifyOne();
391 }
392 m_collectContinuouslyThread->waitForCompletion();
393 }
394
395 if (Options::logGC())
396 dataLog("1");
397
398 // Prevent new collections from being started. This is probably not even necessary, since we're not
399 // going to call into anything that starts collections. Still, this makes the algorithm more
400 // obviously sound.
401 m_isSafeToCollect = false;
402
403 if (Options::logGC())
404 dataLog("2");
405
406 bool isCollecting;
407 {
408 auto locker = holdLock(*m_threadLock);
409 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
410 isCollecting = m_lastServedTicket < m_lastGrantedTicket;
411 }
412 if (isCollecting) {
413 if (Options::logGC())
414 dataLog("...]\n");
415
416 // Wait for the current collection to finish.
417 waitForCollector(
418 [&] (const AbstractLocker&) -> bool {
419 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
420 return m_lastServedTicket == m_lastGrantedTicket;
421 });
422
423 if (Options::logGC())
424 dataLog("[GC<", RawPointer(this), ">: shutdown ");
425 }
426 if (Options::logGC())
427 dataLog("3");
428
429 RELEASE_ASSERT(m_requests.isEmpty());
430 RELEASE_ASSERT(m_lastServedTicket == m_lastGrantedTicket);
431
432 // Carefully bring the thread down.
433 bool stopped = false;
434 {
435 LockHolder locker(*m_threadLock);
436 stopped = m_thread->tryStop(locker);
437 m_threadShouldStop = true;
438 if (!stopped)
439 m_threadCondition->notifyOne(locker);
440 }
441
442 if (Options::logGC())
443 dataLog("4");
444
445 if (!stopped)
446 m_thread->join();
447
448 if (Options::logGC())
449 dataLog("5 ");
450
451 if (UNLIKELY(Options::dumpHeapStatisticsAtVMDestruction()))
452 dumpHeapStatisticsAtVMDestruction();
453
454 m_arrayBuffers.lastChanceToFinalize();
455 m_objectSpace.stopAllocatingForGood();
456 m_objectSpace.lastChanceToFinalize();
457 releaseDelayedReleasedObjects();
458
459 sweepAllLogicallyEmptyWeakBlocks();
460
461 m_objectSpace.freeMemory();
462
463 if (Options::logGC())
464 dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
465}
466
467void Heap::releaseDelayedReleasedObjects()
468{
469#if USE(FOUNDATION) || USE(GLIB)
470 // We need to guard against the case that releasing an object can create more objects due to the
471 // release calling into JS. When those JS call(s) exit and all locks are being dropped we end up
472 // back here and could try to recursively release objects. We guard that with a recursive entry
473 // count. Only the initial call will release objects, recursive calls simple return and let the
474 // the initial call to the function take care of any objects created during release time.
475 // This also means that we need to loop until there are no objects in m_delayedReleaseObjects
476 // and use a temp Vector for the actual releasing.
477 if (!m_delayedReleaseRecursionCount++) {
478 while (!m_delayedReleaseObjects.isEmpty()) {
479 ASSERT(m_vm->currentThreadIsHoldingAPILock());
480
481 auto objectsToRelease = WTFMove(m_delayedReleaseObjects);
482
483 {
484 // We need to drop locks before calling out to arbitrary code.
485 JSLock::DropAllLocks dropAllLocks(m_vm);
486
487#if USE(FOUNDATION)
488 void* context = objc_autoreleasePoolPush();
489#endif
490 objectsToRelease.clear();
491#if USE(FOUNDATION)
492 objc_autoreleasePoolPop(context);
493#endif
494 }
495 }
496 }
497 m_delayedReleaseRecursionCount--;
498#endif
499}
500
501void Heap::reportExtraMemoryAllocatedSlowCase(size_t size)
502{
503 didAllocate(size);
504 collectIfNecessaryOrDefer();
505}
506
507void Heap::deprecatedReportExtraMemorySlowCase(size_t size)
508{
509 // FIXME: Change this to use SaturatedArithmetic when available.
510 // https://bugs.webkit.org/show_bug.cgi?id=170411
511 Checked<size_t, RecordOverflow> checkedNewSize = m_deprecatedExtraMemorySize;
512 checkedNewSize += size;
513 m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
514 reportExtraMemoryAllocatedSlowCase(size);
515}
516
517bool Heap::overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType)
518{
519#if PLATFORM(IOS_FAMILY)
520 if (memoryThresholdCallType == MemoryThresholdCallType::Direct || ++m_precentAvailableMemoryCachedCallCount >= 100) {
521 m_overCriticalMemoryThreshold = bmalloc::api::percentAvailableMemoryInUse() > Options::criticalGCMemoryThreshold();
522 m_precentAvailableMemoryCachedCallCount = 0;
523 }
524
525 return m_overCriticalMemoryThreshold;
526#else
527 UNUSED_PARAM(memoryThresholdCallType);
528 return false;
529#endif
530}
531
532void Heap::reportAbandonedObjectGraph()
533{
534 // Our clients don't know exactly how much memory they
535 // are abandoning so we just guess for them.
536 size_t abandonedBytes = static_cast<size_t>(0.1 * capacity());
537
538 // We want to accelerate the next collection. Because memory has just
539 // been abandoned, the next collection has the potential to
540 // be more profitable. Since allocation is the trigger for collection,
541 // we hasten the next collection by pretending that we've allocated more memory.
542 if (m_fullActivityCallback) {
543 m_fullActivityCallback->didAllocate(*this,
544 m_sizeAfterLastCollect - m_sizeAfterLastFullCollect + m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
545 }
546 m_bytesAbandonedSinceLastFullCollect += abandonedBytes;
547}
548
549void Heap::protect(JSValue k)
550{
551 ASSERT(k);
552 ASSERT(m_vm->currentThreadIsHoldingAPILock());
553
554 if (!k.isCell())
555 return;
556
557 m_protectedValues.add(k.asCell());
558}
559
560bool Heap::unprotect(JSValue k)
561{
562 ASSERT(k);
563 ASSERT(m_vm->currentThreadIsHoldingAPILock());
564
565 if (!k.isCell())
566 return false;
567
568 return m_protectedValues.remove(k.asCell());
569}
570
571void Heap::addReference(JSCell* cell, ArrayBuffer* buffer)
572{
573 if (m_arrayBuffers.addReference(cell, buffer)) {
574 collectIfNecessaryOrDefer();
575 didAllocate(buffer->gcSizeEstimateInBytes());
576 }
577}
578
579template<typename CellType, typename CellSet>
580void Heap::finalizeMarkedUnconditionalFinalizers(CellSet& cellSet)
581{
582 cellSet.forEachMarkedCell(
583 [&] (HeapCell* cell, HeapCell::Kind) {
584 static_cast<CellType*>(cell)->finalizeUnconditionally(*vm());
585 });
586}
587
588void Heap::finalizeUnconditionalFinalizers()
589{
590 vm()->builtinExecutables()->finalizeUnconditionally();
591 if (vm()->m_inferredValueSpace)
592 finalizeMarkedUnconditionalFinalizers<InferredValue>(vm()->m_inferredValueSpace->space);
593 vm()->forEachCodeBlockSpace(
594 [&] (auto& space) {
595 this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.set);
596 });
597 finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm()->executableToCodeBlockEdgesWithFinalizers);
598 finalizeMarkedUnconditionalFinalizers<StructureRareData>(vm()->structureRareDataSpace);
599 if (vm()->m_weakSetSpace)
600 finalizeMarkedUnconditionalFinalizers<JSWeakSet>(*vm()->m_weakSetSpace);
601 if (vm()->m_weakMapSpace)
602 finalizeMarkedUnconditionalFinalizers<JSWeakMap>(*vm()->m_weakMapSpace);
603 if (vm()->m_errorInstanceSpace)
604 finalizeMarkedUnconditionalFinalizers<ErrorInstance>(*vm()->m_errorInstanceSpace);
605
606#if ENABLE(WEBASSEMBLY)
607 if (vm()->m_webAssemblyCodeBlockSpace)
608 finalizeMarkedUnconditionalFinalizers<JSWebAssemblyCodeBlock>(*vm()->m_webAssemblyCodeBlockSpace);
609#endif
610}
611
612void Heap::willStartIterating()
613{
614 m_objectSpace.willStartIterating();
615}
616
617void Heap::didFinishIterating()
618{
619 m_objectSpace.didFinishIterating();
620}
621
622void Heap::completeAllJITPlans()
623{
624 if (!VM::canUseJIT())
625 return;
626#if ENABLE(JIT)
627 JITWorklist::ensureGlobalWorklist().completeAllForVM(*m_vm);
628#endif // ENABLE(JIT)
629 DFG::completeAllPlansForVM(*m_vm);
630}
631
632template<typename Func>
633void Heap::iterateExecutingAndCompilingCodeBlocks(const Func& func)
634{
635 m_codeBlocks->iterateCurrentlyExecuting(func);
636 if (VM::canUseJIT())
637 DFG::iterateCodeBlocksForGC(*m_vm, func);
638}
639
640template<typename Func>
641void Heap::iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func& func)
642{
643 Vector<CodeBlock*, 256> codeBlocks;
644 iterateExecutingAndCompilingCodeBlocks(
645 [&] (CodeBlock* codeBlock) {
646 codeBlocks.append(codeBlock);
647 });
648 for (CodeBlock* codeBlock : codeBlocks)
649 func(codeBlock);
650}
651
652void Heap::assertMarkStacksEmpty()
653{
654 bool ok = true;
655
656 if (!m_sharedCollectorMarkStack->isEmpty()) {
657 dataLog("FATAL: Shared collector mark stack not empty! It has ", m_sharedCollectorMarkStack->size(), " elements.\n");
658 ok = false;
659 }
660
661 if (!m_sharedMutatorMarkStack->isEmpty()) {
662 dataLog("FATAL: Shared mutator mark stack not empty! It has ", m_sharedMutatorMarkStack->size(), " elements.\n");
663 ok = false;
664 }
665
666 forEachSlotVisitor(
667 [&] (SlotVisitor& visitor) {
668 if (visitor.isEmpty())
669 return;
670
671 dataLog("FATAL: Visitor ", RawPointer(&visitor), " is not empty!\n");
672 ok = false;
673 });
674
675 RELEASE_ASSERT(ok);
676}
677
678void Heap::gatherStackRoots(ConservativeRoots& roots)
679{
680 m_machineThreads->gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks, m_currentThreadState, m_currentThread);
681}
682
683void Heap::gatherJSStackRoots(ConservativeRoots& roots)
684{
685#if ENABLE(C_LOOP)
686 m_vm->interpreter->cloopStack().gatherConservativeRoots(roots, *m_jitStubRoutines, *m_codeBlocks);
687#else
688 UNUSED_PARAM(roots);
689#endif
690}
691
692void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
693{
694#if ENABLE(DFG_JIT)
695 if (!VM::canUseJIT())
696 return;
697 m_vm->gatherScratchBufferRoots(roots);
698#else
699 UNUSED_PARAM(roots);
700#endif
701}
702
703void Heap::beginMarking()
704{
705 TimingScope timingScope(*this, "Heap::beginMarking");
706 m_jitStubRoutines->clearMarks();
707 m_objectSpace.beginMarking();
708 setMutatorShouldBeFenced(true);
709}
710
711void Heap::removeDeadCompilerWorklistEntries()
712{
713#if ENABLE(DFG_JIT)
714 if (!VM::canUseJIT())
715 return;
716 for (unsigned i = DFG::numberOfWorklists(); i--;)
717 DFG::existingWorklistForIndex(i).removeDeadPlans(*m_vm);
718#endif
719}
720
721bool Heap::isHeapSnapshotting() const
722{
723 HeapProfiler* heapProfiler = m_vm->heapProfiler();
724 if (UNLIKELY(heapProfiler))
725 return heapProfiler->activeSnapshotBuilder();
726 return false;
727}
728
729struct GatherHeapSnapshotData : MarkedBlock::CountFunctor {
730 GatherHeapSnapshotData(VM& vm, HeapSnapshotBuilder& builder)
731 : m_vm(vm)
732 , m_builder(builder)
733 {
734 }
735
736 IterationStatus operator()(HeapCell* heapCell, HeapCell::Kind kind) const
737 {
738 if (isJSCellKind(kind)) {
739 JSCell* cell = static_cast<JSCell*>(heapCell);
740 cell->methodTable(m_vm)->heapSnapshot(cell, m_builder);
741 }
742 return IterationStatus::Continue;
743 }
744
745 VM& m_vm;
746 HeapSnapshotBuilder& m_builder;
747};
748
749void Heap::gatherExtraHeapSnapshotData(HeapProfiler& heapProfiler)
750{
751 if (HeapSnapshotBuilder* builder = heapProfiler.activeSnapshotBuilder()) {
752 HeapIterationScope heapIterationScope(*this);
753 GatherHeapSnapshotData functor(*m_vm, *builder);
754 m_objectSpace.forEachLiveCell(heapIterationScope, functor);
755 }
756}
757
758struct RemoveDeadHeapSnapshotNodes : MarkedBlock::CountFunctor {
759 RemoveDeadHeapSnapshotNodes(HeapSnapshot& snapshot)
760 : m_snapshot(snapshot)
761 {
762 }
763
764 IterationStatus operator()(HeapCell* cell, HeapCell::Kind kind) const
765 {
766 if (isJSCellKind(kind))
767 m_snapshot.sweepCell(static_cast<JSCell*>(cell));
768 return IterationStatus::Continue;
769 }
770
771 HeapSnapshot& m_snapshot;
772};
773
774void Heap::removeDeadHeapSnapshotNodes(HeapProfiler& heapProfiler)
775{
776 if (HeapSnapshot* snapshot = heapProfiler.mostRecentSnapshot()) {
777 HeapIterationScope heapIterationScope(*this);
778 RemoveDeadHeapSnapshotNodes functor(*snapshot);
779 m_objectSpace.forEachDeadCell(heapIterationScope, functor);
780 snapshot->shrinkToFit();
781 }
782}
783
784void Heap::updateObjectCounts()
785{
786 if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full)
787 m_totalBytesVisited = 0;
788
789 m_totalBytesVisitedThisCycle = bytesVisited();
790
791 m_totalBytesVisited += m_totalBytesVisitedThisCycle;
792}
793
794void Heap::endMarking()
795{
796 forEachSlotVisitor(
797 [&] (SlotVisitor& visitor) {
798 visitor.reset();
799 });
800
801 assertMarkStacksEmpty();
802
803 RELEASE_ASSERT(m_raceMarkStack->isEmpty());
804
805 m_objectSpace.endMarking();
806 setMutatorShouldBeFenced(Options::forceFencedBarrier());
807}
808
809size_t Heap::objectCount()
810{
811 return m_objectSpace.objectCount();
812}
813
814size_t Heap::extraMemorySize()
815{
816 // FIXME: Change this to use SaturatedArithmetic when available.
817 // https://bugs.webkit.org/show_bug.cgi?id=170411
818 Checked<size_t, RecordOverflow> checkedTotal = m_extraMemorySize;
819 checkedTotal += m_deprecatedExtraMemorySize;
820 checkedTotal += m_arrayBuffers.size();
821 size_t total = UNLIKELY(checkedTotal.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedTotal.unsafeGet();
822
823 ASSERT(m_objectSpace.capacity() >= m_objectSpace.size());
824 return std::min(total, std::numeric_limits<size_t>::max() - m_objectSpace.capacity());
825}
826
827size_t Heap::size()
828{
829 return m_objectSpace.size() + extraMemorySize();
830}
831
832size_t Heap::capacity()
833{
834 return m_objectSpace.capacity() + extraMemorySize();
835}
836
837size_t Heap::protectedGlobalObjectCount()
838{
839 size_t result = 0;
840 forEachProtectedCell(
841 [&] (JSCell* cell) {
842 if (cell->isObject() && asObject(cell)->isGlobalObject())
843 result++;
844 });
845 return result;
846}
847
848size_t Heap::globalObjectCount()
849{
850 HeapIterationScope iterationScope(*this);
851 size_t result = 0;
852 m_objectSpace.forEachLiveCell(
853 iterationScope,
854 [&] (HeapCell* heapCell, HeapCell::Kind kind) -> IterationStatus {
855 if (!isJSCellKind(kind))
856 return IterationStatus::Continue;
857 JSCell* cell = static_cast<JSCell*>(heapCell);
858 if (cell->isObject() && asObject(cell)->isGlobalObject())
859 result++;
860 return IterationStatus::Continue;
861 });
862 return result;
863}
864
865size_t Heap::protectedObjectCount()
866{
867 size_t result = 0;
868 forEachProtectedCell(
869 [&] (JSCell*) {
870 result++;
871 });
872 return result;
873}
874
875std::unique_ptr<TypeCountSet> Heap::protectedObjectTypeCounts()
876{
877 std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
878 forEachProtectedCell(
879 [&] (JSCell* cell) {
880 recordType(*vm(), *result, cell);
881 });
882 return result;
883}
884
885std::unique_ptr<TypeCountSet> Heap::objectTypeCounts()
886{
887 std::unique_ptr<TypeCountSet> result = std::make_unique<TypeCountSet>();
888 HeapIterationScope iterationScope(*this);
889 m_objectSpace.forEachLiveCell(
890 iterationScope,
891 [&] (HeapCell* cell, HeapCell::Kind kind) -> IterationStatus {
892 if (isJSCellKind(kind))
893 recordType(*vm(), *result, static_cast<JSCell*>(cell));
894 return IterationStatus::Continue;
895 });
896 return result;
897}
898
899void Heap::deleteAllCodeBlocks(DeleteAllCodeEffort effort)
900{
901 if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
902 return;
903
904 VM& vm = *m_vm;
905 PreventCollectionScope preventCollectionScope(*this);
906
907 // If JavaScript is running, it's not safe to delete all JavaScript code, since
908 // we'll end up returning to deleted code.
909 RELEASE_ASSERT(!vm.entryScope);
910 RELEASE_ASSERT(!m_collectionScope);
911
912 completeAllJITPlans();
913
914 vm.forEachScriptExecutableSpace(
915 [&] (auto& spaceAndSet) {
916 HeapIterationScope heapIterationScope(*this);
917 auto& set = spaceAndSet.set;
918 set.forEachLiveCell(
919 [&] (HeapCell* cell, HeapCell::Kind) {
920 ScriptExecutable* executable = static_cast<ScriptExecutable*>(cell);
921 executable->clearCode(set);
922 });
923 });
924
925#if ENABLE(WEBASSEMBLY)
926 {
927 // We must ensure that we clear the JS call ICs from Wasm. Otherwise, Wasm will
928 // have no idea that we cleared the code from all of the Executables in the
929 // VM. This could leave Wasm in an inconsistent state where it has an IC that
930 // points into a CodeBlock that could be dead. The IC will still succeed because
931 // it uses a callee check, but then it will call into dead code.
932 HeapIterationScope heapIterationScope(*this);
933 if (vm.m_webAssemblyCodeBlockSpace) {
934 vm.m_webAssemblyCodeBlockSpace->forEachLiveCell([&] (HeapCell* cell, HeapCell::Kind kind) {
935 ASSERT_UNUSED(kind, kind == HeapCell::JSCell);
936 JSWebAssemblyCodeBlock* codeBlock = static_cast<JSWebAssemblyCodeBlock*>(cell);
937 codeBlock->clearJSCallICs(vm);
938 });
939 }
940 }
941#endif
942}
943
944void Heap::deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort effort)
945{
946 if (m_collectionScope && effort == DeleteAllCodeIfNotCollecting)
947 return;
948
949 VM& vm = *m_vm;
950 PreventCollectionScope preventCollectionScope(*this);
951
952 RELEASE_ASSERT(!m_collectionScope);
953
954 HeapIterationScope heapIterationScope(*this);
955 vm.unlinkedFunctionExecutableSpace.set.forEachLiveCell(
956 [&] (HeapCell* cell, HeapCell::Kind) {
957 UnlinkedFunctionExecutable* executable = static_cast<UnlinkedFunctionExecutable*>(cell);
958 executable->clearCode(vm);
959 });
960}
961
962void Heap::deleteUnmarkedCompiledCode()
963{
964 vm()->forEachScriptExecutableSpace([] (auto& space) { space.space.sweep(); });
965 vm()->forEachCodeBlockSpace([] (auto& space) { space.space.sweep(); }); // Sweeping must occur before deleting stubs, otherwise the stubs might still think they're alive as they get deleted.
966 m_jitStubRoutines->deleteUnmarkedJettisonedStubRoutines();
967}
968
969void Heap::addToRememberedSet(const JSCell* constCell)
970{
971 JSCell* cell = const_cast<JSCell*>(constCell);
972 ASSERT(cell);
973 ASSERT(!Options::useConcurrentJIT() || !isCompilationThread());
974 m_barriersExecuted++;
975 if (m_mutatorShouldBeFenced) {
976 WTF::loadLoadFence();
977 if (!isMarked(cell)) {
978 // During a full collection a store into an unmarked object that had surivived past
979 // collections will manifest as a store to an unmarked PossiblyBlack object. If the
980 // object gets marked at some time after this then it will go down the normal marking
981 // path. So, we don't have to remember this object. We could return here. But we go
982 // further and attempt to re-white the object.
983
984 RELEASE_ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Full);
985
986 if (cell->atomicCompareExchangeCellStateStrong(CellState::PossiblyBlack, CellState::DefinitelyWhite) == CellState::PossiblyBlack) {
987 // Now we protect against this race:
988 //
989 // 1) Object starts out black + unmarked.
990 // --> We do isMarked here.
991 // 2) Object is marked and greyed.
992 // 3) Object is scanned and blacked.
993 // --> We do atomicCompareExchangeCellStateStrong here.
994 //
995 // In this case we would have made the object white again, even though it should
996 // be black. This check lets us correct our mistake. This relies on the fact that
997 // isMarked converges monotonically to true.
998 if (isMarked(cell)) {
999 // It's difficult to work out whether the object should be grey or black at
1000 // this point. We say black conservatively.
1001 cell->setCellState(CellState::PossiblyBlack);
1002 }
1003
1004 // Either way, we can return. Most likely, the object was not marked, and so the
1005 // object is now labeled white. This means that future barrier executions will not
1006 // fire. In the unlikely event that the object had become marked, we can still
1007 // return anyway, since we proved that the object was not marked at the time that
1008 // we executed this slow path.
1009 }
1010
1011 return;
1012 }
1013 } else
1014 ASSERT(isMarked(cell));
1015 // It could be that the object was *just* marked. This means that the collector may set the
1016 // state to DefinitelyGrey and then to PossiblyOldOrBlack at any time. It's OK for us to
1017 // race with the collector here. If we win then this is accurate because the object _will_
1018 // get scanned again. If we lose then someone else will barrier the object again. That would
1019 // be unfortunate but not the end of the world.
1020 cell->setCellState(CellState::PossiblyGrey);
1021 m_mutatorMarkStack->append(cell);
1022}
1023
1024void Heap::sweepSynchronously()
1025{
1026 MonotonicTime before { };
1027 if (Options::logGC()) {
1028 dataLog("Full sweep: ", capacity() / 1024, "kb ");
1029 before = MonotonicTime::now();
1030 }
1031 m_objectSpace.sweep();
1032 m_objectSpace.shrink();
1033 if (Options::logGC()) {
1034 MonotonicTime after = MonotonicTime::now();
1035 dataLog("=> ", capacity() / 1024, "kb, ", (after - before).milliseconds(), "ms");
1036 }
1037}
1038
1039void Heap::collect(Synchronousness synchronousness, GCRequest request)
1040{
1041 switch (synchronousness) {
1042 case Async:
1043 collectAsync(request);
1044 return;
1045 case Sync:
1046 collectSync(request);
1047 return;
1048 }
1049 RELEASE_ASSERT_NOT_REACHED();
1050}
1051
1052void Heap::collectNow(Synchronousness synchronousness, GCRequest request)
1053{
1054 if (validateDFGDoesGC)
1055 RELEASE_ASSERT(expectDoesGC());
1056
1057 switch (synchronousness) {
1058 case Async: {
1059 collectAsync(request);
1060 stopIfNecessary();
1061 return;
1062 }
1063
1064 case Sync: {
1065 collectSync(request);
1066
1067 DeferGCForAWhile deferGC(*this);
1068 if (UNLIKELY(Options::useImmortalObjects()))
1069 sweeper().stopSweeping();
1070
1071 bool alreadySweptInCollectSync = shouldSweepSynchronously();
1072 if (!alreadySweptInCollectSync) {
1073 if (Options::logGC())
1074 dataLog("[GC<", RawPointer(this), ">: ");
1075 sweepSynchronously();
1076 if (Options::logGC())
1077 dataLog("]\n");
1078 }
1079 m_objectSpace.assertNoUnswept();
1080
1081 sweepAllLogicallyEmptyWeakBlocks();
1082 return;
1083 } }
1084 RELEASE_ASSERT_NOT_REACHED();
1085}
1086
1087void Heap::collectAsync(GCRequest request)
1088{
1089 if (validateDFGDoesGC)
1090 RELEASE_ASSERT(expectDoesGC());
1091
1092 if (!m_isSafeToCollect)
1093 return;
1094
1095 bool alreadyRequested = false;
1096 {
1097 LockHolder locker(*m_threadLock);
1098 for (const GCRequest& previousRequest : m_requests) {
1099 if (request.subsumedBy(previousRequest)) {
1100 alreadyRequested = true;
1101 break;
1102 }
1103 }
1104 }
1105 if (alreadyRequested)
1106 return;
1107
1108 requestCollection(request);
1109}
1110
1111void Heap::collectSync(GCRequest request)
1112{
1113 if (validateDFGDoesGC)
1114 RELEASE_ASSERT(expectDoesGC());
1115
1116 if (!m_isSafeToCollect)
1117 return;
1118
1119 waitForCollection(requestCollection(request));
1120}
1121
1122bool Heap::shouldCollectInCollectorThread(const AbstractLocker&)
1123{
1124 RELEASE_ASSERT(m_requests.isEmpty() == (m_lastServedTicket == m_lastGrantedTicket));
1125 RELEASE_ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
1126
1127 if (false)
1128 dataLog("Mutator has the conn = ", !!(m_worldState.load() & mutatorHasConnBit), "\n");
1129
1130 return !m_requests.isEmpty() && !(m_worldState.load() & mutatorHasConnBit);
1131}
1132
1133void Heap::collectInCollectorThread()
1134{
1135 for (;;) {
1136 RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Collector, nullptr);
1137 switch (result) {
1138 case RunCurrentPhaseResult::Finished:
1139 return;
1140 case RunCurrentPhaseResult::Continue:
1141 break;
1142 case RunCurrentPhaseResult::NeedCurrentThreadState:
1143 RELEASE_ASSERT_NOT_REACHED();
1144 break;
1145 }
1146 }
1147}
1148
1149ALWAYS_INLINE int asInt(CollectorPhase phase)
1150{
1151 return static_cast<int>(phase);
1152}
1153
1154void Heap::checkConn(GCConductor conn)
1155{
1156 unsigned worldState = m_worldState.load();
1157 switch (conn) {
1158 case GCConductor::Mutator:
1159 RELEASE_ASSERT(worldState & mutatorHasConnBit, worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered());
1160 return;
1161 case GCConductor::Collector:
1162 RELEASE_ASSERT(!(worldState & mutatorHasConnBit), worldState, asInt(m_lastPhase), asInt(m_currentPhase), asInt(m_nextPhase), vm()->id(), VM::numberOfIDs(), vm()->isEntered());
1163 return;
1164 }
1165 RELEASE_ASSERT_NOT_REACHED();
1166}
1167
1168auto Heap::runCurrentPhase(GCConductor conn, CurrentThreadState* currentThreadState) -> RunCurrentPhaseResult
1169{
1170 checkConn(conn);
1171 m_currentThreadState = currentThreadState;
1172 m_currentThread = &Thread::current();
1173
1174 if (conn == GCConductor::Mutator)
1175 sanitizeStackForVM(vm());
1176
1177 // If the collector transfers the conn to the mutator, it leaves us in between phases.
1178 if (!finishChangingPhase(conn)) {
1179 // A mischevious mutator could repeatedly relinquish the conn back to us. We try to avoid doing
1180 // this, but it's probably not the end of the world if it did happen.
1181 if (false)
1182 dataLog("Conn bounce-back.\n");
1183 return RunCurrentPhaseResult::Finished;
1184 }
1185
1186 bool result = false;
1187 switch (m_currentPhase) {
1188 case CollectorPhase::NotRunning:
1189 result = runNotRunningPhase(conn);
1190 break;
1191
1192 case CollectorPhase::Begin:
1193 result = runBeginPhase(conn);
1194 break;
1195
1196 case CollectorPhase::Fixpoint:
1197 if (!currentThreadState && conn == GCConductor::Mutator)
1198 return RunCurrentPhaseResult::NeedCurrentThreadState;
1199
1200 result = runFixpointPhase(conn);
1201 break;
1202
1203 case CollectorPhase::Concurrent:
1204 result = runConcurrentPhase(conn);
1205 break;
1206
1207 case CollectorPhase::Reloop:
1208 result = runReloopPhase(conn);
1209 break;
1210
1211 case CollectorPhase::End:
1212 result = runEndPhase(conn);
1213 break;
1214 }
1215
1216 return result ? RunCurrentPhaseResult::Continue : RunCurrentPhaseResult::Finished;
1217}
1218
1219NEVER_INLINE bool Heap::runNotRunningPhase(GCConductor conn)
1220{
1221 // Check m_requests since the mutator calls this to poll what's going on.
1222 {
1223 auto locker = holdLock(*m_threadLock);
1224 if (m_requests.isEmpty())
1225 return false;
1226 }
1227
1228 return changePhase(conn, CollectorPhase::Begin);
1229}
1230
1231NEVER_INLINE bool Heap::runBeginPhase(GCConductor conn)
1232{
1233 m_currentGCStartTime = MonotonicTime::now();
1234
1235 {
1236 LockHolder locker(*m_threadLock);
1237 RELEASE_ASSERT(!m_requests.isEmpty());
1238 m_currentRequest = m_requests.first();
1239 }
1240
1241 if (Options::logGC())
1242 dataLog("[GC<", RawPointer(this), ">: START ", gcConductorShortName(conn), " ", capacity() / 1024, "kb ");
1243
1244 m_beforeGC = MonotonicTime::now();
1245
1246 if (m_collectionScope) {
1247 dataLog("Collection scope already set during GC: ", *m_collectionScope, "\n");
1248 RELEASE_ASSERT_NOT_REACHED();
1249 }
1250
1251 willStartCollection();
1252
1253 if (UNLIKELY(m_verifier)) {
1254 // Verify that live objects from the last GC cycle haven't been corrupted by
1255 // mutators before we begin this new GC cycle.
1256 m_verifier->verify(HeapVerifier::Phase::BeforeGC);
1257
1258 m_verifier->startGC();
1259 m_verifier->gatherLiveCells(HeapVerifier::Phase::BeforeMarking);
1260 }
1261
1262 prepareForMarking();
1263
1264 if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) {
1265 m_opaqueRoots.clear();
1266 m_collectorSlotVisitor->clearMarkStacks();
1267 m_mutatorMarkStack->clear();
1268 }
1269
1270 RELEASE_ASSERT(m_raceMarkStack->isEmpty());
1271
1272 beginMarking();
1273
1274 forEachSlotVisitor(
1275 [&] (SlotVisitor& visitor) {
1276 visitor.didStartMarking();
1277 });
1278
1279 m_parallelMarkersShouldExit = false;
1280
1281 m_helperClient.setFunction(
1282 [this] () {
1283 SlotVisitor* slotVisitor;
1284 {
1285 LockHolder locker(m_parallelSlotVisitorLock);
1286 if (m_availableParallelSlotVisitors.isEmpty()) {
1287 std::unique_ptr<SlotVisitor> newVisitor = std::make_unique<SlotVisitor>(
1288 *this, toCString("P", m_parallelSlotVisitors.size() + 1));
1289
1290 if (Options::optimizeParallelSlotVisitorsForStoppedMutator())
1291 newVisitor->optimizeForStoppedMutator();
1292
1293 newVisitor->didStartMarking();
1294
1295 slotVisitor = newVisitor.get();
1296 m_parallelSlotVisitors.append(WTFMove(newVisitor));
1297 } else
1298 slotVisitor = m_availableParallelSlotVisitors.takeLast();
1299 }
1300
1301 Thread::registerGCThread(GCThreadType::Helper);
1302
1303 {
1304 ParallelModeEnabler parallelModeEnabler(*slotVisitor);
1305 slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
1306 }
1307
1308 {
1309 LockHolder locker(m_parallelSlotVisitorLock);
1310 m_availableParallelSlotVisitors.append(slotVisitor);
1311 }
1312 });
1313
1314 SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1315
1316 m_constraintSet->didStartMarking();
1317
1318 m_scheduler->beginCollection();
1319 if (Options::logGC())
1320 m_scheduler->log();
1321
1322 // After this, we will almost certainly fall through all of the "slotVisitor.isEmpty()"
1323 // checks because bootstrap would have put things into the visitor. So, we should fall
1324 // through to draining.
1325
1326 if (!slotVisitor.didReachTermination()) {
1327 dataLog("Fatal: SlotVisitor should think that GC should terminate before constraint solving, but it does not think this.\n");
1328 dataLog("slotVisitor.isEmpty(): ", slotVisitor.isEmpty(), "\n");
1329 dataLog("slotVisitor.collectorMarkStack().isEmpty(): ", slotVisitor.collectorMarkStack().isEmpty(), "\n");
1330 dataLog("slotVisitor.mutatorMarkStack().isEmpty(): ", slotVisitor.mutatorMarkStack().isEmpty(), "\n");
1331 dataLog("m_numberOfActiveParallelMarkers: ", m_numberOfActiveParallelMarkers, "\n");
1332 dataLog("m_sharedCollectorMarkStack->isEmpty(): ", m_sharedCollectorMarkStack->isEmpty(), "\n");
1333 dataLog("m_sharedMutatorMarkStack->isEmpty(): ", m_sharedMutatorMarkStack->isEmpty(), "\n");
1334 dataLog("slotVisitor.didReachTermination(): ", slotVisitor.didReachTermination(), "\n");
1335 RELEASE_ASSERT_NOT_REACHED();
1336 }
1337
1338 return changePhase(conn, CollectorPhase::Fixpoint);
1339}
1340
1341NEVER_INLINE bool Heap::runFixpointPhase(GCConductor conn)
1342{
1343 RELEASE_ASSERT(conn == GCConductor::Collector || m_currentThreadState);
1344
1345 SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1346
1347 if (Options::logGC()) {
1348 HashMap<const char*, size_t> visitMap;
1349 forEachSlotVisitor(
1350 [&] (SlotVisitor& slotVisitor) {
1351 visitMap.add(slotVisitor.codeName(), slotVisitor.bytesVisited() / 1024);
1352 });
1353
1354 auto perVisitorDump = sortedMapDump(
1355 visitMap,
1356 [] (const char* a, const char* b) -> bool {
1357 return strcmp(a, b) < 0;
1358 },
1359 ":", " ");
1360
1361 dataLog("v=", bytesVisited() / 1024, "kb (", perVisitorDump, ") o=", m_opaqueRoots.size(), " b=", m_barriersExecuted, " ");
1362 }
1363
1364 if (slotVisitor.didReachTermination()) {
1365 m_opaqueRoots.deleteOldTables();
1366
1367 m_scheduler->didReachTermination();
1368
1369 assertMarkStacksEmpty();
1370
1371 // FIXME: Take m_mutatorDidRun into account when scheduling constraints. Most likely,
1372 // we don't have to execute root constraints again unless the mutator did run. At a
1373 // minimum, we could use this for work estimates - but it's probably more than just an
1374 // estimate.
1375 // https://bugs.webkit.org/show_bug.cgi?id=166828
1376
1377 // Wondering what this does? Look at Heap::addCoreConstraints(). The DOM and others can also
1378 // add their own using Heap::addMarkingConstraint().
1379 bool converged = m_constraintSet->executeConvergence(slotVisitor);
1380
1381 // FIXME: The slotVisitor.isEmpty() check is most likely not needed.
1382 // https://bugs.webkit.org/show_bug.cgi?id=180310
1383 if (converged && slotVisitor.isEmpty()) {
1384 assertMarkStacksEmpty();
1385 return changePhase(conn, CollectorPhase::End);
1386 }
1387
1388 m_scheduler->didExecuteConstraints();
1389 }
1390
1391 if (Options::logGC())
1392 dataLog(slotVisitor.collectorMarkStack().size(), "+", m_mutatorMarkStack->size() + slotVisitor.mutatorMarkStack().size(), " ");
1393
1394 {
1395 ParallelModeEnabler enabler(slotVisitor);
1396 slotVisitor.drainInParallel(m_scheduler->timeToResume());
1397 }
1398
1399 m_scheduler->synchronousDrainingDidStall();
1400
1401 // This is kinda tricky. The termination check looks at:
1402 //
1403 // - Whether the marking threads are active. If they are not, this means that the marking threads'
1404 // SlotVisitors are empty.
1405 // - Whether the collector's slot visitor is empty.
1406 // - Whether the shared mark stacks are empty.
1407 //
1408 // This doesn't have to check the mutator SlotVisitor because that one becomes empty after every GC
1409 // work increment, so it must be empty now.
1410 if (slotVisitor.didReachTermination())
1411 return true; // This is like relooping to the top if runFixpointPhase().
1412
1413 if (!m_scheduler->shouldResume())
1414 return true;
1415
1416 m_scheduler->willResume();
1417
1418 if (Options::logGC()) {
1419 double thisPauseMS = (MonotonicTime::now() - m_stopTime).milliseconds();
1420 dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), ")...]\n");
1421 }
1422
1423 // Forgive the mutator for its past failures to keep up.
1424 // FIXME: Figure out if moving this to different places results in perf changes.
1425 m_incrementBalance = 0;
1426
1427 return changePhase(conn, CollectorPhase::Concurrent);
1428}
1429
1430NEVER_INLINE bool Heap::runConcurrentPhase(GCConductor conn)
1431{
1432 SlotVisitor& slotVisitor = *m_collectorSlotVisitor;
1433
1434 switch (conn) {
1435 case GCConductor::Mutator: {
1436 // When the mutator has the conn, we poll runConcurrentPhase() on every time someone says
1437 // stopIfNecessary(), so on every allocation slow path. When that happens we poll if it's time
1438 // to stop and do some work.
1439 if (slotVisitor.didReachTermination()
1440 || m_scheduler->shouldStop())
1441 return changePhase(conn, CollectorPhase::Reloop);
1442
1443 // We could be coming from a collector phase that stuffed our SlotVisitor, so make sure we donate
1444 // everything. This is super cheap if the SlotVisitor is already empty.
1445 slotVisitor.donateAll();
1446 return false;
1447 }
1448 case GCConductor::Collector: {
1449 {
1450 ParallelModeEnabler enabler(slotVisitor);
1451 slotVisitor.drainInParallelPassively(m_scheduler->timeToStop());
1452 }
1453 return changePhase(conn, CollectorPhase::Reloop);
1454 } }
1455
1456 RELEASE_ASSERT_NOT_REACHED();
1457 return false;
1458}
1459
1460NEVER_INLINE bool Heap::runReloopPhase(GCConductor conn)
1461{
1462 if (Options::logGC())
1463 dataLog("[GC<", RawPointer(this), ">: ", gcConductorShortName(conn), " ");
1464
1465 m_scheduler->didStop();
1466
1467 if (Options::logGC())
1468 m_scheduler->log();
1469
1470 return changePhase(conn, CollectorPhase::Fixpoint);
1471}
1472
1473NEVER_INLINE bool Heap::runEndPhase(GCConductor conn)
1474{
1475 m_scheduler->endCollection();
1476
1477 {
1478 auto locker = holdLock(m_markingMutex);
1479 m_parallelMarkersShouldExit = true;
1480 m_markingConditionVariable.notifyAll();
1481 }
1482 m_helperClient.finish();
1483
1484 iterateExecutingAndCompilingCodeBlocks(
1485 [&] (CodeBlock* codeBlock) {
1486 writeBarrier(codeBlock);
1487 });
1488
1489 updateObjectCounts();
1490 endMarking();
1491
1492 if (UNLIKELY(m_verifier)) {
1493 m_verifier->gatherLiveCells(HeapVerifier::Phase::AfterMarking);
1494 m_verifier->verify(HeapVerifier::Phase::AfterMarking);
1495 }
1496
1497 if (vm()->typeProfiler())
1498 vm()->typeProfiler()->invalidateTypeSetCache(*vm());
1499
1500 reapWeakHandles();
1501 pruneStaleEntriesFromWeakGCMaps();
1502 sweepArrayBuffers();
1503 snapshotUnswept();
1504 finalizeUnconditionalFinalizers();
1505 removeDeadCompilerWorklistEntries();
1506 notifyIncrementalSweeper();
1507
1508 m_codeBlocks->iterateCurrentlyExecuting(
1509 [&] (CodeBlock* codeBlock) {
1510 writeBarrier(codeBlock);
1511 });
1512 m_codeBlocks->clearCurrentlyExecuting();
1513
1514 m_objectSpace.prepareForAllocation();
1515 updateAllocationLimits();
1516
1517 if (UNLIKELY(m_verifier)) {
1518 m_verifier->trimDeadCells();
1519 m_verifier->verify(HeapVerifier::Phase::AfterGC);
1520 }
1521
1522 didFinishCollection();
1523
1524 if (m_currentRequest.didFinishEndPhase)
1525 m_currentRequest.didFinishEndPhase->run();
1526
1527 if (false) {
1528 dataLog("Heap state after GC:\n");
1529 m_objectSpace.dumpBits();
1530 }
1531
1532 if (Options::logGC()) {
1533 double thisPauseMS = (m_afterGC - m_stopTime).milliseconds();
1534 dataLog("p=", thisPauseMS, "ms (max ", maxPauseMS(thisPauseMS), "), cycle ", (m_afterGC - m_beforeGC).milliseconds(), "ms END]\n");
1535 }
1536
1537 {
1538 auto locker = holdLock(*m_threadLock);
1539 m_requests.removeFirst();
1540 m_lastServedTicket++;
1541 clearMutatorWaiting();
1542 }
1543 ParkingLot::unparkAll(&m_worldState);
1544
1545 if (false)
1546 dataLog("GC END!\n");
1547
1548 setNeedFinalize();
1549
1550 m_lastGCStartTime = m_currentGCStartTime;
1551 m_lastGCEndTime = MonotonicTime::now();
1552 m_totalGCTime += m_lastGCEndTime - m_lastGCStartTime;
1553
1554 return changePhase(conn, CollectorPhase::NotRunning);
1555}
1556
1557bool Heap::changePhase(GCConductor conn, CollectorPhase nextPhase)
1558{
1559 checkConn(conn);
1560
1561 m_lastPhase = m_currentPhase;
1562 m_nextPhase = nextPhase;
1563
1564 return finishChangingPhase(conn);
1565}
1566
1567NEVER_INLINE bool Heap::finishChangingPhase(GCConductor conn)
1568{
1569 checkConn(conn);
1570
1571 if (m_nextPhase == m_currentPhase)
1572 return true;
1573
1574 if (false)
1575 dataLog(conn, ": Going to phase: ", m_nextPhase, " (from ", m_currentPhase, ")\n");
1576
1577 m_phaseVersion++;
1578
1579 bool suspendedBefore = worldShouldBeSuspended(m_currentPhase);
1580 bool suspendedAfter = worldShouldBeSuspended(m_nextPhase);
1581
1582 if (suspendedBefore != suspendedAfter) {
1583 if (suspendedBefore) {
1584 RELEASE_ASSERT(!suspendedAfter);
1585
1586 resumeThePeriphery();
1587 if (conn == GCConductor::Collector)
1588 resumeTheMutator();
1589 else
1590 handleNeedFinalize();
1591 } else {
1592 RELEASE_ASSERT(!suspendedBefore);
1593 RELEASE_ASSERT(suspendedAfter);
1594
1595 if (conn == GCConductor::Collector) {
1596 waitWhileNeedFinalize();
1597 if (!stopTheMutator()) {
1598 if (false)
1599 dataLog("Returning false.\n");
1600 return false;
1601 }
1602 } else {
1603 sanitizeStackForVM(m_vm);
1604 handleNeedFinalize();
1605 }
1606 stopThePeriphery(conn);
1607 }
1608 }
1609
1610 m_currentPhase = m_nextPhase;
1611 return true;
1612}
1613
1614void Heap::stopThePeriphery(GCConductor conn)
1615{
1616 if (m_worldIsStopped) {
1617 dataLog("FATAL: world already stopped.\n");
1618 RELEASE_ASSERT_NOT_REACHED();
1619 }
1620
1621 if (m_mutatorDidRun)
1622 m_mutatorExecutionVersion++;
1623
1624 m_mutatorDidRun = false;
1625
1626 suspendCompilerThreads();
1627 m_worldIsStopped = true;
1628
1629 forEachSlotVisitor(
1630 [&] (SlotVisitor& slotVisitor) {
1631 slotVisitor.updateMutatorIsStopped(NoLockingNecessary);
1632 });
1633
1634#if ENABLE(JIT)
1635 if (VM::canUseJIT()) {
1636 DeferGCForAWhile awhile(*this);
1637 if (JITWorklist::ensureGlobalWorklist().completeAllForVM(*m_vm)
1638 && conn == GCConductor::Collector)
1639 setGCDidJIT();
1640 }
1641#endif // ENABLE(JIT)
1642 UNUSED_PARAM(conn);
1643
1644 if (auto* shadowChicken = vm()->shadowChicken())
1645 shadowChicken->update(*vm(), vm()->topCallFrame);
1646
1647 m_structureIDTable.flushOldTables();
1648 m_objectSpace.stopAllocating();
1649
1650 m_stopTime = MonotonicTime::now();
1651}
1652
1653NEVER_INLINE void Heap::resumeThePeriphery()
1654{
1655 // Calling resumeAllocating does the Right Thing depending on whether this is the end of a
1656 // collection cycle or this is just a concurrent phase within a collection cycle:
1657 // - At end of collection cycle: it's a no-op because prepareForAllocation already cleared the
1658 // last active block.
1659 // - During collection cycle: it reinstates the last active block.
1660 m_objectSpace.resumeAllocating();
1661
1662 m_barriersExecuted = 0;
1663
1664 if (!m_worldIsStopped) {
1665 dataLog("Fatal: collector does not believe that the world is stopped.\n");
1666 RELEASE_ASSERT_NOT_REACHED();
1667 }
1668 m_worldIsStopped = false;
1669
1670 // FIXME: This could be vastly improved: we want to grab the locks in the order in which they
1671 // become available. We basically want a lockAny() method that will lock whatever lock is available
1672 // and tell you which one it locked. That would require teaching ParkingLot how to park on multiple
1673 // queues at once, which is totally achievable - it would just require memory allocation, which is
1674 // suboptimal but not a disaster. Alternatively, we could replace the SlotVisitor rightToRun lock
1675 // with a DLG-style handshake mechanism, but that seems not as general.
1676 Vector<SlotVisitor*, 8> slotVisitorsToUpdate;
1677
1678 forEachSlotVisitor(
1679 [&] (SlotVisitor& slotVisitor) {
1680 slotVisitorsToUpdate.append(&slotVisitor);
1681 });
1682
1683 for (unsigned countdown = 40; !slotVisitorsToUpdate.isEmpty() && countdown--;) {
1684 for (unsigned index = 0; index < slotVisitorsToUpdate.size(); ++index) {
1685 SlotVisitor& slotVisitor = *slotVisitorsToUpdate[index];
1686 bool remove = false;
1687 if (slotVisitor.hasAcknowledgedThatTheMutatorIsResumed())
1688 remove = true;
1689 else if (auto locker = tryHoldLock(slotVisitor.rightToRun())) {
1690 slotVisitor.updateMutatorIsStopped(locker);
1691 remove = true;
1692 }
1693 if (remove) {
1694 slotVisitorsToUpdate[index--] = slotVisitorsToUpdate.last();
1695 slotVisitorsToUpdate.takeLast();
1696 }
1697 }
1698 Thread::yield();
1699 }
1700
1701 for (SlotVisitor* slotVisitor : slotVisitorsToUpdate)
1702 slotVisitor->updateMutatorIsStopped();
1703
1704 resumeCompilerThreads();
1705}
1706
1707bool Heap::stopTheMutator()
1708{
1709 for (;;) {
1710 unsigned oldState = m_worldState.load();
1711 if (oldState & stoppedBit) {
1712 RELEASE_ASSERT(!(oldState & hasAccessBit));
1713 RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1714 RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1715 return true;
1716 }
1717
1718 if (oldState & mutatorHasConnBit) {
1719 RELEASE_ASSERT(!(oldState & hasAccessBit));
1720 RELEASE_ASSERT(!(oldState & stoppedBit));
1721 return false;
1722 }
1723
1724 if (!(oldState & hasAccessBit)) {
1725 RELEASE_ASSERT(!(oldState & mutatorHasConnBit));
1726 RELEASE_ASSERT(!(oldState & mutatorWaitingBit));
1727 // We can stop the world instantly.
1728 if (m_worldState.compareExchangeWeak(oldState, oldState | stoppedBit))
1729 return true;
1730 continue;
1731 }
1732
1733 // Transfer the conn to the mutator and bail.
1734 RELEASE_ASSERT(oldState & hasAccessBit);
1735 RELEASE_ASSERT(!(oldState & stoppedBit));
1736 unsigned newState = (oldState | mutatorHasConnBit) & ~mutatorWaitingBit;
1737 if (m_worldState.compareExchangeWeak(oldState, newState)) {
1738 if (false)
1739 dataLog("Handed off the conn.\n");
1740 m_stopIfNecessaryTimer->scheduleSoon();
1741 ParkingLot::unparkAll(&m_worldState);
1742 return false;
1743 }
1744 }
1745}
1746
1747NEVER_INLINE void Heap::resumeTheMutator()
1748{
1749 if (false)
1750 dataLog("Resuming the mutator.\n");
1751 for (;;) {
1752 unsigned oldState = m_worldState.load();
1753 if (!!(oldState & hasAccessBit) != !(oldState & stoppedBit)) {
1754 dataLog("Fatal: hasAccess = ", !!(oldState & hasAccessBit), ", stopped = ", !!(oldState & stoppedBit), "\n");
1755 RELEASE_ASSERT_NOT_REACHED();
1756 }
1757 if (oldState & mutatorHasConnBit) {
1758 dataLog("Fatal: mutator has the conn.\n");
1759 RELEASE_ASSERT_NOT_REACHED();
1760 }
1761
1762 if (!(oldState & stoppedBit)) {
1763 if (false)
1764 dataLog("Returning because not stopped.\n");
1765 return;
1766 }
1767
1768 if (m_worldState.compareExchangeWeak(oldState, oldState & ~stoppedBit)) {
1769 if (false)
1770 dataLog("CASing and returning.\n");
1771 ParkingLot::unparkAll(&m_worldState);
1772 return;
1773 }
1774 }
1775}
1776
1777void Heap::stopIfNecessarySlow()
1778{
1779 if (validateDFGDoesGC)
1780 RELEASE_ASSERT(expectDoesGC());
1781
1782 while (stopIfNecessarySlow(m_worldState.load())) { }
1783
1784 RELEASE_ASSERT(m_worldState.load() & hasAccessBit);
1785 RELEASE_ASSERT(!(m_worldState.load() & stoppedBit));
1786
1787 handleGCDidJIT();
1788 handleNeedFinalize();
1789 m_mutatorDidRun = true;
1790}
1791
1792bool Heap::stopIfNecessarySlow(unsigned oldState)
1793{
1794 if (validateDFGDoesGC)
1795 RELEASE_ASSERT(expectDoesGC());
1796
1797 RELEASE_ASSERT(oldState & hasAccessBit);
1798 RELEASE_ASSERT(!(oldState & stoppedBit));
1799
1800 // It's possible for us to wake up with finalization already requested but the world not yet
1801 // resumed. If that happens, we can't run finalization yet.
1802 if (handleNeedFinalize(oldState))
1803 return true;
1804
1805 // FIXME: When entering the concurrent phase, we could arrange for this branch not to fire, and then
1806 // have the SlotVisitor do things to the m_worldState to make this branch fire again. That would
1807 // prevent us from polling this so much. Ideally, stopIfNecessary would ignore the mutatorHasConnBit
1808 // and there would be some other bit indicating whether we were in some GC phase other than the
1809 // NotRunning or Concurrent ones.
1810 if (oldState & mutatorHasConnBit)
1811 collectInMutatorThread();
1812
1813 return false;
1814}
1815
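// Runs collector phases on the mutator thread while it holds the conn. If a phase needs the
// current thread's state, we capture it with callWithCurrentThreadState() and keep running
// phases from inside that scope.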
1816NEVER_INLINE void Heap::collectInMutatorThread()
1817{
1818 CollectingScope collectingScope(*this);
1819 for (;;) {
1820 RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, nullptr);
1821 switch (result) {
1822 case RunCurrentPhaseResult::Finished:
1823 return;
1824 case RunCurrentPhaseResult::Continue:
1825 break;
1826 case RunCurrentPhaseResult::NeedCurrentThreadState:
1827 sanitizeStackForVM(m_vm);
1828 auto lambda = [&] (CurrentThreadState& state) {
1829 for (;;) {
1830 RunCurrentPhaseResult result = runCurrentPhase(GCConductor::Mutator, &state);
1831 switch (result) {
1832 case RunCurrentPhaseResult::Finished:
1833 return;
1834 case RunCurrentPhaseResult::Continue:
1835 break;
1836 case RunCurrentPhaseResult::NeedCurrentThreadState:
1837 RELEASE_ASSERT_NOT_REACHED();
1838 break;
1839 }
1840 }
1841 };
1842 callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>(WTFMove(lambda)));
1843 return;
1844 }
1845 }
1846}
1847
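// Repeatedly evaluates func under the thread lock until it returns true. While waiting, we
// service stop-the-world requests, relinquish the conn so the collector thread can make
// progress, and park on the world state with the mutatorWaitingBit set.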
1848template<typename Func>
1849void Heap::waitForCollector(const Func& func)
1850{
1851 for (;;) {
1852 bool done;
1853 {
1854 LockHolder locker(*m_threadLock);
1855 done = func(locker);
1856 if (!done) {
1857 setMutatorWaiting();
1858
1859 // At this point, the collector knows that we intend to wait. It will clear the
1860 // waiting bit and then unparkAll when the GC cycle finishes. Clearing the bit
1861 // prevents us from parking unless there is also a stop-the-world. Unparking after
1862 // clearing means that if the clearing happens after we park, then we will unpark.
1863 }
1864 }
1865
1866 // If we're in a stop-the-world scenario, we need to wait for that even if done is true.
1867 unsigned oldState = m_worldState.load();
1868 if (stopIfNecessarySlow(oldState))
1869 continue;
1870
1871 // FIXME: We wouldn't need this if stopIfNecessarySlow() had a mode where it knew to just
1872 // do the collection.
1873 relinquishConn();
1874
1875 if (done) {
1876 clearMutatorWaiting(); // Clean up just in case.
1877 return;
1878 }
1879
1880 // If mutatorWaitingBit is still set then we want to wait.
1881 ParkingLot::compareAndPark(&m_worldState, oldState | mutatorWaitingBit);
1882 }
1883}
1884
1885void Heap::acquireAccessSlow()
1886{
1887 for (;;) {
1888 unsigned oldState = m_worldState.load();
1889 RELEASE_ASSERT(!(oldState & hasAccessBit));
1890
1891 if (oldState & stoppedBit) {
1892 if (verboseStop) {
1893 dataLog("Stopping in acquireAccess!\n");
1894 WTFReportBacktrace();
1895 }
1896 // Wait until we're not stopped anymore.
1897 ParkingLot::compareAndPark(&m_worldState, oldState);
1898 continue;
1899 }
1900
1901 RELEASE_ASSERT(!(oldState & stoppedBit));
1902 unsigned newState = oldState | hasAccessBit;
1903 if (m_worldState.compareExchangeWeak(oldState, newState)) {
1904 handleGCDidJIT();
1905 handleNeedFinalize();
1906 m_mutatorDidRun = true;
1907 stopIfNecessary();
1908 return;
1909 }
1910 }
1911}
1912
1913void Heap::releaseAccessSlow()
1914{
1915 for (;;) {
1916 unsigned oldState = m_worldState.load();
1917 if (!(oldState & hasAccessBit)) {
1918 dataLog("FATAL: Attempting to release access but the mutator does not have access.\n");
1919 RELEASE_ASSERT_NOT_REACHED();
1920 }
1921 if (oldState & stoppedBit) {
1922 dataLog("FATAL: Attempting to release access but the mutator is stopped.\n");
1923 RELEASE_ASSERT_NOT_REACHED();
1924 }
1925
1926 if (handleNeedFinalize(oldState))
1927 continue;
1928
1929 unsigned newState = oldState & ~(hasAccessBit | mutatorHasConnBit);
1930
1931 if ((oldState & mutatorHasConnBit)
1932 && m_nextPhase != m_currentPhase) {
1933 // This means that the collector thread had given us the conn so that we would do something
1934 // for it. Stop ourselves as we release access. This ensures that acquireAccess blocks. In
1935 // the meantime, since we're handing the conn over, the collector will be awoken and it is
1936 // sure to have work to do.
1937 newState |= stoppedBit;
1938 }
1939
1940 if (m_worldState.compareExchangeWeak(oldState, newState)) {
1941 if (oldState & mutatorHasConnBit)
1942 finishRelinquishingConn();
1943 return;
1944 }
1945 }
1946}
1947
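// Returns false when there is nothing left to do: either the mutator does not hold the conn or
// the collector thread has been asked to stop. Returns true to make the caller loop around, both
// when the CAS fails and after the conn has been handed back successfully.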
1948bool Heap::relinquishConn(unsigned oldState)
1949{
1950 RELEASE_ASSERT(oldState & hasAccessBit);
1951 RELEASE_ASSERT(!(oldState & stoppedBit));
1952
1953 if (!(oldState & mutatorHasConnBit))
1954 return false; // Done.
1955
1956 if (m_threadShouldStop)
1957 return false;
1958
1959 if (!m_worldState.compareExchangeWeak(oldState, oldState & ~mutatorHasConnBit))
1960 return true; // Loop around.
1961
1962 finishRelinquishingConn();
1963 return true;
1964}
1965
1966void Heap::finishRelinquishingConn()
1967{
1968 if (false)
1969 dataLog("Relinquished the conn.\n");
1970
1971 sanitizeStackForVM(m_vm);
1972
1973 auto locker = holdLock(*m_threadLock);
1974 if (!m_requests.isEmpty())
1975 m_threadCondition->notifyOne(locker);
1976 ParkingLot::unparkAll(&m_worldState);
1977}
1978
1979void Heap::relinquishConn()
1980{
1981 while (relinquishConn(m_worldState.load())) { }
1982}
1983
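// Returns false if the GC has not JITed anything since we last checked. Otherwise, tries to
// clear the bit and executes a cross-modifying-code fence on success; returning true in either
// case makes the caller loop around and re-read the state.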
1984bool Heap::handleGCDidJIT(unsigned oldState)
1985{
1986 RELEASE_ASSERT(oldState & hasAccessBit);
1987 if (!(oldState & gcDidJITBit))
1988 return false;
1989 if (m_worldState.compareExchangeWeak(oldState, oldState & ~gcDidJITBit)) {
1990 WTF::crossModifyingCodeFence();
1991 return true;
1992 }
1993 return true;
1994}
1995
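// Same looping protocol as handleGCDidJIT(): returns false if no finalization is requested;
// otherwise tries to clear the bit, runs finalize() on success, and returns true so the caller
// re-reads the state.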
1996NEVER_INLINE bool Heap::handleNeedFinalize(unsigned oldState)
1997{
1998 RELEASE_ASSERT(oldState & hasAccessBit);
1999 RELEASE_ASSERT(!(oldState & stoppedBit));
2000
2001 if (!(oldState & needFinalizeBit))
2002 return false;
2003 if (m_worldState.compareExchangeWeak(oldState, oldState & ~needFinalizeBit)) {
2004 finalize();
2005 // Wake up anyone waiting for us to finalize. Note that they may have woken up already, in
2006 // which case they would be waiting for us to release heap access.
2007 ParkingLot::unparkAll(&m_worldState);
2008 return true;
2009 }
2010 return true;
2011}
2012
2013void Heap::handleGCDidJIT()
2014{
2015 while (handleGCDidJIT(m_worldState.load())) { }
2016}
2017
2018void Heap::handleNeedFinalize()
2019{
2020 while (handleNeedFinalize(m_worldState.load())) { }
2021}
2022
2023void Heap::setGCDidJIT()
2024{
2025 m_worldState.transaction(
2026 [&] (unsigned& state) -> bool {
2027 RELEASE_ASSERT(state & stoppedBit);
2028 state |= gcDidJITBit;
2029 return true;
2030 });
2031}
2032
2033void Heap::setNeedFinalize()
2034{
2035 m_worldState.exchangeOr(needFinalizeBit);
2036 ParkingLot::unparkAll(&m_worldState);
2037 m_stopIfNecessaryTimer->scheduleSoon();
2038}
2039
2040void Heap::waitWhileNeedFinalize()
2041{
2042 for (;;) {
2043 unsigned oldState = m_worldState.load();
2044 if (!(oldState & needFinalizeBit)) {
2045 // This means that either there was no finalize request or the main thread will finalize
2046 // with heap access, so a subsequent call to stopTheWorld() will return only when
2047 // finalize finishes.
2048 return;
2049 }
2050 ParkingLot::compareAndPark(&m_worldState, oldState);
2051 }
2052}
2053
2054void Heap::setMutatorWaiting()
2055{
2056 m_worldState.exchangeOr(mutatorWaitingBit);
2057}
2058
2059void Heap::clearMutatorWaiting()
2060{
2061 m_worldState.exchangeAnd(~mutatorWaitingBit);
2062}
2063
2064void Heap::notifyThreadStopping(const AbstractLocker&)
2065{
2066 m_threadIsStopping = true;
2067 clearMutatorWaiting();
2068 ParkingLot::unparkAll(&m_worldState);
2069}
2070
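// Runs post-GC cleanup: the sweeping work done under SweepingScope (dead compiled code, source
// provider caches, eagerly-swept spaces), clearing per-VM caches, and invoking any registered
// heap finalizer callbacks.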
2071void Heap::finalize()
2072{
2073 MonotonicTime before;
2074 if (Options::logGC()) {
2075 before = MonotonicTime::now();
2076 dataLog("[GC<", RawPointer(this), ">: finalize ");
2077 }
2078
2079 {
2080 SweepingScope sweepingScope(*this);
2081 deleteUnmarkedCompiledCode();
2082 deleteSourceProviderCaches();
2083 sweepInFinalize();
2084 }
2085
2086 if (HasOwnPropertyCache* cache = vm()->hasOwnPropertyCache())
2087 cache->clear();
2088
2089 immutableButterflyToStringCache.clear();
2090
2091 for (const HeapFinalizerCallback& callback : m_heapFinalizerCallbacks)
2092 callback.run(*vm());
2093
2094 if (shouldSweepSynchronously())
2095 sweepSynchronously();
2096
2097 if (Options::logGC()) {
2098 MonotonicTime after = MonotonicTime::now();
2099 dataLog((after - before).milliseconds(), "ms]\n");
2100 }
2101}
2102
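// Enqueues a GC request and returns a ticket that waitForCollection() can block on. If the
// collector is idle, the mutator takes the conn so the collection can run without waking the
// collector thread.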
2103Heap::Ticket Heap::requestCollection(GCRequest request)
2104{
2105 stopIfNecessary();
2106
2107 ASSERT(vm()->currentThreadIsHoldingAPILock());
2108 RELEASE_ASSERT(vm()->atomicStringTable() == Thread::current().atomicStringTable());
2109
2110 LockHolder locker(*m_threadLock);
2111 // We may be able to steal the conn. That only works if the collector is definitely not running
2112 // right now. This is an optimization that prevents the collector thread from ever starting in most
2113 // cases.
2114 ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2115 if ((m_lastServedTicket == m_lastGrantedTicket) && (m_currentPhase == CollectorPhase::NotRunning)) {
2116 if (false)
2117 dataLog("Taking the conn.\n");
2118 m_worldState.exchangeOr(mutatorHasConnBit);
2119 }
2120
2121 m_requests.append(request);
2122 m_lastGrantedTicket++;
2123 if (!(m_worldState.load() & mutatorHasConnBit))
2124 m_threadCondition->notifyOne(locker);
2125 return m_lastGrantedTicket;
2126}
2127
2128void Heap::waitForCollection(Ticket ticket)
2129{
2130 waitForCollector(
2131 [&] (const AbstractLocker&) -> bool {
2132 return m_lastServedTicket >= ticket;
2133 });
2134}
2135
2136void Heap::sweepInFinalize()
2137{
2138 m_objectSpace.sweepLargeAllocations();
2139 vm()->eagerlySweptDestructibleObjectSpace.sweep();
2140}
2141
2142void Heap::suspendCompilerThreads()
2143{
2144#if ENABLE(DFG_JIT)
2145 // We ensure the worklists up front so that it's not possible for the mutator to start a new
2146 // worklist after we have suspended the ones it had started before. That's not very expensive
2147 // since the worklists use AutomaticThreads anyway.
2148 if (!VM::canUseJIT())
2149 return;
2150 for (unsigned i = DFG::numberOfWorklists(); i--;)
2151 DFG::ensureWorklistForIndex(i).suspendAllThreads();
2152#endif
2153}
2154
2155void Heap::willStartCollection()
2156{
2157 if (Options::logGC())
2158 dataLog("=> ");
2159
2160 if (shouldDoFullCollection()) {
2161 m_collectionScope = CollectionScope::Full;
2162 m_shouldDoFullCollection = false;
2163 if (Options::logGC())
2164 dataLog("FullCollection, ");
2165 if (false)
2166 dataLog("Full collection!\n");
2167 } else {
2168 m_collectionScope = CollectionScope::Eden;
2169 if (Options::logGC())
2170 dataLog("EdenCollection, ");
2171 if (false)
2172 dataLog("Eden collection!\n");
2173 }
2174 if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) {
2175 m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2176 m_extraMemorySize = 0;
2177 m_deprecatedExtraMemorySize = 0;
2178#if ENABLE(RESOURCE_USAGE)
2179 m_externalMemorySize = 0;
2180#endif
2181
2182 if (m_fullActivityCallback)
2183 m_fullActivityCallback->willCollect();
2184 } else {
2185 ASSERT(m_collectionScope && m_collectionScope.value() == CollectionScope::Eden);
2186 m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
2187 }
2188
2189 if (m_edenActivityCallback)
2190 m_edenActivityCallback->willCollect();
2191
2192 for (auto* observer : m_observers)
2193 observer->willGarbageCollect();
2194}
2195
2196void Heap::prepareForMarking()
2197{
2198 m_objectSpace.prepareForMarking();
2199}
2200
2201void Heap::reapWeakHandles()
2202{
2203 m_objectSpace.reapWeakSets();
2204}
2205
2206void Heap::pruneStaleEntriesFromWeakGCMaps()
2207{
2208 if (!m_collectionScope || m_collectionScope.value() != CollectionScope::Full)
2209 return;
2210 for (WeakGCMapBase* weakGCMap : m_weakGCMaps)
2211 weakGCMap->pruneStaleEntries();
2212}
2213
2214void Heap::sweepArrayBuffers()
2215{
2216 m_arrayBuffers.sweep(*vm());
2217}
2218
2219void Heap::snapshotUnswept()
2220{
2221 TimingScope timingScope(*this, "Heap::snapshotUnswept");
2222 m_objectSpace.snapshotUnswept();
2223}
2224
2225void Heap::deleteSourceProviderCaches()
2226{
2227 if (m_lastCollectionScope && m_lastCollectionScope.value() == CollectionScope::Full)
2228 m_vm->clearSourceProviderCaches();
2229}
2230
2231void Heap::notifyIncrementalSweeper()
2232{
2233 if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) {
2234 if (!m_logicallyEmptyWeakBlocks.isEmpty())
2235 m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2236 }
2237
2238 m_sweeper->startSweeping(*this);
2239}
2240
2241void Heap::updateAllocationLimits()
2242{
2243 static const bool verbose = false;
2244
2245 if (verbose) {
2246 dataLog("\n");
2247 dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n");
2248 }
2249
2250 // Calculate our current heap size threshold for the purpose of figuring out when we should
2251 // run another collection. This isn't the same as either size() or capacity(), though it should
2252 // be somewhere between the two. The key is to match the size calculations involved in calls to
2253 // didAllocate(), while never dangerously underestimating capacity(). In extreme cases of
2254 // fragmentation, we may have size() much smaller than capacity().
2255 size_t currentHeapSize = 0;
2256
2257 // For marked space, we use the total number of bytes visited. This matches the logic for
2258 // BlockDirectory's calls to didAllocate(), which effectively accounts for the total size of
2259 // objects allocated rather than blocks used. This will underestimate capacity(), and in case
2260 // of fragmentation, this may be substantial. Fortunately, marked space rarely fragments because
2261 // cells usually have a narrow range of sizes. So, the underestimation is probably OK.
2262 currentHeapSize += m_totalBytesVisited;
2263 if (verbose)
2264 dataLog("totalBytesVisited = ", m_totalBytesVisited, ", currentHeapSize = ", currentHeapSize, "\n");
2265
2266 // It's up to the user to ensure that extraMemorySize() ends up corresponding to allocation-time
2267 // extra memory reporting.
2268 currentHeapSize += extraMemorySize();
2269 if (!ASSERT_DISABLED) {
2270 Checked<size_t, RecordOverflow> checkedCurrentHeapSize = m_totalBytesVisited;
2271 checkedCurrentHeapSize += extraMemorySize();
2272 ASSERT(!checkedCurrentHeapSize.hasOverflowed() && checkedCurrentHeapSize.unsafeGet() == currentHeapSize);
2273 }
2274
2275 if (verbose)
2276 dataLog("extraMemorySize() = ", extraMemorySize(), ", currentHeapSize = ", currentHeapSize, "\n");
2277
2278 if (m_collectionScope && m_collectionScope.value() == CollectionScope::Full) {
2279 // To avoid pathological GC churn in very small and very large heaps, we set
2280 // the new allocation limit based on the current size of the heap, with a
2281 // fixed minimum.
2282 m_maxHeapSize = std::max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
2283 if (verbose)
2284 dataLog("Full: maxHeapSize = ", m_maxHeapSize, "\n");
2285 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2286 if (verbose)
2287 dataLog("Full: maxEdenSize = ", m_maxEdenSize, "\n");
2288 m_sizeAfterLastFullCollect = currentHeapSize;
2289 if (verbose)
2290 dataLog("Full: sizeAfterLastFullCollect = ", currentHeapSize, "\n");
2291 m_bytesAbandonedSinceLastFullCollect = 0;
2292 if (verbose)
2293 dataLog("Full: bytesAbandonedSinceLastFullCollect = ", 0, "\n");
2294 } else {
2295 ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
2296 // Theoretically, we shouldn't ever scan more memory than the heap size we planned to have.
2297 // But we are sloppy, so we have to defend against the overflow.
2298 m_maxEdenSize = currentHeapSize > m_maxHeapSize ? 0 : m_maxHeapSize - currentHeapSize;
2299 if (verbose)
2300 dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2301 m_sizeAfterLastEdenCollect = currentHeapSize;
2302 if (verbose)
2303 dataLog("Eden: sizeAfterLastEdenCollect = ", currentHeapSize, "\n");
2304 double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
2305 double minEdenToOldGenerationRatio = 1.0 / 3.0;
2306 if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
2307 m_shouldDoFullCollection = true;
2308 // This seems suspect at first, but what it does is ensure that the nursery size is fixed.
2309 m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
2310 if (verbose)
2311 dataLog("Eden: maxHeapSize = ", m_maxHeapSize, "\n");
2312 m_maxEdenSize = m_maxHeapSize - currentHeapSize;
2313 if (verbose)
2314 dataLog("Eden: maxEdenSize = ", m_maxEdenSize, "\n");
2315 if (m_fullActivityCallback) {
2316 ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
2317 m_fullActivityCallback->didAllocate(*this, currentHeapSize - m_sizeAfterLastFullCollect);
2318 }
2319 }
2320
2321#if PLATFORM(IOS_FAMILY)
2322 // Get critical memory threshold for next cycle.
2323 overCriticalMemoryThreshold(MemoryThresholdCallType::Direct);
2324#endif
2325
2326 m_sizeAfterLastCollect = currentHeapSize;
2327 if (verbose)
2328 dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n");
2329 m_bytesAllocatedThisCycle = 0;
2330
2331 if (Options::logGC())
2332 dataLog("=> ", currentHeapSize / 1024, "kb, ");
2333}
2334
2335void Heap::didFinishCollection()
2336{
2337 m_afterGC = MonotonicTime::now();
2338 CollectionScope scope = *m_collectionScope;
2339 if (scope == CollectionScope::Full)
2340 m_lastFullGCLength = m_afterGC - m_beforeGC;
2341 else
2342 m_lastEdenGCLength = m_afterGC - m_beforeGC;
2343
2344#if ENABLE(RESOURCE_USAGE)
2345 ASSERT(externalMemorySize() <= extraMemorySize());
2346#endif
2347
2348 if (HeapProfiler* heapProfiler = m_vm->heapProfiler()) {
2349 gatherExtraHeapSnapshotData(*heapProfiler);
2350 removeDeadHeapSnapshotNodes(*heapProfiler);
2351 }
2352
2353 if (UNLIKELY(m_verifier))
2354 m_verifier->endGC();
2355
2356 RELEASE_ASSERT(m_collectionScope);
2357 m_lastCollectionScope = m_collectionScope;
2358 m_collectionScope = WTF::nullopt;
2359
2360 for (auto* observer : m_observers)
2361 observer->didGarbageCollect(scope);
2362}
2363
2364void Heap::resumeCompilerThreads()
2365{
2366#if ENABLE(DFG_JIT)
2367 if (!VM::canUseJIT())
2368 return;
2369 for (unsigned i = DFG::numberOfWorklists(); i--;)
2370 DFG::existingWorklistForIndex(i).resumeAllThreads();
2371#endif
2372}
2373
2374GCActivityCallback* Heap::fullActivityCallback()
2375{
2376 return m_fullActivityCallback.get();
2377}
2378
2379GCActivityCallback* Heap::edenActivityCallback()
2380{
2381 return m_edenActivityCallback.get();
2382}
2383
2384IncrementalSweeper& Heap::sweeper()
2385{
2386 return m_sweeper.get();
2387}
2388
2389void Heap::setGarbageCollectionTimerEnabled(bool enable)
2390{
2391 if (m_fullActivityCallback)
2392 m_fullActivityCallback->setEnabled(enable);
2393 if (m_edenActivityCallback)
2394 m_edenActivityCallback->setEnabled(enable);
2395}
2396
2397void Heap::didAllocate(size_t bytes)
2398{
2399 if (m_edenActivityCallback)
2400 m_edenActivityCallback->didAllocate(*this, m_bytesAllocatedThisCycle + m_bytesAbandonedSinceLastFullCollect);
2401 m_bytesAllocatedThisCycle += bytes;
2402 performIncrement(bytes);
2403}
2404
2405bool Heap::isValidAllocation(size_t)
2406{
2407 if (!isValidThreadState(m_vm))
2408 return false;
2409
2410 if (isCurrentThreadBusy())
2411 return false;
2412
2413 return true;
2414}
2415
2416void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
2417{
2418 WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
2419}
2420
2421void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
2422{
2423 HandleSlot slot = handle.slot();
2424 Finalizer finalizer = reinterpret_cast<Finalizer>(context);
2425 finalizer(slot->asCell());
2426 WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
2427}
2428
2429void Heap::collectNowFullIfNotDoneRecently(Synchronousness synchronousness)
2430{
2431 if (!m_fullActivityCallback) {
2432 collectNow(synchronousness, CollectionScope::Full);
2433 return;
2434 }
2435
2436 if (m_fullActivityCallback->didGCRecently()) {
2437 // A synchronous GC was already requested recently, so we merely accelerate the next collection.
2438 reportAbandonedObjectGraph();
2439 return;
2440 }
2441
2442 m_fullActivityCallback->setDidGCRecently();
2443 collectNow(synchronousness, CollectionScope::Full);
2444}
2445
2446bool Heap::useGenerationalGC()
2447{
2448 return Options::useGenerationalGC() && !VM::isInMiniMode();
2449}
2450
2451bool Heap::shouldSweepSynchronously()
2452{
2453 return Options::sweepSynchronously() || VM::isInMiniMode();
2454}
2455
2456bool Heap::shouldDoFullCollection()
2457{
2458 if (!useGenerationalGC())
2459 return true;
2460
2461 if (!m_currentRequest.scope)
2462 return m_shouldDoFullCollection || overCriticalMemoryThreshold();
2463 return *m_currentRequest.scope == CollectionScope::Full;
2464}
2465
2466void Heap::addLogicallyEmptyWeakBlock(WeakBlock* block)
2467{
2468 m_logicallyEmptyWeakBlocks.append(block);
2469}
2470
2471void Heap::sweepAllLogicallyEmptyWeakBlocks()
2472{
2473 if (m_logicallyEmptyWeakBlocks.isEmpty())
2474 return;
2475
2476 m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
2477 while (sweepNextLogicallyEmptyWeakBlock()) { }
2478}
2479
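// Sweeps one logically empty WeakBlock per call, destroying it if the sweep left it empty.
// Returns false once there are no more blocks to sweep.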
2480bool Heap::sweepNextLogicallyEmptyWeakBlock()
2481{
2482 if (m_indexOfNextLogicallyEmptyWeakBlockToSweep == WTF::notFound)
2483 return false;
2484
2485 WeakBlock* block = m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep];
2486
2487 block->sweep();
2488 if (block->isEmpty()) {
2489 std::swap(m_logicallyEmptyWeakBlocks[m_indexOfNextLogicallyEmptyWeakBlockToSweep], m_logicallyEmptyWeakBlocks.last());
2490 m_logicallyEmptyWeakBlocks.removeLast();
2491 WeakBlock::destroy(*this, block);
2492 } else
2493 m_indexOfNextLogicallyEmptyWeakBlockToSweep++;
2494
2495 if (m_indexOfNextLogicallyEmptyWeakBlockToSweep >= m_logicallyEmptyWeakBlocks.size()) {
2496 m_indexOfNextLogicallyEmptyWeakBlockToSweep = WTF::notFound;
2497 return false;
2498 }
2499
2500 return true;
2501}
2502
2503size_t Heap::visitCount()
2504{
2505 size_t result = 0;
2506 forEachSlotVisitor(
2507 [&] (SlotVisitor& visitor) {
2508 result += visitor.visitCount();
2509 });
2510 return result;
2511}
2512
2513size_t Heap::bytesVisited()
2514{
2515 size_t result = 0;
2516 forEachSlotVisitor(
2517 [&] (SlotVisitor& visitor) {
2518 result += visitor.bytesVisited();
2519 });
2520 return result;
2521}
2522
2523void Heap::forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>& func)
2524{
2525 // We don't know the full set of CodeBlocks until compilation has terminated.
2526 completeAllJITPlans();
2527
2528 return m_codeBlocks->iterate(func);
2529}
2530
2531void Heap::forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& locker, const ScopedLambda<void(CodeBlock*)>& func)
2532{
2533 return m_codeBlocks->iterate(locker, func);
2534}
2535
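// Slow path of the write barrier: when the mutator is fenced, we re-check the cell's state under
// a storeLoadFence before adding it to the remembered set.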
2536void Heap::writeBarrierSlowPath(const JSCell* from)
2537{
2538 if (UNLIKELY(mutatorShouldBeFenced())) {
2539 // In this case, the barrierThreshold is the tautological threshold, so from may not actually
2540 // be black. But we can't know for sure until we fire off a fence.
2541 WTF::storeLoadFence();
2542 if (from->cellState() != CellState::PossiblyBlack)
2543 return;
2544 }
2545
2546 addToRememberedSet(from);
2547}
2548
2549bool Heap::isCurrentThreadBusy()
2550{
2551 return Thread::mayBeGCThread() || mutatorState() != MutatorState::Running;
2552}
2553
2554void Heap::reportExtraMemoryVisited(size_t size)
2555{
2556 size_t* counter = &m_extraMemorySize;
2557
2558 for (;;) {
2559 size_t oldSize = *counter;
2560 // FIXME: Change this to use SaturatedArithmetic when available.
2561 // https://bugs.webkit.org/show_bug.cgi?id=170411
2562 Checked<size_t, RecordOverflow> checkedNewSize = oldSize;
2563 checkedNewSize += size;
2564 size_t newSize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet();
2565 if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, newSize))
2566 return;
2567 }
2568}
2569
2570#if ENABLE(RESOURCE_USAGE)
2571void Heap::reportExternalMemoryVisited(size_t size)
2572{
2573 size_t* counter = &m_externalMemorySize;
2574
2575 for (;;) {
2576 size_t oldSize = *counter;
2577 if (WTF::atomicCompareExchangeWeakRelaxed(counter, oldSize, oldSize + size))
2578 return;
2579 }
2580}
2581#endif
2582
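// Decides whether this allocation should trigger a collection. Respects deferral (either via a
// GCDeferralContext or the deferral depth) and compares the bytes allocated this cycle against
// the per-cycle budget (gcMaxHeapSize if set, otherwise the eden limit).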
2583void Heap::collectIfNecessaryOrDefer(GCDeferralContext* deferralContext)
2584{
2585 ASSERT(deferralContext || isDeferred() || !DisallowGC::isInEffectOnCurrentThread());
2586 if (validateDFGDoesGC)
2587 RELEASE_ASSERT(expectDoesGC());
2588
2589 if (!m_isSafeToCollect)
2590 return;
2591
2592 switch (mutatorState()) {
2593 case MutatorState::Running:
2594 case MutatorState::Allocating:
2595 break;
2596 case MutatorState::Sweeping:
2597 case MutatorState::Collecting:
2598 return;
2599 }
2600 if (!Options::useGC())
2601 return;
2602
2603 if (mayNeedToStop()) {
2604 if (deferralContext)
2605 deferralContext->m_shouldGC = true;
2606 else if (isDeferred())
2607 m_didDeferGCWork = true;
2608 else
2609 stopIfNecessary();
2610 }
2611
2612 if (UNLIKELY(Options::gcMaxHeapSize())) {
2613 if (m_bytesAllocatedThisCycle <= Options::gcMaxHeapSize())
2614 return;
2615 } else {
2616 size_t bytesAllowedThisCycle = m_maxEdenSize;
2617
2618#if PLATFORM(IOS_FAMILY)
2619 if (overCriticalMemoryThreshold())
2620 bytesAllowedThisCycle = std::min(m_maxEdenSizeWhenCritical, bytesAllowedThisCycle);
2621#endif
2622
2623 if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle)
2624 return;
2625 }
2626
2627 if (deferralContext)
2628 deferralContext->m_shouldGC = true;
2629 else if (isDeferred())
2630 m_didDeferGCWork = true;
2631 else {
2632 collectAsync();
2633 stopIfNecessary(); // This will immediately start the collection if we have the conn.
2634 }
2635}
2636
2637void Heap::decrementDeferralDepthAndGCIfNeededSlow()
2638{
2639 // Can't do anything if we're still deferred.
2640 if (m_deferralDepth)
2641 return;
2642
2643 ASSERT(!isDeferred());
2644
2645 m_didDeferGCWork = false;
2646 // FIXME: Bring back something like the DeferGCProbability mode.
2647 // https://bugs.webkit.org/show_bug.cgi?id=166627
2648 collectIfNecessaryOrDefer();
2649}
2650
2651void Heap::registerWeakGCMap(WeakGCMapBase* weakGCMap)
2652{
2653 m_weakGCMaps.add(weakGCMap);
2654}
2655
2656void Heap::unregisterWeakGCMap(WeakGCMapBase* weakGCMap)
2657{
2658 m_weakGCMaps.remove(weakGCMap);
2659}
2660
2661void Heap::didAllocateBlock(size_t capacity)
2662{
2663#if ENABLE(RESOURCE_USAGE)
2664 m_blockBytesAllocated += capacity;
2665#else
2666 UNUSED_PARAM(capacity);
2667#endif
2668}
2669
2670void Heap::didFreeBlock(size_t capacity)
2671{
2672#if ENABLE(RESOURCE_USAGE)
2673 m_blockBytesAllocated -= capacity;
2674#else
2675 UNUSED_PARAM(capacity);
2676#endif
2677}
2678
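// Registers the built-in marking constraints with the constraint solver: conservative scan, misc
// small roots, strong handles, debugger/profiler roots, weak sets, output constraints, DFG
// worklists (when the JIT is enabled), executing CodeBlocks, and the mark stack merging
// constraint.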
2679void Heap::addCoreConstraints()
2680{
2681 m_constraintSet->add(
2682 "Cs", "Conservative Scan",
2683 [this, lastVersion = static_cast<uint64_t>(0)] (SlotVisitor& slotVisitor) mutable {
2684 bool shouldNotProduceWork = lastVersion == m_phaseVersion;
2685 if (shouldNotProduceWork)
2686 return;
2687
2688 TimingScope preConvergenceTimingScope(*this, "Constraint: conservative scan");
2689 m_objectSpace.prepareForConservativeScan();
2690 m_jitStubRoutines->prepareForConservativeScan();
2691
2692 {
2693 ConservativeRoots conservativeRoots(*this);
2694 SuperSamplerScope superSamplerScope(false);
2695
2696 gatherStackRoots(conservativeRoots);
2697 gatherJSStackRoots(conservativeRoots);
2698 gatherScratchBufferRoots(conservativeRoots);
2699
2700 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan);
2701 slotVisitor.append(conservativeRoots);
2702 }
2703 if (VM::canUseJIT()) {
2704 // JITStubRoutines must be visited after scanning ConservativeRoots since JITStubRoutines depend on the hook executed while gathering ConservativeRoots.
2705 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::JITStubRoutines);
2706 m_jitStubRoutines->traceMarkedStubRoutines(slotVisitor);
2707 }
2708
2709 lastVersion = m_phaseVersion;
2710 },
2711 ConstraintVolatility::GreyedByExecution);
2712
2713 m_constraintSet->add(
2714 "Msr", "Misc Small Roots",
2715 [this] (SlotVisitor& slotVisitor) {
2716
2717#if JSC_OBJC_API_ENABLED
2718 scanExternalRememberedSet(*m_vm, slotVisitor);
2719#endif
2720 if (m_vm->smallStrings.needsToBeVisited(*m_collectionScope)) {
2721 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongReferences);
2722 m_vm->smallStrings.visitStrongReferences(slotVisitor);
2723 }
2724
2725 {
2726 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ProtectedValues);
2727 for (auto& pair : m_protectedValues)
2728 slotVisitor.appendUnbarriered(pair.key);
2729 }
2730
2731 if (m_markListSet && m_markListSet->size()) {
2732 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::ConservativeScan);
2733 MarkedArgumentBuffer::markLists(slotVisitor, *m_markListSet);
2734 }
2735
2736 {
2737 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::VMExceptions);
2738 slotVisitor.appendUnbarriered(m_vm->exception());
2739 slotVisitor.appendUnbarriered(m_vm->lastException());
2740 }
2741 },
2742 ConstraintVolatility::GreyedByExecution);
2743
2744 m_constraintSet->add(
2745 "Sh", "Strong Handles",
2746 [this] (SlotVisitor& slotVisitor) {
2747 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::StrongHandles);
2748 m_handleSet.visitStrongHandles(slotVisitor);
2749 },
2750 ConstraintVolatility::GreyedByExecution);
2751
2752 m_constraintSet->add(
2753 "D", "Debugger",
2754 [this] (SlotVisitor& slotVisitor) {
2755 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Debugger);
2756
2757#if ENABLE(SAMPLING_PROFILER)
2758 if (SamplingProfiler* samplingProfiler = m_vm->samplingProfiler()) {
2759 LockHolder locker(samplingProfiler->getLock());
2760 samplingProfiler->processUnverifiedStackTraces();
2761 samplingProfiler->visit(slotVisitor);
2762 if (Options::logGC() == GCLogging::Verbose)
2763 dataLog("Sampling Profiler data:\n", slotVisitor);
2764 }
2765#endif // ENABLE(SAMPLING_PROFILER)
2766
2767 if (m_vm->typeProfiler())
2768 m_vm->typeProfilerLog()->visit(slotVisitor);
2769
2770 if (auto* shadowChicken = m_vm->shadowChicken())
2771 shadowChicken->visitChildren(slotVisitor);
2772 },
2773 ConstraintVolatility::GreyedByExecution);
2774
2775 m_constraintSet->add(
2776 "Ws", "Weak Sets",
2777 [this] (SlotVisitor& slotVisitor) {
2778 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::WeakSets);
2779 m_objectSpace.visitWeakSets(slotVisitor);
2780 },
2781 ConstraintVolatility::GreyedByMarking);
2782
2783 m_constraintSet->add(
2784 "O", "Output",
2785 [] (SlotVisitor& slotVisitor) {
2786 VM& vm = slotVisitor.vm();
2787
2788 auto callOutputConstraint = [] (SlotVisitor& slotVisitor, HeapCell* heapCell, HeapCell::Kind) {
2789 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::Output);
2790 VM& vm = slotVisitor.vm();
2791 JSCell* cell = static_cast<JSCell*>(heapCell);
2792 cell->methodTable(vm)->visitOutputConstraints(cell, slotVisitor);
2793 };
2794
2795 auto add = [&] (auto& set) {
2796 slotVisitor.addParallelConstraintTask(set.forEachMarkedCellInParallel(callOutputConstraint));
2797 };
2798
2799 add(vm.executableToCodeBlockEdgesWithConstraints);
2800 if (vm.m_weakMapSpace)
2801 add(*vm.m_weakMapSpace);
2802 },
2803 ConstraintVolatility::GreyedByMarking,
2804 ConstraintParallelism::Parallel);
2805
2806#if ENABLE(DFG_JIT)
2807 if (VM::canUseJIT()) {
2808 m_constraintSet->add(
2809 "Dw", "DFG Worklists",
2810 [this] (SlotVisitor& slotVisitor) {
2811 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::DFGWorkLists);
2812
2813 for (unsigned i = DFG::numberOfWorklists(); i--;)
2814 DFG::existingWorklistForIndex(i).visitWeakReferences(slotVisitor);
2815
2816 // FIXME: This is almost certainly unnecessary.
2817 // https://bugs.webkit.org/show_bug.cgi?id=166829
2818 DFG::iterateCodeBlocksForGC(
2819 *m_vm,
2820 [&] (CodeBlock* codeBlock) {
2821 slotVisitor.appendUnbarriered(codeBlock);
2822 });
2823
2824 if (Options::logGC() == GCLogging::Verbose)
2825 dataLog("DFG Worklists:\n", slotVisitor);
2826 },
2827 ConstraintVolatility::GreyedByMarking);
2828 }
2829#endif
2830
2831 m_constraintSet->add(
2832 "Cb", "CodeBlocks",
2833 [this] (SlotVisitor& slotVisitor) {
2834 SetRootMarkReasonScope rootScope(slotVisitor, SlotVisitor::RootMarkReason::CodeBlocks);
2835 iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(
2836 [&] (CodeBlock* codeBlock) {
2837 // Visit the CodeBlock as a constraint only if it's black.
2838 if (isMarked(codeBlock)
2839 && codeBlock->cellState() == CellState::PossiblyBlack)
2840 slotVisitor.visitAsConstraint(codeBlock);
2841 });
2842 },
2843 ConstraintVolatility::SeldomGreyed);
2844
2845 m_constraintSet->add(std::make_unique<MarkStackMergingConstraint>(*this));
2846}
2847
2848void Heap::addMarkingConstraint(std::unique_ptr<MarkingConstraint> constraint)
2849{
2850 PreventCollectionScope preventCollectionScope(*this);
2851 m_constraintSet->add(WTFMove(constraint));
2852}
2853
2854void Heap::notifyIsSafeToCollect()
2855{
2856 MonotonicTime before;
2857 if (Options::logGC()) {
2858 before = MonotonicTime::now();
2859 dataLog("[GC<", RawPointer(this), ">: starting ");
2860 }
2861
2862 addCoreConstraints();
2863
2864 m_isSafeToCollect = true;
2865
2866 if (Options::collectContinuously()) {
2867 m_collectContinuouslyThread = Thread::create(
2868 "JSC DEBUG Continuous GC",
2869 [this] () {
2870 MonotonicTime initialTime = MonotonicTime::now();
2871 Seconds period = Seconds::fromMilliseconds(Options::collectContinuouslyPeriodMS());
2872 while (!m_shouldStopCollectingContinuously) {
2873 {
2874 LockHolder locker(*m_threadLock);
2875 if (m_requests.isEmpty()) {
2876 m_requests.append(WTF::nullopt);
2877 m_lastGrantedTicket++;
2878 m_threadCondition->notifyOne(locker);
2879 }
2880 }
2881
2882 {
2883 LockHolder locker(m_collectContinuouslyLock);
2884 Seconds elapsed = MonotonicTime::now() - initialTime;
2885 Seconds elapsedInPeriod = elapsed % period;
2886 MonotonicTime timeToWakeUp =
2887 initialTime + elapsed - elapsedInPeriod + period;
2888 while (!hasElapsed(timeToWakeUp) && !m_shouldStopCollectingContinuously) {
2889 m_collectContinuouslyCondition.waitUntil(
2890 m_collectContinuouslyLock, timeToWakeUp);
2891 }
2892 }
2893 }
2894 });
2895 }
2896
2897 if (Options::logGC())
2898 dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n");
2899}
2900
2901void Heap::preventCollection()
2902{
2903 if (!m_isSafeToCollect)
2904 return;
2905
2906 // This prevents the collectContinuously thread from starting a collection.
2907 m_collectContinuouslyLock.lock();
2908
2909 // Wait for all collections to finish.
2910 waitForCollector(
2911 [&] (const AbstractLocker&) -> bool {
2912 ASSERT(m_lastServedTicket <= m_lastGrantedTicket);
2913 return m_lastServedTicket == m_lastGrantedTicket;
2914 });
2915
2916 // Now a collection can only start if this thread starts it.
2917 RELEASE_ASSERT(!m_collectionScope);
2918}
2919
2920void Heap::allowCollection()
2921{
2922 if (!m_isSafeToCollect)
2923 return;
2924
2925 m_collectContinuouslyLock.unlock();
2926}
2927
2928void Heap::setMutatorShouldBeFenced(bool value)
2929{
2930 m_mutatorShouldBeFenced = value;
2931 m_barrierThreshold = value ? tautologicalThreshold : blackThreshold;
2932}
2933
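// Drives incremental marking from allocation: each allocation adds bytes * gcIncrementScale to a
// balance, and once the balance reaches gcIncrementBytes the mutator's SlotVisitor drains up to
// gcIncrementMaxBytes of marking work, with any overshoot remembered as a negative balance.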
2934void Heap::performIncrement(size_t bytes)
2935{
2936 if (!m_objectSpace.isMarking())
2937 return;
2938
2939 if (isDeferred())
2940 return;
2941
2942 m_incrementBalance += bytes * Options::gcIncrementScale();
2943
2944 // Guard against the balance going haywire. Since this is an optimization, it's OK to go back to
2945 // any consistent state when the double goes wild.
2946 if (std::isnan(m_incrementBalance) || std::isinf(m_incrementBalance))
2947 m_incrementBalance = 0;
2948
2949 if (m_incrementBalance < static_cast<double>(Options::gcIncrementBytes()))
2950 return;
2951
2952 double targetBytes = m_incrementBalance;
2953 if (targetBytes <= 0)
2954 return;
2955 targetBytes = std::min(targetBytes, Options::gcIncrementMaxBytes());
2956
2957 SlotVisitor& slotVisitor = *m_mutatorSlotVisitor;
2958 ParallelModeEnabler parallelModeEnabler(slotVisitor);
2959 size_t bytesVisited = slotVisitor.performIncrementOfDraining(static_cast<size_t>(targetBytes));
2960 // incrementBalance may go negative here because it'll remember how many bytes we overshot.
2961 m_incrementBalance -= bytesVisited;
2962}
2963
2964void Heap::addHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2965{
2966 m_heapFinalizerCallbacks.append(callback);
2967}
2968
2969void Heap::removeHeapFinalizerCallback(const HeapFinalizerCallback& callback)
2970{
2971 m_heapFinalizerCallbacks.removeFirst(callback);
2972}
2973
2974void Heap::setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2975{
2976 auto locker = holdLock(m_markingMutex);
2977 m_bonusVisitorTask = task;
2978 m_markingConditionVariable.notifyAll();
2979}
2980
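// Runs the task on the collector's SlotVisitor while also publishing it as the bonus visitor
// task, so that any parallel markers can pick it up too.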
2981void Heap::runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>> task)
2982{
2983 unsigned initialRefCount = task->refCount();
2984 setBonusVisitorTask(task);
2985 task->run(*m_collectorSlotVisitor);
2986 setBonusVisitorTask(nullptr);
2987 // The constraint solver expects the return of this function to imply that the task has
2988 // terminated on all threads. This wait ensures that property.
2989 {
2990 auto locker = holdLock(m_markingMutex);
2991 while (task->refCount() > initialRefCount)
2992 m_markingConditionVariable.wait(m_markingMutex);
2993 }
2994}
2995
2996} // namespace JSC
2997