/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003-2019 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#pragma once

#include "ArrayBuffer.h"
#include "CellState.h"
#include "CollectionScope.h"
#include "CollectorPhase.h"
#include "DeleteAllCodeEffort.h"
#include "GCConductor.h"
#include "GCIncomingRefCountedSet.h"
#include "GCRequest.h"
#include "HandleSet.h"
#include "HeapFinalizerCallback.h"
#include "HeapObserver.h"
#include "MarkedBlock.h"
#include "MarkedSpace.h"
#include "MutatorState.h"
#include "Options.h"
#include "StructureIDTable.h"
#include "Synchronousness.h"
#include "WeakHandleOwner.h"
#include <wtf/AutomaticThread.h>
#include <wtf/ConcurrentPtrHashSet.h>
#include <wtf/Deque.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/Markable.h>
#include <wtf/ParallelHelperPool.h>
#include <wtf/Threading.h>

namespace JSC {

class CodeBlock;
class CodeBlockSet;
class CollectingScope;
class ConservativeRoots;
class GCDeferralContext;
class EdenGCActivityCallback;
class FullGCActivityCallback;
class GCActivityCallback;
class GCAwareJITStubRoutine;
class Heap;
class HeapProfiler;
class HeapVerifier;
class IncrementalSweeper;
class JITStubRoutine;
class JITStubRoutineSet;
class JSCell;
class JSImmutableButterfly;
class JSValue;
class LLIntOffsetsExtractor;
class MachineThreads;
class MarkStackArray;
class MarkStackMergingConstraint;
class BlockDirectory;
class MarkedArgumentBuffer;
class MarkingConstraint;
class MarkingConstraintSet;
class MutatorScheduler;
class RunningScope;
class SlotVisitor;
class SpaceTimeMutatorScheduler;
class StopIfNecessaryTimer;
class SweepingScope;
class VM;
class WeakGCMapBase;
struct CurrentThreadState;

#if USE(GLIB)
class JSCGLibWrapperObject;
#endif

namespace DFG {
class SpeculativeJIT;
class Worklist;
}

#if !ASSERT_DISABLED
#define ENABLE_DFG_DOES_GC_VALIDATION 1
#else
#define ENABLE_DFG_DOES_GC_VALIDATION 0
#endif
constexpr bool validateDFGDoesGC = ENABLE_DFG_DOES_GC_VALIDATION;

typedef HashCountedSet<JSCell*> ProtectCountSet;
typedef HashCountedSet<const char*> TypeCountSet;

enum HeapType { SmallHeap, LargeHeap };

class HeapUtil;

class Heap {
    WTF_MAKE_NONCOPYABLE(Heap);
public:
    friend class JIT;
    friend class DFG::SpeculativeJIT;
    static Heap* heap(const JSValue); // 0 for immediate values
    static Heap* heap(const HeapCell*);

    // This constant determines how many blocks we iterate between checks of our
    // deadline when calling Heap::isPagedOut. Decreasing it will cause us to detect
    // overstepping our deadline more quickly, while increasing it will cause
    // our scan to run faster.
    static const unsigned s_timeCheckResolution = 16;

    bool isMarked(const void*);
    static bool testAndSetMarked(HeapVersion, const void*);

    static size_t cellSize(const void*);

    void writeBarrier(const JSCell* from);
    void writeBarrier(const JSCell* from, JSValue to);
    void writeBarrier(const JSCell* from, JSCell* to);

    void writeBarrierWithoutFence(const JSCell* from);

    void mutatorFence();

    // Take this if you know that from->cellState() < barrierThreshold.
    JS_EXPORT_PRIVATE void writeBarrierSlowPath(const JSCell* from);
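
    // Illustrative usage sketch (comment only, not part of the API): a store that creates a new
    // reference from an existing cell is typically paired with the barrier above, e.g.
    //
    //     owner->slot() = newCell;              // hypothetical store into 'owner'
    //     vm.heap.writeBarrier(owner, newCell); // record the owner -> newCell edge for the GC
    //
    // The inline writeBarrier() overloads do a cheap cellState()/barrierThreshold() check and only
    // fall through to writeBarrierSlowPath() when the barrier may actually be required.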

    Heap(VM*, HeapType);
    ~Heap();
    void lastChanceToFinalize();
    void releaseDelayedReleasedObjects();

    VM* vm() const;

    MarkedSpace& objectSpace() { return m_objectSpace; }
    MachineThreads& machineThreads() { return *m_machineThreads; }

    SlotVisitor& collectorSlotVisitor() { return *m_collectorSlotVisitor; }

    JS_EXPORT_PRIVATE GCActivityCallback* fullActivityCallback();
    JS_EXPORT_PRIVATE GCActivityCallback* edenActivityCallback();
    JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool);

    JS_EXPORT_PRIVATE IncrementalSweeper& sweeper();

    void addObserver(HeapObserver* observer) { m_observers.append(observer); }
    void removeObserver(HeapObserver* observer) { m_observers.removeFirst(observer); }

    MutatorState mutatorState() const { return m_mutatorState; }
    Optional<CollectionScope> collectionScope() const { return m_collectionScope; }
    bool hasHeapAccess() const;
    bool worldIsStopped() const;
    bool worldIsRunning() const { return !worldIsStopped(); }

    // We're always busy on the collection threads. On the main thread, this returns true if we're
    // helping the heap.
    JS_EXPORT_PRIVATE bool isCurrentThreadBusy();

    typedef void (*Finalizer)(JSCell*);
    JS_EXPORT_PRIVATE void addFinalizer(JSCell*, Finalizer);

    void notifyIsSafeToCollect();
    bool isSafeToCollect() const { return m_isSafeToCollect; }

    bool isShuttingDown() const { return m_isShuttingDown; }

    JS_EXPORT_PRIVATE bool isHeapSnapshotting() const;

    JS_EXPORT_PRIVATE void sweepSynchronously();

    bool shouldCollectHeuristic();

    // Queue up a collection. Returns immediately. This will not queue a collection if a collection
    // of equal or greater strength exists. Full collections are stronger than WTF::nullopt collections
    // and WTF::nullopt collections are stronger than Eden collections. WTF::nullopt means that the GC can
    // choose Eden or Full. This implies that if you request a GC while that GC is ongoing, nothing
    // will happen.
    JS_EXPORT_PRIVATE void collectAsync(GCRequest = GCRequest());

    // Queue up a collection and wait for it to complete. This won't return until you get your own
    // complete collection. For example, if there was an ongoing asynchronous collection at the time
    // you called this, then this would wait for that one to complete and then trigger your
    // collection and then return. In weird cases, there could be multiple GC requests in the backlog
    // and this will wait for that backlog before running its GC and returning.
    JS_EXPORT_PRIVATE void collectSync(GCRequest = GCRequest());

    JS_EXPORT_PRIVATE void collect(Synchronousness, GCRequest = GCRequest());

    // Like collect(), but in the case of Async this will stopIfNecessary() and in the case of
    // Sync this will sweep synchronously.
    JS_EXPORT_PRIVATE void collectNow(Synchronousness, GCRequest = GCRequest());

    JS_EXPORT_PRIVATE void collectNowFullIfNotDoneRecently(Synchronousness);

    void collectIfNecessaryOrDefer(GCDeferralContext* = nullptr);
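
    // Illustrative usage sketch (comment only): an embedder that wants a guaranteed full,
    // synchronous collection plus an eager sweep might write something like
    //
    //     JSLockHolder lock(vm);                                            // hypothetical call site
    //     vm.heap.collectNow(Synchronousness::Sync, CollectionScope::Full);
    //
    // whereas collectAsync() and collectIfNecessaryOrDefer() merely request work and return
    // immediately, leaving the scheduling to the GC.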

    void completeAllJITPlans();

    // Use this API to report non-GC memory referenced by GC objects. Be sure to
    // call both of these functions: Calling only one may trigger catastrophic
    // memory growth.
    void reportExtraMemoryAllocated(size_t);
    JS_EXPORT_PRIVATE void reportExtraMemoryVisited(size_t);

#if ENABLE(RESOURCE_USAGE)
    // Use this API to report the subset of extra memory that lives outside this process.
    JS_EXPORT_PRIVATE void reportExternalMemoryVisited(size_t);
    size_t externalMemorySize() { return m_externalMemorySize; }
#endif

    // Use this API to report non-GC memory if you can't use the better API above.
    void deprecatedReportExtraMemory(size_t);
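
    // Illustrative usage sketch (comment only): a cell that owns out-of-line malloc'd storage
    // typically reports that storage twice -- once when it is allocated and once each time the
    // cell is visited (e.g. via the slot visitor's reportExtraMemoryVisited()) -- so both halves
    // of the accounting stay in sync:
    //
    //     vm.heap.reportExtraMemoryAllocated(bufferSize); // at allocation time (hypothetical)
    //     visitor.reportExtraMemoryVisited(bufferSize);   // from visitChildren() (hypothetical)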

    JS_EXPORT_PRIVATE void reportAbandonedObjectGraph();

    JS_EXPORT_PRIVATE void protect(JSValue);
    JS_EXPORT_PRIVATE bool unprotect(JSValue); // True when the protect count drops to 0.

    JS_EXPORT_PRIVATE size_t extraMemorySize(); // Non-GC memory referenced by GC objects.
    JS_EXPORT_PRIVATE size_t size();
    JS_EXPORT_PRIVATE size_t capacity();
    JS_EXPORT_PRIVATE size_t objectCount();
    JS_EXPORT_PRIVATE size_t globalObjectCount();
    JS_EXPORT_PRIVATE size_t protectedObjectCount();
    JS_EXPORT_PRIVATE size_t protectedGlobalObjectCount();
    JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> protectedObjectTypeCounts();
    JS_EXPORT_PRIVATE std::unique_ptr<TypeCountSet> objectTypeCounts();

    HashSet<MarkedArgumentBuffer*>& markListSet();

    template<typename Functor> void forEachProtectedCell(const Functor&);
    template<typename Functor> void forEachCodeBlock(const Functor&);
    template<typename Functor> void forEachCodeBlockIgnoringJITPlans(const AbstractLocker& codeBlockSetLocker, const Functor&);
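
    // Illustrative usage sketch (comment only): the functor is invoked once per code block, e.g.
    //
    //     heap.forEachCodeBlock([&] (CodeBlock* codeBlock) {
    //         // hypothetical per-CodeBlock inspection
    //     });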

    HandleSet* handleSet() { return &m_handleSet; }

    void willStartIterating();
    void didFinishIterating();

    Seconds lastFullGCLength() const { return m_lastFullGCLength; }
    Seconds lastEdenGCLength() const { return m_lastEdenGCLength; }
    void increaseLastFullGCLength(Seconds amount) { m_lastFullGCLength += amount; }

    size_t sizeBeforeLastEdenCollection() const { return m_sizeBeforeLastEdenCollect; }
    size_t sizeAfterLastEdenCollection() const { return m_sizeAfterLastEdenCollect; }
    size_t sizeBeforeLastFullCollection() const { return m_sizeBeforeLastFullCollect; }
    size_t sizeAfterLastFullCollection() const { return m_sizeAfterLastFullCollect; }

    void deleteAllCodeBlocks(DeleteAllCodeEffort);
    void deleteAllUnlinkedCodeBlocks(DeleteAllCodeEffort);

    void didAllocate(size_t);
    bool isPagedOut(MonotonicTime deadline);

    const JITStubRoutineSet& jitStubRoutines() { return *m_jitStubRoutines; }

    void addReference(JSCell*, ArrayBuffer*);

    bool isDeferred() const { return !!m_deferralDepth; }

    StructureIDTable& structureIDTable() { return m_structureIDTable; }

    CodeBlockSet& codeBlockSet() { return *m_codeBlocks; }

#if USE(FOUNDATION)
    template<typename T> void releaseSoon(RetainPtr<T>&&);
#endif
#if USE(GLIB)
    void releaseSoon(std::unique_ptr<JSCGLibWrapperObject>&&);
#endif

    JS_EXPORT_PRIVATE void registerWeakGCMap(WeakGCMapBase* weakGCMap);
    JS_EXPORT_PRIVATE void unregisterWeakGCMap(WeakGCMapBase* weakGCMap);

    void addLogicallyEmptyWeakBlock(WeakBlock*);

#if ENABLE(RESOURCE_USAGE)
    size_t blockBytesAllocated() const { return m_blockBytesAllocated; }
#endif

    void didAllocateBlock(size_t capacity);
    void didFreeBlock(size_t capacity);

    bool mutatorShouldBeFenced() const { return m_mutatorShouldBeFenced; }
    const bool* addressOfMutatorShouldBeFenced() const { return &m_mutatorShouldBeFenced; }

    unsigned barrierThreshold() const { return m_barrierThreshold; }
    const unsigned* addressOfBarrierThreshold() const { return &m_barrierThreshold; }

#if ENABLE(DFG_DOES_GC_VALIDATION)
    bool expectDoesGC() const { return m_expectDoesGC; }
    void setExpectDoesGC(bool value) { m_expectDoesGC = value; }
    bool* addressOfExpectDoesGC() { return &m_expectDoesGC; }
#else
    bool expectDoesGC() const { UNREACHABLE_FOR_PLATFORM(); return true; }
    void setExpectDoesGC(bool) { UNREACHABLE_FOR_PLATFORM(); }
    bool* addressOfExpectDoesGC() { UNREACHABLE_FOR_PLATFORM(); return nullptr; }
#endif

    // If true, the GC believes that the mutator is currently messing with the heap. We call this
    // "having heap access". The GC may block if the mutator is in this state. If false, the GC may
    // currently be doing things to the heap that make the heap unsafe to access for the mutator.
    bool hasAccess() const;

    // If the mutator does not currently have heap access, this function will acquire it. If the GC
    // is currently using the lack of heap access to do dangerous things to the heap then this
    // function will block, waiting for the GC to finish. It's not valid to call this if the mutator
    // already has heap access. The mutator is required to precisely track whether or not it has
    // heap access.
    //
    // It's totally fine to acquireAccess() upon VM instantiation and keep it that way. This is how
    // WebCore uses us. For most other clients, JSLock does acquireAccess()/releaseAccess() for you.
    void acquireAccess();

    // Releases heap access. If the GC is blocking waiting to do bad things to the heap, it will be
    // allowed to run now.
    //
    // Ordinarily, you should use the ReleaseHeapAccessScope to release and then reacquire heap
    // access. You should do this anytime you're about to perform a blocking operation, like waiting
    // on the ParkingLot.
    void releaseAccess();
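
    // Illustrative usage sketch (comment only): clients normally do not call releaseAccess()
    // directly around blocking operations; they use the RAII helper mentioned above, e.g.
    //
    //     {
    //         ReleaseHeapAccessScope releaseScope(vm.heap); // hypothetical call site
    //         condition.wait(lock);                         // safe: the GC may run while we block
    //     } // heap access is reacquired when the scope ends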

    // This is like a super optimized way of saying:
    //
    //     releaseAccess()
    //     acquireAccess()
    //
    // The fast path is an inlined relaxed load and branch. The slow path will block the mutator if
    // the GC wants to do bad things to the heap.
    //
    // All allocations logically call this. As an optimization to improve GC progress, you can call
    // this anywhere that you can afford a load-branch and where an object allocation would have been
    // safe.
    //
    // The GC will also push a stopIfNecessary() event onto the runloop of the thread that
    // instantiated the VM whenever it wants the mutator to stop. This means that if you never block
    // but instead use the runloop to wait for events, then you could safely run in a mode where the
    // mutator has permanent heap access (like the DOM does). If you have good event handling
    // discipline (i.e. you don't block the runloop) then you can be sure that stopIfNecessary() will
    // already be called for you at the right times.
    void stopIfNecessary();

    // This gives the conn to the collector.
    void relinquishConn();

    bool mayNeedToStop();

    void performIncrement(size_t bytes);

    // This is a much stronger kind of stopping of the collector, and it may require waiting for a
    // while. This is meant to be a legacy API for clients of collectAllGarbage that expect that there
    // is no GC before or after that function call. After calling this, you are free to start GCs
    // yourself but you can be sure that none are running.
    //
    // This both prevents new collections from being started asynchronously and waits for any
    // outstanding collections to complete.
    void preventCollection();
    void allowCollection();

    uint64_t mutatorExecutionVersion() const { return m_mutatorExecutionVersion; }
    uint64_t phaseVersion() const { return m_phaseVersion; }

    JS_EXPORT_PRIVATE void addMarkingConstraint(std::unique_ptr<MarkingConstraint>);

    size_t numOpaqueRoots() const { return m_opaqueRoots.size(); }

    HeapVerifier* verifier() const { return m_verifier.get(); }

    void addHeapFinalizerCallback(const HeapFinalizerCallback&);
    void removeHeapFinalizerCallback(const HeapFinalizerCallback&);

    void runTaskInParallel(RefPtr<SharedTask<void(SlotVisitor&)>>);

    template<typename Func>
    void runFunctionInParallel(const Func& func)
    {
        runTaskInParallel(createSharedTask<void(SlotVisitor&)>(func));
    }
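
    // Illustrative usage sketch (comment only): roughly speaking, each participating helper thread
    // runs the functor with its own SlotVisitor, e.g.
    //
    //     heap.runFunctionInParallel([&] (SlotVisitor& visitor) {
    //         // hypothetical per-thread work that may mark cells via 'visitor'
    //     });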

    template<typename Func>
    void forEachSlotVisitor(const Func&);
    unsigned numberOfSlotVisitors();

    Seconds totalGCTime() const { return m_totalGCTime; }

    HashMap<JSImmutableButterfly*, JSString*> immutableButterflyToStringCache;

private:
    friend class AllocatingScope;
    friend class CodeBlock;
    friend class CollectingScope;
    friend class DeferGC;
    friend class DeferGCForAWhile;
    friend class GCAwareJITStubRoutine;
    friend class GCLogging;
    friend class GCThread;
    friend class HandleSet;
    friend class HeapUtil;
    friend class HeapVerifier;
    friend class JITStubRoutine;
    friend class LLIntOffsetsExtractor;
    friend class MarkStackMergingConstraint;
    friend class MarkedSpace;
    friend class BlockDirectory;
    friend class MarkedBlock;
    friend class RunningScope;
    friend class SlotVisitor;
    friend class SpaceTimeMutatorScheduler;
    friend class StochasticSpaceTimeMutatorScheduler;
    friend class SweepingScope;
    friend class IncrementalSweeper;
    friend class VM;
    friend class WeakSet;

    class HeapThread;
    friend class HeapThread;

    static const size_t minExtraMemory = 256;

    class FinalizerOwner : public WeakHandleOwner {
        void finalize(Handle<Unknown>, void* context) override;
    };

    JS_EXPORT_PRIVATE bool isValidAllocation(size_t);
    JS_EXPORT_PRIVATE void reportExtraMemoryAllocatedSlowCase(size_t);
    JS_EXPORT_PRIVATE void deprecatedReportExtraMemorySlowCase(size_t);

    bool shouldCollectInCollectorThread(const AbstractLocker&);
    void collectInCollectorThread();

    void checkConn(GCConductor);

    enum class RunCurrentPhaseResult {
        Finished,
        Continue,
        NeedCurrentThreadState
    };
    RunCurrentPhaseResult runCurrentPhase(GCConductor, CurrentThreadState*);

    // Returns true if we should keep doing things.
    bool runNotRunningPhase(GCConductor);
    bool runBeginPhase(GCConductor);
    bool runFixpointPhase(GCConductor);
    bool runConcurrentPhase(GCConductor);
    bool runReloopPhase(GCConductor);
    bool runEndPhase(GCConductor);
    bool changePhase(GCConductor, CollectorPhase);
    bool finishChangingPhase(GCConductor);

    void collectInMutatorThread();

    void stopThePeriphery(GCConductor);
    void resumeThePeriphery();

    // Returns true if the mutator is stopped, false if the mutator has the conn now.
    bool stopTheMutator();
    void resumeTheMutator();

    JS_EXPORT_PRIVATE void stopIfNecessarySlow();
    bool stopIfNecessarySlow(unsigned);

    template<typename Func>
    void waitForCollector(const Func&);

    JS_EXPORT_PRIVATE void acquireAccessSlow();
    JS_EXPORT_PRIVATE void releaseAccessSlow();

    bool handleGCDidJIT(unsigned);
    void handleGCDidJIT();

    bool handleNeedFinalize(unsigned);
    void handleNeedFinalize();

    bool relinquishConn(unsigned);
    void finishRelinquishingConn();

    void setGCDidJIT();
    void setNeedFinalize();
    void waitWhileNeedFinalize();

    void setMutatorWaiting();
    void clearMutatorWaiting();
    void notifyThreadStopping(const AbstractLocker&);

    typedef uint64_t Ticket;
    Ticket requestCollection(GCRequest);
    void waitForCollection(Ticket);

    void suspendCompilerThreads();
    void willStartCollection();
    void prepareForMarking();

    void gatherStackRoots(ConservativeRoots&);
    void gatherJSStackRoots(ConservativeRoots&);
    void gatherScratchBufferRoots(ConservativeRoots&);
    void beginMarking();
    void visitCompilerWorklistWeakReferences();
    void removeDeadCompilerWorklistEntries();
    void updateObjectCounts();
    void endMarking();

    void reapWeakHandles();
    void pruneStaleEntriesFromWeakGCMaps();
    void sweepArrayBuffers();
    void snapshotUnswept();
    void deleteSourceProviderCaches();
    void notifyIncrementalSweeper();
    void harvestWeakReferences();

    template<typename CellType, typename CellSet>
    void finalizeMarkedUnconditionalFinalizers(CellSet&);

    void finalizeUnconditionalFinalizers();

    void deleteUnmarkedCompiledCode();
    JS_EXPORT_PRIVATE void addToRememberedSet(const JSCell*);
    void updateAllocationLimits();
    void didFinishCollection();
    void resumeCompilerThreads();
    void gatherExtraHeapSnapshotData(HeapProfiler&);
    void removeDeadHeapSnapshotNodes(HeapProfiler&);
    void finalize();
    void sweepInFinalize();

    void sweepAllLogicallyEmptyWeakBlocks();
    bool sweepNextLogicallyEmptyWeakBlock();

    bool shouldDoFullCollection();

    void incrementDeferralDepth();
    void decrementDeferralDepth();
    void decrementDeferralDepthAndGCIfNeeded();
    JS_EXPORT_PRIVATE void decrementDeferralDepthAndGCIfNeededSlow();

    size_t visitCount();
    size_t bytesVisited();

    void forEachCodeBlockImpl(const ScopedLambda<void(CodeBlock*)>&);
    void forEachCodeBlockIgnoringJITPlansImpl(const AbstractLocker& codeBlockSetLocker, const ScopedLambda<void(CodeBlock*)>&);

    void setMutatorShouldBeFenced(bool value);

    void addCoreConstraints();

    enum class MemoryThresholdCallType {
        Cached,
        Direct
    };

    bool overCriticalMemoryThreshold(MemoryThresholdCallType memoryThresholdCallType = MemoryThresholdCallType::Cached);

    template<typename Func>
    void iterateExecutingAndCompilingCodeBlocks(const Func&);

    template<typename Func>
    void iterateExecutingAndCompilingCodeBlocksWithoutHoldingLocks(const Func&);

    void assertMarkStacksEmpty();

    void setBonusVisitorTask(RefPtr<SharedTask<void(SlotVisitor&)>>);

    void dumpHeapStatisticsAtVMDestruction();

    static bool useGenerationalGC();
    static bool shouldSweepSynchronously();

    const HeapType m_heapType;
    MutatorState m_mutatorState { MutatorState::Running };
    const size_t m_ramSize;
    const size_t m_minBytesPerCycle;
    size_t m_sizeAfterLastCollect { 0 };
    size_t m_sizeAfterLastFullCollect { 0 };
    size_t m_sizeBeforeLastFullCollect { 0 };
    size_t m_sizeAfterLastEdenCollect { 0 };
    size_t m_sizeBeforeLastEdenCollect { 0 };

    size_t m_bytesAllocatedThisCycle { 0 };
    size_t m_bytesAbandonedSinceLastFullCollect { 0 };
    size_t m_maxEdenSize;
    size_t m_maxEdenSizeWhenCritical;
    size_t m_maxHeapSize;
    size_t m_totalBytesVisited { 0 };
    size_t m_totalBytesVisitedThisCycle { 0 };
    double m_incrementBalance { 0 };

    bool m_shouldDoFullCollection { false };
    Markable<CollectionScope, EnumMarkableTraits<CollectionScope>> m_collectionScope;
    Markable<CollectionScope, EnumMarkableTraits<CollectionScope>> m_lastCollectionScope;
    Lock m_raceMarkStackLock;
#if ENABLE(DFG_DOES_GC_VALIDATION)
    bool m_expectDoesGC { true };
#endif

    StructureIDTable m_structureIDTable;
    MarkedSpace m_objectSpace;
    GCIncomingRefCountedSet<ArrayBuffer> m_arrayBuffers;
    size_t m_extraMemorySize { 0 };
    size_t m_deprecatedExtraMemorySize { 0 };

    HashSet<const JSCell*> m_copyingRememberedSet;

    ProtectCountSet m_protectedValues;
    std::unique_ptr<HashSet<MarkedArgumentBuffer*>> m_markListSet;

    std::unique_ptr<MachineThreads> m_machineThreads;

    std::unique_ptr<SlotVisitor> m_collectorSlotVisitor;
    std::unique_ptr<SlotVisitor> m_mutatorSlotVisitor;
    std::unique_ptr<MarkStackArray> m_mutatorMarkStack;
    std::unique_ptr<MarkStackArray> m_raceMarkStack;
    std::unique_ptr<MarkingConstraintSet> m_constraintSet;

    // We pool the slot visitors used by parallel marking threads. It's useful to be able to
    // enumerate over them, and it's useful to have them cache some small amount of memory from
    // one GC to the next. GC marking threads claim these at the start of marking, and return
    // them at the end.
    Vector<std::unique_ptr<SlotVisitor>> m_parallelSlotVisitors;
    Vector<SlotVisitor*> m_availableParallelSlotVisitors;

    HandleSet m_handleSet;
    std::unique_ptr<CodeBlockSet> m_codeBlocks;
    std::unique_ptr<JITStubRoutineSet> m_jitStubRoutines;
    FinalizerOwner m_finalizerOwner;

    Lock m_parallelSlotVisitorLock;
    bool m_isSafeToCollect { false };
    bool m_isShuttingDown { false };
    bool m_mutatorShouldBeFenced { Options::forceFencedBarrier() };

    unsigned m_barrierThreshold { Options::forceFencedBarrier() ? tautologicalThreshold : blackThreshold };

    VM* m_vm;
    Seconds m_lastFullGCLength { 10_ms };
    Seconds m_lastEdenGCLength { 10_ms };

    Vector<WeakBlock*> m_logicallyEmptyWeakBlocks;
    size_t m_indexOfNextLogicallyEmptyWeakBlockToSweep { WTF::notFound };

    RefPtr<FullGCActivityCallback> m_fullActivityCallback;
    RefPtr<GCActivityCallback> m_edenActivityCallback;
    Ref<IncrementalSweeper> m_sweeper;
    Ref<StopIfNecessaryTimer> m_stopIfNecessaryTimer;

    Vector<HeapObserver*> m_observers;

    Vector<HeapFinalizerCallback> m_heapFinalizerCallbacks;

    std::unique_ptr<HeapVerifier> m_verifier;

#if USE(FOUNDATION)
    Vector<RetainPtr<CFTypeRef>> m_delayedReleaseObjects;
    unsigned m_delayedReleaseRecursionCount { 0 };
#endif
#if USE(GLIB)
    Vector<std::unique_ptr<JSCGLibWrapperObject>> m_delayedReleaseObjects;
    unsigned m_delayedReleaseRecursionCount { 0 };
#endif
    unsigned m_deferralDepth { 0 };

    HashSet<WeakGCMapBase*> m_weakGCMaps;

    std::unique_ptr<MarkStackArray> m_sharedCollectorMarkStack;
    std::unique_ptr<MarkStackArray> m_sharedMutatorMarkStack;
    unsigned m_numberOfActiveParallelMarkers { 0 };
    unsigned m_numberOfWaitingParallelMarkers { 0 };

    ConcurrentPtrHashSet m_opaqueRoots;
    static const size_t s_blockFragmentLength = 32;

    ParallelHelperClient m_helperClient;
    RefPtr<SharedTask<void(SlotVisitor&)>> m_bonusVisitorTask;

#if ENABLE(RESOURCE_USAGE)
    size_t m_blockBytesAllocated { 0 };
    size_t m_externalMemorySize { 0 };
#endif

    std::unique_ptr<MutatorScheduler> m_scheduler;

    static const unsigned mutatorHasConnBit = 1u << 0u; // Must also be protected by threadLock.
    static const unsigned stoppedBit = 1u << 1u; // Only set when !hasAccessBit
    static const unsigned hasAccessBit = 1u << 2u;
    static const unsigned gcDidJITBit = 1u << 3u; // Set when the GC did some JITing, so on resume we need to cpuid.
    static const unsigned needFinalizeBit = 1u << 4u;
    static const unsigned mutatorWaitingBit = 1u << 5u; // Allows the mutator to use this as a condition variable.
    Atomic<unsigned> m_worldState;
    bool m_worldIsStopped { false };
    Lock m_visitRaceLock;
    Lock m_markingMutex;
    Condition m_markingConditionVariable;

    MonotonicTime m_beforeGC;
    MonotonicTime m_afterGC;
    MonotonicTime m_stopTime;

    Deque<GCRequest> m_requests;
    GCRequest m_currentRequest;
    Ticket m_lastServedTicket { 0 };
    Ticket m_lastGrantedTicket { 0 };

    CollectorPhase m_lastPhase { CollectorPhase::NotRunning };
    CollectorPhase m_currentPhase { CollectorPhase::NotRunning };
    CollectorPhase m_nextPhase { CollectorPhase::NotRunning };
    bool m_threadShouldStop { false };
    bool m_threadIsStopping { false };
    bool m_mutatorDidRun { true };
    bool m_didDeferGCWork { false };
    bool m_shouldStopCollectingContinuously { false };

    uint64_t m_mutatorExecutionVersion { 0 };
    uint64_t m_phaseVersion { 0 };
    Box<Lock> m_threadLock;
    Ref<AutomaticThreadCondition> m_threadCondition; // The mutator must not wait on this. It would cause a deadlock.
    RefPtr<AutomaticThread> m_thread;

    RefPtr<Thread> m_collectContinuouslyThread { nullptr };

    MonotonicTime m_lastGCStartTime;
    MonotonicTime m_lastGCEndTime;
    MonotonicTime m_currentGCStartTime;
    Seconds m_totalGCTime;

    uintptr_t m_barriersExecuted { 0 };

    CurrentThreadState* m_currentThreadState { nullptr };
    Thread* m_currentThread { nullptr }; // It's OK if this becomes a dangling pointer.

#if PLATFORM(IOS_FAMILY)
    unsigned m_precentAvailableMemoryCachedCallCount;
    bool m_overCriticalMemoryThreshold;
#endif

    bool m_parallelMarkersShouldExit { false };
    Lock m_collectContinuouslyLock;
    Condition m_collectContinuouslyCondition;
};

} // namespace JSC