/*
 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "HandleTypes.h"
#include "IterationStatus.h"
#include "MarkStack.h"
#include "VisitRaceKey.h"
#include <wtf/Forward.h>
#include <wtf/MonotonicTime.h>
#include <wtf/SharedTask.h>
#include <wtf/text/CString.h>

namespace JSC {

class ConservativeRoots;
class GCThreadSharedData;
class Heap;
class HeapCell;
class HeapSnapshotBuilder;
class MarkedBlock;
class MarkingConstraint;
class MarkingConstraintSolver;
template<typename T> class Weak;
template<typename T, typename Traits> class WriteBarrierBase;

typedef uint32_t HeapVersion;

class SlotVisitor {
    WTF_MAKE_NONCOPYABLE(SlotVisitor);
    WTF_MAKE_FAST_ALLOCATED;

    friend class SetCurrentCellScope;
    friend class Heap;

public:
    enum RootMarkReason {
        None,
        ConservativeScan,
        StrongReferences,
        ProtectedValues,
        MarkListSet,
        VMExceptions,
        StrongHandles,
        Debugger,
        JITStubRoutines,
        WeakSets,
        Output,
        DFGWorkLists,
        CodeBlocks,
        DOMGCOutput,
    };

    SlotVisitor(Heap&, CString codeName);
    ~SlotVisitor();

    MarkStackArray& collectorMarkStack() { return m_collectorStack; }
    MarkStackArray& mutatorMarkStack() { return m_mutatorStack; }
    const MarkStackArray& collectorMarkStack() const { return m_collectorStack; }
    const MarkStackArray& mutatorMarkStack() const { return m_mutatorStack; }

    VM& vm();
    const VM& vm() const;
    Heap* heap() const;

    void append(const ConservativeRoots&);

    template<typename T, typename Traits> void append(const WriteBarrierBase<T, Traits>&);
    template<typename T, typename Traits> void appendHidden(const WriteBarrierBase<T, Traits>&);
    template<typename Iterator> void append(Iterator begin, Iterator end);
    void appendValues(const WriteBarrierBase<Unknown, DumbValueTraits<Unknown>>*, size_t count);
    void appendValuesHidden(const WriteBarrierBase<Unknown, DumbValueTraits<Unknown>>*, size_t count);

    // These don't require you to prove that you have a WriteBarrier<>. That makes sense
    // for:
    //
    // - roots.
    // - sophisticated data structures that barrier through other means (like DFG::Plan and
    //   friends).
    //
    // If you are not a root and you don't know what kind of barrier you have, then you
    // shouldn't call these methods.
    void appendUnbarriered(JSValue);
    void appendUnbarriered(JSValue*, size_t);
    void appendUnbarriered(JSCell*);
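    // Illustrative usage sketch (RootList and m_values are hypothetical): a true root
    // container that owns raw JSValues can report them without a WriteBarrier<> proof:
    //
    //     void RootList::visit(SlotVisitor& visitor)
    //     {
    //         for (JSValue value : m_values)
    //             visitor.appendUnbarriered(value);
    //     }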

    template<typename T>
    void append(const Weak<T>& weak);

    void appendHiddenUnbarriered(JSValue);
    void appendHiddenUnbarriered(JSCell*);

    bool addOpaqueRoot(void*); // Returns true if the root was new.

    bool containsOpaqueRoot(void*) const;
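    // Illustrative sketch (wrapper and opaqueRoot() are hypothetical): opaque roots let
    // marking record that some non-cell structure is reachable, so that weak-handle owners
    // can later decide whether to keep their wrappers alive:
    //
    //     visitor.addOpaqueRoot(wrapper->opaqueRoot());        // while visiting the wrapper
    //     ...
    //     if (visitor.containsOpaqueRoot(wrapper->opaqueRoot()))
    //         return true; // e.g. in an isReachableFromOpaqueRoots-style callback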
| 119 | |
| 120 | bool isEmpty() { return m_collectorStack.isEmpty() && m_mutatorStack.isEmpty(); } |
| 121 | |
| 122 | void didStartMarking(); |
| 123 | void reset(); |
| 124 | void clearMarkStacks(); |
| 125 | |
| 126 | size_t bytesVisited() const { return m_bytesVisited; } |
| 127 | size_t visitCount() const { return m_visitCount; } |
| 128 | |
| 129 | void addToVisitCount(size_t value) { m_visitCount += value; } |
| 130 | |
| 131 | void donate(); |
| 132 | void drain(MonotonicTime timeout = MonotonicTime::infinity()); |
| 133 | void donateAndDrain(MonotonicTime timeout = MonotonicTime::infinity()); |
| 134 | |
| 135 | enum SharedDrainMode { SlaveDrain, MasterDrain }; |
| 136 | enum class SharedDrainResult { Done, TimedOut }; |
| 137 | SharedDrainResult drainFromShared(SharedDrainMode, MonotonicTime timeout = MonotonicTime::infinity()); |
| 138 | |
| 139 | SharedDrainResult drainInParallel(MonotonicTime timeout = MonotonicTime::infinity()); |
| 140 | SharedDrainResult drainInParallelPassively(MonotonicTime timeout = MonotonicTime::infinity()); |
| 141 | |
| 142 | SharedDrainResult waitForTermination(MonotonicTime timeout = MonotonicTime::infinity()); |
| 143 | |
    // Attempts to perform an increment of draining that involves only walking `bytes` worth of data.
    // In practice it is likely to walk somewhat more or less than that: it will usually mark more
    // than `bytes`, and it may mark less than `bytes` if we're reaching termination or if the global
    // worklist is empty (which may in rare cases happen temporarily even if we're not reaching
    // termination).
    size_t performIncrementOfDraining(size_t bytes);
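    // Illustrative sketch (the byte budget is hypothetical): an incremental marking step can
    // bound a pause by draining roughly a fixed budget per slice:
    //
    //     static constexpr size_t bytesPerSlice = 32 * 1024;
    //     size_t visited = visitor.performIncrementOfDraining(bytesPerSlice);
    //     // As noted above, `visited` may overshoot or undershoot the budget.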

    // This informs the GC about an auxiliary allocation of some size that we are keeping
    // alive. If you don't do this then the space will be freed at the end of the GC.
    void markAuxiliary(const void* base);
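    // Illustrative sketch (m_storage is hypothetical): a cell whose visitChildren sees a
    // pointer to out-of-line auxiliary storage keeps that storage alive like so:
    //
    //     visitor.markAuxiliary(thisObject->m_storage);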

    void reportExtraMemoryVisited(size_t);
#if ENABLE(RESOURCE_USAGE)
    void reportExternalMemoryVisited(size_t);
#endif

    void dump(PrintStream&) const;

    bool isBuildingHeapSnapshot() const { return !!m_heapSnapshotBuilder; }
    HeapSnapshotBuilder* heapSnapshotBuilder() const { return m_heapSnapshotBuilder; }

    RootMarkReason rootMarkReason() const { return m_rootMarkReason; }
    void setRootMarkReason(RootMarkReason reason) { m_rootMarkReason = reason; }

    HeapVersion markingVersion() const { return m_markingVersion; }

    bool mutatorIsStopped() const { return m_mutatorIsStopped; }

    Lock& rightToRun() { return m_rightToRun; }

    void updateMutatorIsStopped(const AbstractLocker&);
    void updateMutatorIsStopped();

    bool hasAcknowledgedThatTheMutatorIsResumed() const;
    bool mutatorIsStoppedIsUpToDate() const;

    void optimizeForStoppedMutator();

    void didRace(const VisitRaceKey&);
    void didRace(JSCell* cell, const char* reason) { didRace(VisitRaceKey(cell, reason)); }

    void visitAsConstraint(const JSCell*);

    bool didReachTermination();

    void setIgnoreNewOpaqueRoots(bool value) { m_ignoreNewOpaqueRoots = value; }

    void donateAll();

    const char* codeName() const { return m_codeName.data(); }

    JS_EXPORT_PRIVATE void addParallelConstraintTask(RefPtr<SharedTask<void(SlotVisitor&)>>);

private:
    friend class ParallelModeEnabler;
    friend class MarkingConstraintSolver;

    void appendJSCellOrAuxiliary(HeapCell*);

    JS_EXPORT_PRIVATE void appendSlow(JSCell*, Dependency);
    JS_EXPORT_PRIVATE void appendHiddenSlow(JSCell*, Dependency);
    void appendHiddenSlowImpl(JSCell*, Dependency);

    template<typename ContainerType>
    void setMarkedAndAppendToMarkStack(ContainerType&, JSCell*, Dependency);

    void appendToMarkStack(JSCell*);

    template<typename ContainerType>
    void appendToMarkStack(ContainerType&, JSCell*);

    void appendToMutatorMarkStack(const JSCell*);

    void noteLiveAuxiliaryCell(HeapCell*);

    void visitChildren(const JSCell*);

    void propagateExternalMemoryVisitedIfNecessary();

    void donateKnownParallel();
    void donateKnownParallel(MarkStackArray& from, MarkStackArray& to);

    void donateAll(const AbstractLocker&);

    bool hasWork(const AbstractLocker&);
    bool didReachTermination(const AbstractLocker&);

    template<typename Func>
    IterationStatus forEachMarkStack(const Func&);

    MarkStackArray& correspondingGlobalStack(MarkStackArray&);

    MarkStackArray m_collectorStack;
    MarkStackArray m_mutatorStack;

    size_t m_bytesVisited;
    size_t m_visitCount;
    size_t m_nonCellVisitCount { 0 }; // Used for incremental draining, ignored otherwise.
    Checked<size_t, RecordOverflow> m_extraMemorySize { 0 };
    bool m_isInParallelMode;
    bool m_ignoreNewOpaqueRoots { false }; // Useful as a debugging mode.

    HeapVersion m_markingVersion;

    Heap& m_heap;

    HeapSnapshotBuilder* m_heapSnapshotBuilder { nullptr };
    JSCell* m_currentCell { nullptr };
    RootMarkReason m_rootMarkReason { RootMarkReason::None };
    bool m_isFirstVisit { false };
    bool m_mutatorIsStopped { false };
    bool m_canOptimizeForStoppedMutator { false };
    Lock m_rightToRun;

    CString m_codeName;

    MarkingConstraint* m_currentConstraint { nullptr };
    MarkingConstraintSolver* m_currentSolver { nullptr };

public:
#if !ASSERT_DISABLED
    bool m_isCheckingForDefaultMarkViolation;
    bool m_isDraining;
#endif
};

class ParallelModeEnabler {
public:
    ParallelModeEnabler(SlotVisitor& stack)
        : m_stack(stack)
    {
        ASSERT(!m_stack.m_isInParallelMode);
        m_stack.m_isInParallelMode = true;
    }

    ~ParallelModeEnabler()
    {
        ASSERT(m_stack.m_isInParallelMode);
        m_stack.m_isInParallelMode = false;
    }

private:
    SlotVisitor& m_stack;
};
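
// Illustrative usage sketch: parallel draining is only legal while a ParallelModeEnabler is
// live on the stack, so a marker thread typically brackets its drain loop like this:
//
//     {
//         ParallelModeEnabler enabler(visitor);
//         visitor.drainFromShared(SlotVisitor::SlaveDrain);
//     } // Parallel mode is cleared when the enabler is destroyed.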

class SetRootMarkReasonScope {
public:
    SetRootMarkReasonScope(SlotVisitor& visitor, SlotVisitor::RootMarkReason reason)
        : m_visitor(visitor)
        , m_previousReason(visitor.rootMarkReason())
    {
        m_visitor.setRootMarkReason(reason);
    }

    ~SetRootMarkReasonScope()
    {
        m_visitor.setRootMarkReason(m_previousReason);
    }

private:
    SlotVisitor& m_visitor;
    SlotVisitor::RootMarkReason m_previousReason;
};
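
// Illustrative usage sketch: each root-scanning phase tags the visitor so that heap-snapshot
// machinery can attribute what gets marked; the previous reason is restored on scope exit:
//
//     {
//         SetRootMarkReasonScope scope(visitor, SlotVisitor::RootMarkReason::ConservativeScan);
//         visitor.append(conservativeRoots);
//     }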

} // namespace JSC