1 | /* |
2 | * Copyright (C) 2017 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' |
14 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
15 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS |
17 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
18 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
19 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
20 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
21 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
22 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
23 | * THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "MarkingConstraintSolver.h" |
28 | |
29 | #include "JSCInlines.h" |
30 | #include "MarkingConstraintSet.h" |
31 | |
32 | namespace JSC { |
33 | |
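// The solver runs the heap's marking constraints during a GC fixpoint, using one of the
// scheduling strategies below (drain or converge). The constructor snapshots a visit counter
// for each SlotVisitor that currently exists so that didVisitSomething() can later tell
// whether running constraints caused any marking.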
34 | MarkingConstraintSolver::MarkingConstraintSolver(MarkingConstraintSet& set) |
35 | : m_heap(set.m_heap) |
36 | , m_mainVisitor(m_heap.collectorSlotVisitor()) |
37 | , m_set(set) |
38 | { |
39 | m_heap.forEachSlotVisitor( |
40 | [&] (SlotVisitor& visitor) { |
41 | m_visitCounters.append(VisitCounter(visitor)); |
42 | }); |
43 | } |
44 | |
45 | MarkingConstraintSolver::~MarkingConstraintSolver() |
46 | { |
47 | } |
48 | |
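// Returns true if any SlotVisitor has visited something since this solver was constructed.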
49 | bool MarkingConstraintSolver::didVisitSomething() const |
50 | { |
51 | for (const VisitCounter& visitCounter : m_visitCounters) { |
52 | if (visitCounter.visitCount()) |
53 | return true; |
54 | } |
    // If SlotVisitors were added after m_visitCounters was created, conservatively assume
    // that the new visitors may have visited something.
57 | if (m_heap.numberOfSlotVisitors() > m_visitCounters.size()) |
58 | return true; |
59 | return false; |
60 | } |
61 | |
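// The top-level scheduling loop: runs constraints supplied by pickNext, in parallel when
// Options::useParallelMarkingConstraintSolver() is set. Constraints that must run sequentially
// are deferred and executed on the main visitor after the loop finishes.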
62 | void MarkingConstraintSolver::execute(SchedulerPreference preference, ScopedLambda<Optional<unsigned>()> pickNext) |
63 | { |
64 | m_pickNextIsStillActive = true; |
65 | RELEASE_ASSERT(!m_numThreadsThatMayProduceWork); |
66 | |
67 | if (Options::useParallelMarkingConstraintSolver()) { |
68 | if (Options::logGC()) |
            dataLog(preference == ParallelWorkFirst ? "P" : "N", "<");
70 | |
71 | m_heap.runFunctionInParallel( |
72 | [&] (SlotVisitor& visitor) { runExecutionThread(visitor, preference, pickNext); }); |
73 | |
74 | if (Options::logGC()) |
            dataLog(">");
76 | } else |
77 | runExecutionThread(m_mainVisitor, preference, pickNext); |
78 | |
79 | RELEASE_ASSERT(!m_pickNextIsStillActive); |
80 | RELEASE_ASSERT(!m_numThreadsThatMayProduceWork); |
81 | |
82 | if (!m_toExecuteSequentially.isEmpty()) { |
83 | for (unsigned indexToRun : m_toExecuteSequentially) |
84 | execute(*m_set.m_set[indexToRun]); |
85 | m_toExecuteSequentially.clear(); |
86 | } |
87 | |
88 | RELEASE_ASSERT(m_toExecuteInParallel.isEmpty()); |
89 | } |
90 | |
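// Executes every constraint whose index is set in `unexecuted` and then clears that set.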
91 | void MarkingConstraintSolver::drain(BitVector& unexecuted) |
92 | { |
93 | auto iter = unexecuted.begin(); |
94 | auto end = unexecuted.end(); |
95 | if (iter == end) |
96 | return; |
97 | auto pickNext = scopedLambda<Optional<unsigned>()>( |
98 | [&] () -> Optional<unsigned> { |
99 | if (iter == end) |
100 | return WTF::nullopt; |
101 | return *iter++; |
102 | }); |
103 | execute(NextConstraintFirst, pickNext); |
104 | unexecuted.clearAll(); |
105 | } |
106 | |
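// Runs constraints in the given order, stopping as soon as some constraint generates marking
// work, since at that point it's better to go back to draining (see the comment below).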
107 | void MarkingConstraintSolver::converge(const Vector<MarkingConstraint*>& order) |
108 | { |
109 | if (didVisitSomething()) |
110 | return; |
111 | |
112 | if (order.isEmpty()) |
113 | return; |
114 | |
115 | size_t index = 0; |
116 | |
117 | // We want to execute the first constraint sequentially if we think it will quickly give us a |
118 | // result. If we ran it in parallel to other constraints, then we might end up having to wait for |
119 | // those other constraints to finish, which would be a waste of time since during convergence it's |
    // empirically best to return to draining as soon as a constraint generates work. Most
121 | // constraints don't generate any work most of the time, and when they do generate work, they tend |
122 | // to generate enough of it to feed a decent draining cycle. Therefore, pause times are lowest if |
123 | // we get the heck out of here as soon as a constraint generates work. I think that part of what |
124 | // makes this optimal is that we also never abort running a constraint early, so when we do run |
125 | // one, it has an opportunity to generate as much work as it possibly can. |
126 | if (order[index]->quickWorkEstimate(m_mainVisitor) > 0.) { |
127 | execute(*order[index++]); |
128 | |
129 | if (m_toExecuteInParallel.isEmpty() |
130 | && (order.isEmpty() || didVisitSomething())) |
131 | return; |
132 | } |
133 | |
134 | auto pickNext = scopedLambda<Optional<unsigned>()>( |
135 | [&] () -> Optional<unsigned> { |
136 | if (didVisitSomething()) |
137 | return WTF::nullopt; |
138 | |
139 | if (index >= order.size()) |
140 | return WTF::nullopt; |
141 | |
142 | MarkingConstraint& constraint = *order[index++]; |
143 | return constraint.index(); |
144 | }); |
145 | |
146 | execute(ParallelWorkFirst, pickNext); |
147 | } |
148 | |
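// Sequential helper: runs the constraint on the main visitor unless it has already executed.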
149 | void MarkingConstraintSolver::execute(MarkingConstraint& constraint) |
150 | { |
151 | if (m_executed.get(constraint.index())) |
152 | return; |
153 | |
154 | constraint.prepareToExecute(NoLockingNecessary, m_mainVisitor); |
155 | constraint.execute(m_mainVisitor); |
156 | m_executed.set(constraint.index()); |
157 | } |
158 | |
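// Called while a parallel-capable constraint is executing, to queue extra work that other
// solver threads can pick up. A constraint typically reaches this through its visitor; roughly
// (an illustrative sketch, not code in this file):
//
//     visitor.addParallelConstraintTask(createSharedTask<void(SlotVisitor&)>(
//         [] (SlotVisitor& visitor) {
//             // ... visit some subset of the roots ...
//         }));
//
// which forwards here via the m_currentConstraint/m_currentSolver fields that
// runExecutionThread() sets below.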
159 | void MarkingConstraintSolver::addParallelTask(RefPtr<SharedTask<void(SlotVisitor&)>> task, MarkingConstraint& constraint) |
160 | { |
161 | auto locker = holdLock(m_lock); |
162 | m_toExecuteInParallel.append(TaskWithConstraint(WTFMove(task), &constraint)); |
163 | } |
164 | |
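// The per-thread scheduling loop. Holding the lock, a thread either claims a queued parallel
// task or asks pickNext for a fresh constraint (in an order controlled by preference), then
// executes the claim outside the lock. When nothing is claimable but some thread is still
// running a constraint that may produce parallel work, the thread waits to be notified.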
165 | void MarkingConstraintSolver::runExecutionThread(SlotVisitor& visitor, SchedulerPreference preference, ScopedLambda<Optional<unsigned>()> pickNext) |
166 | { |
167 | for (;;) { |
        bool doParallelWorkMode = false;
169 | MarkingConstraint* constraint = nullptr; |
170 | unsigned indexToRun = UINT_MAX; |
171 | TaskWithConstraint task; |
172 | { |
173 | auto locker = holdLock(m_lock); |
174 | |
175 | for (;;) { |
176 | auto tryParallelWork = [&] () -> bool { |
177 | if (m_toExecuteInParallel.isEmpty()) |
178 | return false; |
179 | |
180 | task = m_toExecuteInParallel.first(); |
181 | constraint = task.constraint; |
182 | doParallelWorkMode = true; |
183 | return true; |
184 | }; |
185 | |
186 | auto tryNextConstraint = [&] () -> bool { |
187 | if (!m_pickNextIsStillActive) |
188 | return false; |
189 | |
190 | for (;;) { |
191 | Optional<unsigned> pickResult = pickNext(); |
192 | if (!pickResult) { |
193 | m_pickNextIsStillActive = false; |
194 | return false; |
195 | } |
196 | |
197 | if (m_executed.get(*pickResult)) |
198 | continue; |
199 | |
200 | MarkingConstraint& candidateConstraint = *m_set.m_set[*pickResult]; |
201 | if (candidateConstraint.concurrency() == ConstraintConcurrency::Sequential) { |
202 | m_toExecuteSequentially.append(*pickResult); |
203 | continue; |
204 | } |
205 | if (candidateConstraint.parallelism() == ConstraintParallelism::Parallel) |
206 | m_numThreadsThatMayProduceWork++; |
207 | indexToRun = *pickResult; |
208 | constraint = &candidateConstraint; |
209 | doParallelWorkMode = false; |
210 | constraint->prepareToExecute(locker, visitor); |
211 | return true; |
212 | } |
213 | }; |
214 | |
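                // ParallelWorkFirst drains queued parallel tasks before claiming new
                // constraints; NextConstraintFirst does the opposite.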
215 | if (preference == ParallelWorkFirst) { |
216 | if (tryParallelWork() || tryNextConstraint()) |
217 | break; |
218 | } else { |
219 | if (tryNextConstraint() || tryParallelWork()) |
220 | break; |
221 | } |
222 | |
223 | // This means that we have nothing left to run. The only way for us to have more work is |
224 | // if someone is running a constraint that may produce parallel work. |
225 | |
226 | if (!m_numThreadsThatMayProduceWork) |
227 | return; |
228 | |
229 | // FIXME: Any waiting could be replaced with just running the SlotVisitor. |
230 | // I wonder if that would be profitable. |
231 | m_condition.wait(m_lock); |
232 | } |
233 | } |
234 | |
235 | if (doParallelWorkMode) |
236 | constraint->doParallelWork(visitor, *task.task); |
237 | else { |
238 | if (constraint->parallelism() == ConstraintParallelism::Parallel) { |
239 | visitor.m_currentConstraint = constraint; |
240 | visitor.m_currentSolver = this; |
241 | } |
242 | |
243 | constraint->execute(visitor); |
244 | |
245 | visitor.m_currentConstraint = nullptr; |
246 | visitor.m_currentSolver = nullptr; |
247 | } |
248 | |
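        // Retire the parallel task or record the constraint as executed, then wake any
        // threads waiting for work.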
249 | { |
250 | auto locker = holdLock(m_lock); |
251 | |
252 | if (doParallelWorkMode) { |
253 | if (!m_toExecuteInParallel.isEmpty() |
254 | && task == m_toExecuteInParallel.first()) |
255 | m_toExecuteInParallel.takeFirst(); |
256 | else |
257 | ASSERT(!m_toExecuteInParallel.contains(task)); |
258 | } else { |
259 | if (constraint->parallelism() == ConstraintParallelism::Parallel) |
260 | m_numThreadsThatMayProduceWork--; |
261 | m_executed.set(indexToRun); |
262 | } |
263 | |
264 | m_condition.notifyAll(); |
265 | } |
266 | } |
267 | } |
268 | |
269 | } // namespace JSC |