/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CodeBlock.h"

#include "ArithProfile.h"
#include "BasicBlockLocation.h"
#include "BytecodeDumper.h"
#include "BytecodeGenerator.h"
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeStructs.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "CodeBlockInlines.h"
#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
#include "EvalCodeBlock.h"
#include "FullCodeOrigin.h"
#include "FunctionCodeBlock.h"
#include "FunctionExecutableDump.h"
#include "GetPutInfo.h"
#include "InlineCallFrame.h"
#include "Instruction.h"
#include "InstructionStream.h"
#include "InterpreterInlines.h"
#include "IsoCellSetInlines.h"
#include "JIT.h"
#include "JITMathIC.h"
#include "JSBigInt.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSModuleEnvironment.h"
#include "JSSet.h"
#include "JSString.h"
#include "JSTemplateObjectDescriptor.h"
#include "LLIntData.h"
#include "LLIntEntrypoint.h"
#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
#include "MetadataTable.h"
#include "ModuleProgramCodeBlock.h"
#include "ObjectAllocationProfileInlines.h"
#include "OpcodeInlines.h"
#include "PCToCodeOriginMap.h"
#include "PolymorphicAccess.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "StructureStubInfo.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/Forward.h>
#include <wtf/SimpleStats.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringConcatenateNumbers.h>
#include <wtf/text/UniquedStringImpl.h>

#if ENABLE(ASSEMBLER)
#include "RegisterAtOffsetList.h"
#endif

#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {

const ClassInfo CodeBlock::s_info = {
    "CodeBlock", nullptr, nullptr, nullptr,
    CREATE_METHOD_TABLE(CodeBlock)
};

CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
    case ModuleCode:
        return "<module>";
    default:
        CRASH();
        return CString("", 0);
    }
}

bool CodeBlock::hasHash() const
{
    return !!m_hash;
}

bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}

CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
    }
    return m_hash;
}

CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source().provider();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
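    // The unlinked executable records offsets relative to where the function was originally
    // parsed; shifting by the difference between the linked and unlinked start offsets maps
    // those offsets into this provider's source text.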
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}

CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}

CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}

void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(alternative()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionsSize());
    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->neverOptimize())
        out.print(" (NeverOptimize)");
    else if (ownerExecutable()->neverFTLOptimize())
        out.print(" (NeverFTLOptimize)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (m_didFailJITCompilation)
        out.print(" (JITFail)");
    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}

void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}

void CodeBlock::dumpSource()
{
    dumpSource(WTF::dataFile());
}

void CodeBlock::dumpSource(PrintStream& out)
{
    ScriptExecutable* executable = ownerExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        StringView source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
        return;
    }
    out.print(executable->source().view());
}

void CodeBlock::dumpBytecode()
{
    dumpBytecode(WTF::dataFile());
}

void CodeBlock::dumpBytecode(PrintStream& out)
{
    ICStatusMap statusMap;
    getICStatusMap(statusMap);
    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
}

void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
{
    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
}

void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
{
    const auto it = instructions().at(bytecodeOffset);
    dumpBytecode(out, it, statusMap);
}

namespace {

class PutToScopeFireDetail : public FireDetail {
public:
    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
        : m_codeBlock(codeBlock)
        , m_ident(ident)
    {
    }

    void dump(PrintStream& out) const override
    {
        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
    }

private:
    CodeBlock* m_codeBlock;
    const Identifier& m_ident;
};

} // anonymous namespace

CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
    : JSCell(*vm, structure)
    , m_globalObject(other.m_globalObject)
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_numCalleeLocals(other.m_numCalleeLocals)
    , m_numVars(other.m_numVars)
    , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_bytecodeCost(other.m_bytecodeCost)
    , m_scopeRegister(other.m_scopeRegister)
    , m_hash(other.m_hash)
    , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
    , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructionsRawPointer(other.m_instructionsRawPointer)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_metadata(other.m_metadata)
    , m_creationTime(MonotonicTime::now())
{
    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(source().provider());
    setNumParameters(other.numParameters());

    vm->heap.codeBlockSet().add(this);
}

void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
{
    Base::finishCreation(vm);
    finishCreationCommon(vm);

    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }
}

CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
    : JSCell(*vm, structure)
    , m_globalObject(*vm, this, scope->globalObject(*vm))
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
    , m_numVars(unlinkedCodeBlock->numVars())
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
    , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
    , m_ownerExecutable(*vm, this, ownerExecutable)
    , m_vm(vm)
    , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_metadata(unlinkedCodeBlock->metadata().link())
    , m_creationTime(MonotonicTime::now())
{
    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(source().provider());
    setNumParameters(unlinkedCodeBlock->numParameters());

    vm->heap.codeBlockSet().add(this);
}

// The main purpose of this function is to generate linked bytecode from unlinked bytecode. Linking
// takes an abstract representation of bytecode and ties it to a GlobalObject and scope chain. For
// example, this process allows us to cache the depth of lexical environment reads that reach
// outside of this CodeBlock's compilation unit. It also allows us to generate particular constants
// that we can't generate during unlinked bytecode generation. This process is not allowed to
// generate control flow or introduce new locals. The reason for this is that we rely on the
// liveness analysis being the same for all the CodeBlocks of an UnlinkedCodeBlock. We rely on this
// fact by caching the liveness analysis inside UnlinkedCodeBlock.
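//
// As a hedged illustration (hypothetical source, not tied to any particular bytecode dump):
//
//     function outer() {
//         let x = 0;
//         return function inner() { return x; };
//     }
//
// inner's unlinked bytecode only knows that `x` is a closure variable somewhere up the scope
// chain; once inner's CodeBlock is linked against a concrete scope, the resolve metadata can
// cache that `x` lives exactly one activation up.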
bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    JSScope* scope)
{
    Base::finishCreation(vm);
    finishCreationCommon(vm);

    auto throwScope = DECLARE_THROW_SCOPE(vm);

    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));

    ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
    RETURN_IF_EXCEPTION(throwScope, false);

    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
            m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
    }

    // We already have the cloned symbol table for the module environment since we need to instantiate
    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
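    // (A hedged illustration: for a module such as `export let x = 0;`, `x` lives in the module
    // environment, which has to exist before this code block is linked so that imports and exports
    // can resolve against it; its symbol table was therefore cloned ahead of time.)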
    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
        if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
            clonedSymbolTable->prepareForTypeProfiling(locker);
        }
        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
    }

    bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
    m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
    }

    m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
    }

    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();

        setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
        RETURN_IF_EXCEPTION(throwScope, false);

        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
                MacroAssemblerCodePtr<BytecodePtrTag> codePtr = instructions().at(unlinkedHandler.target)->isWide()
                    ? LLInt::getWideCodePtr<BytecodePtrTag>(op_catch)
                    : LLInt::getCodePtr<BytecodePtrTag>(op_catch);
                handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
#else
                handler.initialize(unlinkedHandler);
#endif
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value.branchOffset;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                }
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
            }
        }
    }

    // Bookkeep the strongly referenced module environments.
    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;

    auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
        m_numberOfNonArgumentValueProfiles++;
        metadata.m_profile.m_bytecodeOffset = instruction.offset();
    };

    auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) {
        metadata.m_arrayProfile.m_bytecodeOffset = instruction.offset();
    };

    auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
        metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
    };

    auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
        metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
    };

    auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) {
        metadata.m_hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching();
    };

#define LINK_FIELD(__field) \
    WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);

#define INITIALIZE_METADATA(__op) \
    auto bytecode = instruction->as<__op>(); \
    auto& metadata = bytecode.metadata(this); \
    new (&metadata) __op::Metadata { bytecode }; \

#define CASE(__op) case __op::opcodeID

#define LINK(...) \
    CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
        INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
        WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
            WTF_LAZY_FOR_EACH_TERM(LINK_FIELD, WTF_LAZY_REST_(__VA_ARGS__)) \
        }) \
        break; \
    }
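
// As a rough, hand-expanded sketch (it may not match the preprocessor output token for token),
// LINK(OpGetByVal, arrayProfile, profile) produces approximately:
//
//     case OpGetByVal::opcodeID: {
//         auto bytecode = instruction->as<OpGetByVal>();
//         auto& metadata = bytecode.metadata(this);
//         new (&metadata) OpGetByVal::Metadata { bytecode };
//         link_arrayProfile(instruction, bytecode, metadata);
//         link_profile(instruction, bytecode, metadata);
//         break;
//     }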

    const InstructionStream& instructionStream = instructions();
    for (const auto& instruction : instructionStream) {
        OpcodeID opcodeID = instruction->opcodeID();
        m_bytecodeCost += opcodeLengths[opcodeID];
        switch (opcodeID) {
        LINK(OpHasIndexedProperty, arrayProfile)

        LINK(OpCallVarargs, arrayProfile, profile)
        LINK(OpTailCallVarargs, arrayProfile, profile)
        LINK(OpTailCallForwardArguments, arrayProfile, profile)
        LINK(OpConstructVarargs, arrayProfile, profile)
        LINK(OpGetByVal, arrayProfile, profile)

        LINK(OpGetDirectPname, profile)
        LINK(OpGetByIdWithThis, profile)
        LINK(OpTryGetById, profile)
        LINK(OpGetByIdDirect, profile)
        LINK(OpGetByValWithThis, profile)
        LINK(OpGetFromArguments, profile)
        LINK(OpToNumber, profile)
        LINK(OpToObject, profile)
        LINK(OpGetArgument, profile)
        LINK(OpToThis, profile)
        LINK(OpBitand, profile)
        LINK(OpBitor, profile)
        LINK(OpBitnot, profile)
        LINK(OpBitxor, profile)

        LINK(OpGetById, profile, hitCountForLLIntCaching)

        LINK(OpCall, profile, arrayProfile)
        LINK(OpTailCall, profile, arrayProfile)
        LINK(OpCallEval, profile, arrayProfile)
        LINK(OpConstruct, profile, arrayProfile)

        LINK(OpInByVal, arrayProfile)
        LINK(OpPutByVal, arrayProfile)
        LINK(OpPutByValDirect, arrayProfile)

        LINK(OpNewArray)
        LINK(OpNewArrayWithSize)
        LINK(OpNewArrayBuffer, arrayAllocationProfile)

        LINK(OpNewObject, objectAllocationProfile)

        LINK(OpPutById)
        LINK(OpCreateThis)

        LINK(OpAdd)
        LINK(OpMul)
        LINK(OpDiv)
        LINK(OpSub)

        LINK(OpNegate)

        LINK(OpJneqPtr)

        LINK(OpCatch)
        LINK(OpProfileControlFlow)

        case op_resolve_scope: {
            INITIALIZE_METADATA(OpResolveScope)

            const Identifier& ident = identifier(bytecode.m_var);
            RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar);

            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
            RETURN_IF_EXCEPTION(throwScope, false);

            metadata.m_resolveType = op.type;
            metadata.m_localScopeDepth = op.depth;
            if (op.lexicalEnvironment) {
                if (op.type == ModuleVar) {
                    // Keep the linked module environment strongly referenced.
                    if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry)
                        addConstant(op.lexicalEnvironment);
                    metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment);
                } else
                    metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());
            } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) {
                metadata.m_constantScope.set(vm, this, constantScope);
                if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks)
                    metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch();
            } else
                metadata.m_globalObject = nullptr;
            break;
        }

        case op_get_from_scope: {
            INITIALIZE_METADATA(OpGetFromScope)

            link_profile(instruction, bytecode, metadata);
            metadata.m_watchpointSet = nullptr;

            ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode()));
            if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
                break;
            }

            const Identifier& ident = identifier(bytecode.m_var);
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization);
            RETURN_IF_EXCEPTION(throwScope, false);

            metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
            if (op.type == ModuleVar)
                metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode());
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                metadata.m_watchpointSet = op.watchpointSet;
            else if (op.structure)
                metadata.m_structure.set(vm, this, op.structure);
            metadata.m_operand = op.operand;
            break;
        }

        case op_put_to_scope: {
            INITIALIZE_METADATA(OpPutToScope)

            if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) {
                // Only do watching if the property we're putting to is not anonymous.
                if (bytecode.m_var != UINT_MAX) {
                    SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset()));
                    const Identifier& ident = identifier(bytecode.m_var);
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    auto iter = symbolTable->find(locker, ident.impl());
                    ASSERT(iter != symbolTable->end(locker));
                    iter->value.prepareToWatch();
                    metadata.m_watchpointSet = iter->value.watchpointSet();
                } else
                    metadata.m_watchpointSet = nullptr;
                break;
            }

            const Identifier& ident = identifier(bytecode.m_var);
            metadata.m_watchpointSet = nullptr;
            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode());
            RETURN_IF_EXCEPTION(throwScope, false);

            metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode());
            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks)
                metadata.m_watchpointSet = op.watchpointSet;
            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
                if (op.watchpointSet)
                    op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident));
            } else if (op.structure)
                metadata.m_structure.set(vm, this, op.structure);
            metadata.m_operand = op.operand;
            break;
        }

        case op_profile_type: {
            RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes());

            INITIALIZE_METADATA(OpProfileType)

            size_t instructionOffset = instruction.offset() + instruction->size() - 1;
            unsigned divotStart, divotEnd;
            GlobalVariableID globalVariableID = 0;
            RefPtr<TypeSet> globalTypeSet;
            bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd);
            SymbolTable* symbolTable = nullptr;

            switch (bytecode.m_flag) {
            case ProfileTypeBytecodeClosureVar: {
                const Identifier& ident = identifier(bytecode.m_identifier);
                unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth();
                // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because
                // we're abstractly "read"ing from a JSScope.
                ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization);
                RETURN_IF_EXCEPTION(throwScope, false);

                if (op.type == ClosureVar || op.type == ModuleVar)
                    symbolTable = op.lexicalEnvironment->symbolTable();
                else if (op.type == GlobalVar)
                    symbolTable = m_globalObject.get()->symbolTable();

                UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl();
                if (symbolTable) {
                    ConcurrentJSLocker locker(symbolTable->m_lock);
                    // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                    symbolTable->prepareForTypeProfiling(locker);
                    globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm);
                    globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm);
                } else
                    globalVariableID = TypeProfilerNoGlobalIDExists;

                break;
            }
            case ProfileTypeBytecodeLocallyResolved: {
                int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset();
                SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex));
                const Identifier& ident = identifier(bytecode.m_identifier);
                ConcurrentJSLocker locker(symbolTable->m_lock);
                // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet.
                globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm);
                globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm);

                break;
            }
            case ProfileTypeBytecodeDoesNotHaveGlobalID:
            case ProfileTypeBytecodeFunctionArgument: {
                globalVariableID = TypeProfilerNoGlobalIDExists;
                break;
            }
            case ProfileTypeBytecodeFunctionReturnStatement: {
                RELEASE_ASSERT(ownerExecutable->isFunctionExecutable());
                globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet();
                globalVariableID = TypeProfilerReturnStatement;
                if (!shouldAnalyze) {
                    // Because a return statement can be added implicitly to return undefined at the end of a function,
                    // and these nodes don't emit expression ranges because they aren't in the actual source text of
                    // the user's program, give the type profiler some range to identify these return statements.
                    // Currently, the text offset that is used as identification is "f" in the function keyword
                    // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable.
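                    // (A hedged illustration: in `function f(b) { if (b) return 1; }`, the fall-through
                    // path returns undefined through a synthesized return statement with no
                    // corresponding source text, so it is identified by the function keyword's offset.)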
                    divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm);
                    shouldAnalyze = true;
                }
                break;
            }
            }

            std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID,
                ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm);
            TypeLocation* location = locationPair.first;
            bool isNewLocation = locationPair.second;

            if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement)
                location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm);

            if (shouldAnalyze && isNewLocation)
                vm.typeProfiler()->insertNewLocation(location);

            metadata.m_typeLocation = location;
            break;
        }

        case op_debug: {
            if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint)
                m_hasDebuggerStatement = true;
            break;
        }

        case op_create_rest: {
            int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip;
            ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0);
            // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
            m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
            break;
        }

        default:
            break;
        }
    }

#undef CASE
#undef INITIALIZE_METADATA
#undef LINK_FIELD
#undef LINK

    if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
        insertBasicBlockBoundariesForControlFlowProfiler();

    // Set optimization thresholds only after the instructions have been initialized, since these
    // rely on the instruction count (and are in theory permitted to also inspect the
    // instruction stream to more accurately assess the cost of tier-up).
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    // If the concurrent thread will want the code block's hash, then compute it here
    // synchronously.
    if (Options::alwaysComputeHash())
        hash();

    if (Options::dumpGeneratedBytecodes())
        dumpBytecode();

    if (m_metadata)
        vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes());

    return true;
}

void CodeBlock::finishCreationCommon(VM& vm)
{
    m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
}

CodeBlock::~CodeBlock()
{
    VM& vm = *m_vm;

    vm.heap.codeBlockSet().remove(this);

    if (UNLIKELY(vm.m_perBytecodeProfiler))
        vm.m_perBytecodeProfiler->notifyDestruction(this);

    if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
        unlinkedCodeBlock()->setDidOptimize(FalseTriState);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif

    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    unlinkIncomingCalls();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

#if ENABLE(JIT)
    if (auto* jitData = m_jitData.get()) {
        for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
            stubInfo->aboutToDie();
            stubInfo->deref();
        }
    }
#endif // ENABLE(JIT)
}

void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
{
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    for (const auto& entry : constants) {
        const IdentifierSet& set = entry.first;

        Structure* setStructure = globalObject->setStructure();
        RETURN_IF_EXCEPTION(scope, void());
        JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
        RETURN_IF_EXCEPTION(scope, void());

        for (auto setEntry : set) {
            JSString* jsString = jsOwnedString(&vm, setEntry.get());
            jsSet->add(exec, jsString);
            RETURN_IF_EXCEPTION(scope, void());
        }
        m_constantRegisters[entry.second].set(vm, this, jsSet);
    }
}

void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
{
    VM& vm = *m_vm;
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
    size_t count = constants.size();
    m_constantRegisters.resizeToFit(count);
    for (size_t i = 0; i < count; i++) {
        JSValue constant = constants[i].get();

        if (!constant.isEmpty()) {
            if (constant.isCell()) {
                JSCell* cell = constant.asCell();
                if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
                    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
                        ConcurrentJSLocker locker(symbolTable->m_lock);
                        symbolTable->prepareForTypeProfiling(locker);
                    }

                    SymbolTable* clone = symbolTable->cloneScopePart(vm);
                    if (wasCompiledWithDebuggingOpcodes())
                        clone->setRareDataCodeBlock(this);

                    constant = clone;
                } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
                    auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
                    RETURN_IF_EXCEPTION(scope, void());
                    constant = templateObject;
                }
            }
        }

        m_constantRegisters[i].set(vm, this, constant);
    }

    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
}

void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
{
    RELEASE_ASSERT(alternative);
    RELEASE_ASSERT(alternative->jitCode());
    m_alternative.set(vm, this, alternative);
}

void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;

    m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
}

CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
{
#if ENABLE(FTL_JIT)
    if (jitType() != JITType::DFGJIT)
        return nullptr;
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
    return nullptr;
#endif // ENABLE(FTL_JIT)
}

size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    size_t extraMemoryAllocated = 0;
    if (thisObject->m_metadata)
        extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
    RefPtr<JITCode> jitCode = thisObject->m_jitCode;
    if (jitCode && !jitCode->isShared())
        extraMemoryAllocated += jitCode->size();
    return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
}

void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    Base::visitChildren(cell, visitor);
    visitor.append(thisObject->m_ownerEdge);
    thisObject->visitChildren(visitor);
}

void CodeBlock::visitChildren(SlotVisitor& visitor)
{
    ConcurrentJSLocker locker(m_lock);
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        visitor.appendUnbarriered(otherBlock);

    size_t extraMemory = 0;
    if (m_metadata)
        extraMemory += m_metadata->sizeInBytes();
    if (m_jitCode && !m_jitCode->isShared())
        extraMemory += m_jitCode->size();
    visitor.reportExtraMemoryVisited(extraMemory);

    stronglyVisitStrongReferences(locker, visitor);
    stronglyVisitWeakReferences(locker, visitor);

    VM::SpaceAndSet::setFor(*subspace()).add(this);
}

bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
{
    if (Options::forceCodeBlockLiveness())
        return true;

    if (shouldJettisonDueToOldAge(locker))
        return false;

    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that this means that it's live.
    if (!JITCode::isOptimizingJIT(jitType()))
        return true;

    return false;
}

bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    return !vm.heap.isMarked(this);
}

static Seconds timeToLive(JITType jitType)
{
    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
        switch (jitType) {
        case JITType::InterpreterThunk:
            return 10_ms;
        case JITType::BaselineJIT:
            return 30_ms;
        case JITType::DFGJIT:
            return 40_ms;
        case JITType::FTLJIT:
            return 120_ms;
        default:
            return Seconds::infinity();
        }
    }

    switch (jitType) {
    case JITType::InterpreterThunk:
        return 5_s;
    case JITType::BaselineJIT:
        // Effectively 10 additional seconds, since BaselineJIT and
        // InterpreterThunk share a CodeBlock.
        return 15_s;
    case JITType::DFGJIT:
        return 20_s;
    case JITType::FTLJIT:
        return 60_s;
    default:
        return Seconds::infinity();
    }
}

bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
{
    if (m_vm->heap.isMarked(this))
        return false;

    if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
        return true;

    if (timeSinceCreation() < timeToLive(jitType()))
        return false;

    return true;
}

#if ENABLE(DFG_JIT)
static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
{
    if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
        return false;

    if (!vm.heap.isMarked(transition.m_from.get()))
        return false;

    return true;
}
#endif // ENABLE(DFG_JIT)

void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    VM& vm = *m_vm;

    if (jitType() == JITType::InterpreterThunk) {
        const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        const InstructionStream& instructionStream = instructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            auto instruction = instructionStream.at(propertyAccessInstructions[i]);
            if (instruction->is<OpPutById>()) {
                auto& metadata = instruction->as<OpPutById>().metadata(this);
                StructureID oldStructureID = metadata.m_oldStructureID;
                StructureID newStructureID = metadata.m_newStructureID;
                if (!oldStructureID || !newStructureID)
                    continue;
                Structure* oldStructure =
                    vm.heap.structureIDTable().get(oldStructureID);
                Structure* newStructure =
                    vm.heap.structureIDTable().get(newStructureID);
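                // The cached put_by_id transition keeps its target structure alive only while
                // its source structure is itself live; this mirrors the DFG transition-marking
                // rule applied further down in this function.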
                if (vm.heap.isMarked(oldStructure))
                    visitor.appendUnbarriered(newStructure);
                continue;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        if (auto* jitData = m_jitData.get()) {
            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
                stubInfo->propagateTransitions(visitor);
        }
    }
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

        dfgCommon->recordedStatuses.markIfCheap(visitor);

        for (auto& weakReference : dfgCommon->weakStructureReferences)
            weakReference->markIfCheap(visitor);

        for (auto& transition : dfgCommon->transitions) {
            if (shouldMarkTransition(vm, transition)) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // live).

                visitor.append(transition.m_to);
            }
        }
    }
#endif // ENABLE(DFG_JIT)
}

void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    VM& vm = *m_vm;
    if (vm.heap.isMarked(this))
        return;

    // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
    // that we might decide that the CodeBlock should be jettisoned due to old age, so the
    // isMarked check doesn't protect us.
    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        JSCell* reference = dfgCommon->weakReferences[i].get();
        ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
        if (!vm.heap.isMarked(reference)) {
            allAreLiveSoFar = false;
            break;
        }
    }
    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;
                break;
            }
        }
    }

    // If some weak references are dead, then this fixpoint iteration was
    // unsuccessful.
    if (!allAreLiveSoFar)
        return;

    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    visitor.appendUnbarriered(this);
#endif // ENABLE(DFG_JIT)
}

void CodeBlock::finalizeLLIntInlineCaches()
{
    VM& vm = *m_vm;
    const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();

    auto handleGetPutFromScope = [&] (auto& metadata) {
        GetPutInfo getPutInfo = metadata.m_getPutInfo;
        if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
            || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
            return;
        WriteBarrierBase<Structure>& structure = metadata.m_structure;
        if (!structure || vm.heap.isMarked(structure.get()))
            return;
        if (Options::verboseOSR())
            dataLogF("Clearing scope access with structure %p.\n", structure.get());
        structure.clear();
    };

    const InstructionStream& instructionStream = instructions();
    for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
        const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
        switch (curInstruction->opcodeID()) {
        case op_get_by_id: {
            auto& metadata = curInstruction->as<OpGetById>().metadata(this);
            if (metadata.m_mode != GetByIdMode::Default)
                break;
            StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt property access.\n");
            LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
            break;
        }
        case op_get_by_id_direct: {
            auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
            StructureID oldStructureID = metadata.m_structureID;
            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt property access.\n");
            metadata.m_structureID = 0;
            metadata.m_offset = 0;
            break;
        }
        case op_put_by_id: {
            auto& metadata = curInstruction->as<OpPutById>().metadata(this);
            StructureID oldStructureID = metadata.m_oldStructureID;
            StructureID newStructureID = metadata.m_newStructureID;
            StructureChain* chain = metadata.m_structureChain.get();
            if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
                && (!chain || vm.heap.isMarked(chain)))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt put transition.\n");
            metadata.m_oldStructureID = 0;
            metadata.m_offset = 0;
            metadata.m_newStructureID = 0;
            metadata.m_structureChain.clear();
            break;
        }
        // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
        // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
        case op_resolve_scope_for_hoisting_func_decl_in_eval:
            break;
        case op_to_this: {
            auto& metadata = curInstruction->as<OpToThis>().metadata(this);
            if (!metadata.m_cachedStructure || vm.heap.isMarked(metadata.m_cachedStructure.get()))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.m_cachedStructure.get());
            metadata.m_cachedStructure.clear();
            metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
            break;
        }
        case op_create_this: {
            auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
            auto& cacheWriteBarrier = metadata.m_cachedCallee;
            if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                break;
            JSCell* cachedFunction = cacheWriteBarrier.get();
            if (vm.heap.isMarked(cachedFunction))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
            cacheWriteBarrier.clear();
            break;
        }
        case op_resolve_scope: {
            // Right now this isn't strictly necessary. Any symbol tables that this will refer to
            // are for outer functions, and we refer to those functions strongly, and they refer
            // to the symbol table strongly. But it's nice to be on the safe side.
            auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
            WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
            if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
            symbolTable.clear();
            break;
        }
        case op_get_from_scope:
            handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
            break;
        case op_put_to_scope:
            handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
            break;
        default:
            OpcodeID opcodeID = curInstruction->opcodeID();
            ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
        }
    }

    // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
    // then cleared the cache without GCing in between.
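    // (A hedged example of the hazard: an op_get_by_id caches structure S and registers an entry
    // keyed on S in m_llintGetByIdWatchpointMap; the cache is later cleared without an intervening
    // GC, leaving a stale entry for S that only the sweep below can prune.)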
    m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool {
        auto clear = [&] () {
            auto& instruction = instructions().at(std::get<1>(pair.key));
            OpcodeID opcode = instruction->opcodeID();
            if (opcode == op_get_by_id) {
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt property access.\n");
                LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this));
            }
            return true;
        };

        if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key))))
            return clear();

        for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) {
            if (!watchpoint.key().isStillLive(vm))
                return clear();
        }

        return false;
    });

    forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) {
        if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee.get())) {
            if (Options::verboseOSR())
                dataLog("Clearing LLInt call from ", *this, "\n");
            callLinkInfo.unlink();
        }
        if (!!callLinkInfo.lastSeenCallee && !vm.heap.isMarked(callLinkInfo.lastSeenCallee.get()))
            callLinkInfo.lastSeenCallee.clear();
    });
}

#if ENABLE(JIT)
CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
{
    ASSERT(!m_jitData);
    m_jitData = std::make_unique<JITData>();
    return *m_jitData;
}

void CodeBlock::finalizeBaselineJITInlineCaches()
{
    if (auto* jitData = m_jitData.get()) {
        for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
            callLinkInfo->visitWeak(*vm());

        for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
            stubInfo->visitWeakReferences(this);
    }
}
#endif

void CodeBlock::finalizeUnconditionally(VM& vm)
{
    UNUSED_PARAM(vm);

    updateAllPredictions();

    if (JITCode::couldBeInterpreted(jitType()))
        finalizeLLIntInlineCaches();

#if ENABLE(JIT)
    if (!!jitCode())
        finalizeBaselineJITInlineCaches();
#endif

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        dfgCommon->recordedStatuses.finalize(vm);
    }
#endif // ENABLE(DFG_JIT)

    VM::SpaceAndSet::setFor(*subspace()).remove(this);
}

void CodeBlock::destroy(JSCell* cell)
{
    static_cast<CodeBlock*>(cell)->~CodeBlock();
}

void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result)
{
#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        if (auto* jitData = m_jitData.get()) {
            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
                result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
            for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
                result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
            for (ByValInfo* byValInfo : jitData->m_byValInfos)
                result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
        }
#if ENABLE(DFG_JIT)
        if (JITCode::isOptimizingJIT(jitType())) {
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (auto& pair : dfgCommon->recordedStatuses.calls)
                result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get();
            for (auto& pair : dfgCommon->recordedStatuses.gets)
                result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get();
            for (auto& pair : dfgCommon->recordedStatuses.puts)
1406 | result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get(); |
1407 | for (auto& pair : dfgCommon->recordedStatuses.ins) |
1408 | result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get(); |
1409 | } |
1410 | #endif |
1411 | } |
1412 | #else |
1413 | UNUSED_PARAM(result); |
1414 | #endif |
1415 | } |
1416 | |
1417 | void CodeBlock::getICStatusMap(ICStatusMap& result) |
1418 | { |
1419 | ConcurrentJSLocker locker(m_lock); |
1420 | getICStatusMap(locker, result); |
1421 | } |
1422 | |
1423 | #if ENABLE(JIT) |
1424 | StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType) |
1425 | { |
1426 | ConcurrentJSLocker locker(m_lock); |
1427 | return ensureJITData(locker).m_stubInfos.add(accessType); |
1428 | } |
1429 | |
1430 | JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile) |
1431 | { |
1432 | ConcurrentJSLocker locker(m_lock); |
1433 | return ensureJITData(locker).m_addICs.add(arithProfile); |
1434 | } |
1435 | |
1436 | JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile) |
1437 | { |
1438 | ConcurrentJSLocker locker(m_lock); |
1439 | return ensureJITData(locker).m_mulICs.add(arithProfile); |
1440 | } |
1441 | |
1442 | JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile) |
1443 | { |
1444 | ConcurrentJSLocker locker(m_lock); |
1445 | return ensureJITData(locker).m_subICs.add(arithProfile); |
1446 | } |
1447 | |
1448 | JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile) |
1449 | { |
1450 | ConcurrentJSLocker locker(m_lock); |
1451 | return ensureJITData(locker).m_negICs.add(arithProfile); |
1452 | } |
1453 | |
1454 | StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin) |
1455 | { |
1456 | ConcurrentJSLocker locker(m_lock); |
1457 | if (auto* jitData = m_jitData.get()) { |
1458 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) { |
1459 | if (stubInfo->codeOrigin == codeOrigin) |
1460 | return stubInfo; |
1461 | } |
1462 | } |
1463 | return nullptr; |
1464 | } |
1465 | |
1466 | ByValInfo* CodeBlock::addByValInfo() |
1467 | { |
1468 | ConcurrentJSLocker locker(m_lock); |
1469 | return ensureJITData(locker).m_byValInfos.add(); |
1470 | } |
1471 | |
1472 | CallLinkInfo* CodeBlock::addCallLinkInfo() |
1473 | { |
1474 | ConcurrentJSLocker locker(m_lock); |
1475 | return ensureJITData(locker).m_callLinkInfos.add(); |
1476 | } |
1477 | |
1478 | CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index) |
1479 | { |
1480 | ConcurrentJSLocker locker(m_lock); |
1481 | if (auto* jitData = m_jitData.get()) { |
1482 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) { |
1483 | if (callLinkInfo->codeOrigin() == CodeOrigin(index)) |
1484 | return callLinkInfo; |
1485 | } |
1486 | } |
1487 | return nullptr; |
1488 | } |
1489 | |
1490 | RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset) |
1491 | { |
1492 | ConcurrentJSLocker locker(m_lock); |
1493 | auto& jitData = ensureJITData(locker); |
1494 | jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); |
1495 | return &jitData.m_rareCaseProfiles.last(); |
1496 | } |
1497 | |
1498 | RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset) |
1499 | { |
1500 | if (auto* jitData = m_jitData.get()) { |
1501 | return tryBinarySearch<RareCaseProfile, int>( |
1502 | jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset, |
1503 | getRareCaseProfileBytecodeOffset); |
1504 | } |
1505 | return nullptr; |
1506 | } |
1507 | |
1508 | unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) |
1509 | { |
1510 | RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset); |
1511 | if (profile) |
1512 | return profile->m_counter; |
1513 | return 0; |
1514 | } |
1515 | |
1516 | void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters) |
1517 | { |
1518 | ConcurrentJSLocker locker(m_lock); |
1519 | ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters); |
1520 | } |
1521 | |
1522 | void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList) |
1523 | { |
1524 | ConcurrentJSLocker locker(m_lock); |
1525 | ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList); |
1526 | } |
1527 | |
1528 | void CodeBlock::resetJITData() |
1529 | { |
1530 | RELEASE_ASSERT(!JITCode::isJIT(jitType())); |
1531 | ConcurrentJSLocker locker(m_lock); |
1532 | |
1533 | if (auto* jitData = m_jitData.get()) { |
1534 | // We can clear these because no other thread will have references to any stub infos, call |
1535 | // link infos, or by val infos if we don't have JIT code. Attempts to query these data |
1536 | // structures using the concurrent API (getICStatusMap and friends) will return nothing if we |
1537 | // don't have JIT code. |
1538 | jitData->m_stubInfos.clear(); |
1539 | jitData->m_callLinkInfos.clear(); |
1540 | jitData->m_byValInfos.clear(); |
1541 | // We can clear this because the DFG's queries to these data structures are guarded by whether |
1542 | // there is JIT code. |
1543 | jitData->m_rareCaseProfiles.clear(); |
1544 | } |
1545 | } |
1546 | #endif |
1547 | |
1548 | void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1549 | { |
1550 | // We strongly visit OSR exits targets because we don't want to deal with |
1551 | // the complexity of generating an exit target CodeBlock on demand and |
1552 | // guaranteeing that it matches the details of the CodeBlock we compiled |
1553 | // the OSR exit against. |
1554 | |
1555 | visitor.append(m_alternative); |
1556 | |
1557 | #if ENABLE(DFG_JIT) |
1558 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1559 | if (dfgCommon->inlineCallFrames) { |
1560 | for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) { |
1561 | ASSERT(inlineCallFrame->baselineCodeBlock); |
1562 | visitor.append(inlineCallFrame->baselineCodeBlock); |
1563 | } |
1564 | } |
1565 | #endif |
1566 | } |
1567 | |
1568 | void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor) |
1569 | { |
1570 | UNUSED_PARAM(locker); |
1571 | |
1572 | visitor.append(m_globalObject); |
1573 | visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked. |
1574 | visitor.append(m_unlinkedCode); |
1575 | if (m_rareData) |
1576 | m_rareData->m_directEvalCodeCache.visitAggregate(visitor); |
1577 | visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size()); |
1578 | for (auto& functionExpr : m_functionExprs) |
1579 | visitor.append(functionExpr); |
1580 | for (auto& functionDecl : m_functionDecls) |
1581 | visitor.append(functionDecl); |
1582 | forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) { |
1583 | objectAllocationProfile.visitAggregate(visitor); |
1584 | }); |
1585 | |
1586 | #if ENABLE(JIT) |
1587 | if (auto* jitData = m_jitData.get()) { |
1588 | for (ByValInfo* byValInfo : jitData->m_byValInfos) |
1589 | visitor.append(byValInfo->cachedSymbol); |
1590 | } |
1591 | #endif |
1592 | |
1593 | #if ENABLE(DFG_JIT) |
1594 | if (JITCode::isOptimizingJIT(jitType())) |
1595 | visitOSRExitTargets(locker, visitor); |
1596 | #endif |
1597 | } |
1598 | |
1599 | void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1600 | { |
1601 | UNUSED_PARAM(visitor); |
1602 | |
1603 | #if ENABLE(DFG_JIT) |
1604 | if (!JITCode::isOptimizingJIT(jitType())) |
1605 | return; |
1606 | |
1607 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1608 | |
1609 | for (auto& transition : dfgCommon->transitions) { |
1610 | if (!!transition.m_codeOrigin) |
1611 | visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though. |
1612 | visitor.append(transition.m_from); |
1613 | visitor.append(transition.m_to); |
1614 | } |
1615 | |
1616 | for (auto& weakReference : dfgCommon->weakReferences) |
1617 | visitor.append(weakReference); |
1618 | |
1619 | for (auto& weakStructureReference : dfgCommon->weakStructureReferences) |
1620 | visitor.append(weakStructureReference); |
1621 | |
1622 | dfgCommon->livenessHasBeenProved = true; |
1623 | #endif |
1624 | } |
1625 | |
1626 | CodeBlock* CodeBlock::baselineAlternative() |
1627 | { |
1628 | #if ENABLE(JIT) |
1629 | CodeBlock* result = this; |
1630 | while (result->alternative()) |
1631 | result = result->alternative(); |
1632 | RELEASE_ASSERT(result); |
1633 | RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None); |
1634 | return result; |
1635 | #else |
1636 | return this; |
1637 | #endif |
1638 | } |
1639 | |
1640 | CodeBlock* CodeBlock::baselineVersion() |
1641 | { |
1642 | #if ENABLE(JIT) |
1643 | JITType selfJITType = jitType(); |
1644 | if (JITCode::isBaselineCode(selfJITType)) |
1645 | return this; |
1646 | CodeBlock* result = replacement(); |
1647 | if (!result) { |
1648 | if (JITCode::isOptimizingJIT(selfJITType)) { |
1649 | // The replacement can be null if we've had a memory clean up and the executable |
1650 | // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless, |
1651 | // the current codeBlock is still live on the stack, and as an optimizing JIT |
1652 | // codeBlock, it will keep its baselineAlternative() alive for us to fetch below. |
1653 | result = this; |
1654 | } else { |
1655 | // This can happen if we're creating the original CodeBlock for an executable. |
1656 | // Assume that we're the baseline CodeBlock. |
1657 | RELEASE_ASSERT(selfJITType == JITType::None); |
1658 | return this; |
1659 | } |
1660 | } |
1661 | result = result->baselineAlternative(); |
1662 | ASSERT(result); |
1663 | return result; |
1664 | #else |
1665 | return this; |
1666 | #endif |
1667 | } |
1668 | |
1669 | #if ENABLE(JIT) |
1670 | bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace) |
1671 | { |
1672 | CodeBlock* replacement = this->replacement(); |
1673 | return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace); |
1674 | } |
1675 | |
1676 | bool CodeBlock::hasOptimizedReplacement() |
1677 | { |
1678 | return hasOptimizedReplacement(jitType()); |
1679 | } |
1680 | #endif |
1681 | |
1682 | HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler) |
1683 | { |
1684 | RELEASE_ASSERT(bytecodeOffset < instructions().size()); |
1685 | return handlerForIndex(bytecodeOffset, requiredHandler); |
1686 | } |
1687 | |
1688 | HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler) |
1689 | { |
1690 | if (!m_rareData) |
return nullptr;
1692 | return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler); |
1693 | } |
1694 | |
1695 | CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite) |
1696 | { |
1697 | #if ENABLE(DFG_JIT) |
1698 | RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType())); |
1699 | RELEASE_ASSERT(canGetCodeOrigin(originalCallSite)); |
1700 | ASSERT(!!handlerForIndex(originalCallSite.bits())); |
1701 | CodeOrigin originalOrigin = codeOrigin(originalCallSite); |
1702 | return m_jitCode->dfgCommon()->addUniqueCallSiteIndex(originalOrigin); |
1703 | #else |
1704 | // We never create new on-the-fly exception handling |
1705 | // call sites outside the DFG/FTL inline caches. |
1706 | UNUSED_PARAM(originalCallSite); |
1707 | RELEASE_ASSERT_NOT_REACHED(); |
1708 | return CallSiteIndex(0u); |
1709 | #endif |
1710 | } |
1711 | |
1712 | |
1713 | |
1714 | void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset) |
1715 | { |
1716 | auto& instruction = instructions().at(bytecodeOffset); |
1717 | OpCatch op = instruction->as<OpCatch>(); |
1718 | auto& metadata = op.metadata(this); |
1719 | if (!!metadata.m_buffer) { |
1720 | #if !ASSERT_DISABLED |
1721 | ConcurrentJSLocker locker(m_lock); |
1722 | bool found = false; |
1723 | auto* rareData = m_rareData.get(); |
1724 | ASSERT(rareData); |
1725 | for (auto& profile : rareData->m_catchProfiles) { |
1726 | if (profile.get() == metadata.m_buffer) { |
1727 | found = true; |
1728 | break; |
1729 | } |
1730 | } |
1731 | ASSERT(found); |
1732 | #endif |
1733 | return; |
1734 | } |
1735 | |
1736 | ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset); |
1737 | } |
1738 | |
1739 | void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset) |
1740 | { |
1741 | BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis(); |
1742 | |
1743 | // We get the live-out set of variables at op_catch, not the live-in. This |
1744 | // is because the variables that the op_catch defines might be dead, and |
1745 | // we can avoid profiling them and extracting them when doing OSR entry |
1746 | // into the DFG. |
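// For example, if a catch block's caught exception value is never read afterwards, it is
// dead in the live-out set and gets no profiling slot here.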
1747 | |
1748 | auto nextOffset = instructions().at(bytecodeOffset).next().offset(); |
1749 | FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset); |
1750 | Vector<VirtualRegister> liveOperands; |
1751 | liveOperands.reserveInitialCapacity(liveLocals.bitCount()); |
1752 | liveLocals.forEachSetBit([&] (unsigned liveLocal) { |
1753 | liveOperands.append(virtualRegisterForLocal(liveLocal)); |
1754 | }); |
1755 | |
1756 | for (int i = 0; i < numParameters(); ++i) |
1757 | liveOperands.append(virtualRegisterForArgument(i)); |
1758 | |
1759 | auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size()); |
1760 | RELEASE_ASSERT(profiles->m_size == liveOperands.size()); |
1761 | for (unsigned i = 0; i < profiles->m_size; ++i) |
1762 | profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset(); |
1763 | |
1764 | createRareDataIfNecessary(); |
1765 | |
1766 | // The compiler thread will read this pointer value and then proceed to dereference it |
1767 | // if it is not null. We need to make sure all above stores happen before this store so |
1768 | // the compiler thread reads fully initialized data. |
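// This is the standard store-store publication pattern: initialize everything, fence,
// then publish the pointer that the concurrent reader null-checks before dereferencing.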
1769 | WTF::storeStoreFence(); |
1770 | |
1771 | op.metadata(this).m_buffer = profiles.get(); |
1772 | { |
1773 | ConcurrentJSLocker locker(m_lock); |
1774 | m_rareData->m_catchProfiles.append(WTFMove(profiles)); |
1775 | } |
1776 | } |
1777 | |
1778 | void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex) |
1779 | { |
1780 | RELEASE_ASSERT(m_rareData); |
1781 | Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers; |
1782 | unsigned index = callSiteIndex.bits(); |
1783 | for (size_t i = 0; i < exceptionHandlers.size(); ++i) { |
1784 | HandlerInfo& handler = exceptionHandlers[i]; |
1785 | if (handler.start <= index && handler.end > index) { |
1786 | exceptionHandlers.remove(i); |
1787 | return; |
1788 | } |
1789 | } |
1790 | |
1791 | RELEASE_ASSERT_NOT_REACHED(); |
1792 | } |
1793 | |
1794 | unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) |
1795 | { |
1796 | RELEASE_ASSERT(bytecodeOffset < instructions().size()); |
1797 | return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset); |
1798 | } |
1799 | |
1800 | unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset) |
1801 | { |
1802 | int divot; |
1803 | int startOffset; |
1804 | int endOffset; |
1805 | unsigned line; |
1806 | unsigned column; |
1807 | expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); |
1808 | return column; |
1809 | } |
1810 | |
1811 | void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const |
1812 | { |
1813 | m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); |
1814 | divot += sourceOffset(); |
1815 | column += line ? 1 : firstLineColumnOffset(); |
1816 | line += ownerExecutable()->firstLine(); |
1817 | } |
1818 | |
1819 | bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) |
1820 | { |
1821 | const InstructionStream& instructionStream = instructions(); |
1822 | for (const auto& it : instructionStream) { |
1823 | if (it->is<OpDebug>()) { |
1824 | int unused; |
1825 | unsigned opDebugLine; |
1826 | unsigned opDebugColumn; |
1827 | expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn); |
1828 | if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) |
1829 | return true; |
1830 | } |
1831 | } |
1832 | return false; |
1833 | } |
1834 | |
1835 | void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) |
1836 | { |
1837 | ConcurrentJSLocker locker(m_lock); |
1838 | |
1839 | #if ENABLE(JIT) |
1840 | if (auto* jitData = m_jitData.get()) |
1841 | jitData->m_rareCaseProfiles.shrinkToFit(); |
1842 | #endif |
1843 | |
1844 | if (shrinkMode == EarlyShrink) { |
1845 | m_constantRegisters.shrinkToFit(); |
1846 | m_constantsSourceCodeRepresentation.shrinkToFit(); |
1847 | |
1848 | if (m_rareData) { |
1849 | m_rareData->m_switchJumpTables.shrinkToFit(); |
1850 | m_rareData->m_stringSwitchJumpTables.shrinkToFit(); |
1851 | } |
} // else don't shrink these, because pointers into these tables may already have been handed out.
1853 | } |
1854 | |
1855 | #if ENABLE(JIT) |
1856 | void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming) |
1857 | { |
1858 | noticeIncomingCall(callerFrame); |
1859 | ConcurrentJSLocker locker(m_lock); |
1860 | ensureJITData(locker).m_incomingCalls.push(incoming); |
1861 | } |
1862 | |
1863 | void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming) |
1864 | { |
1865 | noticeIncomingCall(callerFrame); |
1866 | { |
1867 | ConcurrentJSLocker locker(m_lock); |
1868 | ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming); |
1869 | } |
1870 | } |
1871 | #endif // ENABLE(JIT) |
1872 | |
1873 | void CodeBlock::unlinkIncomingCalls() |
1874 | { |
1875 | while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) |
1876 | m_incomingLLIntCalls.begin()->unlink(); |
1877 | #if ENABLE(JIT) |
1878 | JITData* jitData = nullptr; |
1879 | { |
1880 | ConcurrentJSLocker locker(m_lock); |
1881 | jitData = m_jitData.get(); |
1882 | } |
1883 | if (jitData) { |
1884 | while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end()) |
1885 | jitData->m_incomingCalls.begin()->unlink(*vm()); |
1886 | while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end()) |
1887 | jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm()); |
1888 | } |
1889 | #endif // ENABLE(JIT) |
1890 | } |
1891 | |
1892 | void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming) |
1893 | { |
1894 | noticeIncomingCall(callerFrame); |
1895 | m_incomingLLIntCalls.push(incoming); |
1896 | } |
1897 | |
1898 | CodeBlock* CodeBlock::newReplacement() |
1899 | { |
1900 | return ownerExecutable()->newReplacementCodeBlockFor(specializationKind()); |
1901 | } |
1902 | |
1903 | #if ENABLE(JIT) |
1904 | CodeBlock* CodeBlock::replacement() |
1905 | { |
1906 | const ClassInfo* classInfo = this->classInfo(*vm()); |
1907 | |
1908 | if (classInfo == FunctionCodeBlock::info()) |
1909 | return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall); |
1910 | |
1911 | if (classInfo == EvalCodeBlock::info()) |
1912 | return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock(); |
1913 | |
1914 | if (classInfo == ProgramCodeBlock::info()) |
1915 | return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock(); |
1916 | |
1917 | if (classInfo == ModuleProgramCodeBlock::info()) |
1918 | return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock(); |
1919 | |
1920 | RELEASE_ASSERT_NOT_REACHED(); |
1921 | return nullptr; |
1922 | } |
1923 | |
1924 | DFG::CapabilityLevel CodeBlock::computeCapabilityLevel() |
1925 | { |
1926 | const ClassInfo* classInfo = this->classInfo(*vm()); |
1927 | |
1928 | if (classInfo == FunctionCodeBlock::info()) { |
1929 | if (isConstructor()) |
1930 | return DFG::functionForConstructCapabilityLevel(this); |
1931 | return DFG::functionForCallCapabilityLevel(this); |
1932 | } |
1933 | |
1934 | if (classInfo == EvalCodeBlock::info()) |
1935 | return DFG::evalCapabilityLevel(this); |
1936 | |
1937 | if (classInfo == ProgramCodeBlock::info()) |
1938 | return DFG::programCapabilityLevel(this); |
1939 | |
1940 | if (classInfo == ModuleProgramCodeBlock::info()) |
1941 | return DFG::programCapabilityLevel(this); |
1942 | |
1943 | RELEASE_ASSERT_NOT_REACHED(); |
1944 | return DFG::CannotCompile; |
1945 | } |
1946 | |
1947 | #endif // ENABLE(JIT) |
1948 | |
1949 | void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail) |
1950 | { |
1951 | #if !ENABLE(DFG_JIT) |
1952 | UNUSED_PARAM(mode); |
1953 | UNUSED_PARAM(detail); |
1954 | #endif |
1955 | |
1956 | VM& vm = *m_vm; |
1957 | |
1958 | CODEBLOCK_LOG_EVENT(this, "jettison" , ("due to " , reason, ", counting = " , mode == CountReoptimization, ", detail = " , pointerDump(detail))); |
1959 | |
1960 | RELEASE_ASSERT(reason != Profiler::NotJettisoned); |
1961 | |
1962 | #if ENABLE(DFG_JIT) |
1963 | if (DFG::shouldDumpDisassembly()) { |
1964 | dataLog("Jettisoning " , *this); |
1965 | if (mode == CountReoptimization) |
1966 | dataLog(" and counting reoptimization" ); |
1967 | dataLog(" due to " , reason); |
1968 | if (detail) |
1969 | dataLog(", " , *detail); |
1970 | dataLog(".\n" ); |
1971 | } |
1972 | |
1973 | if (reason == Profiler::JettisonDueToWeakReference) { |
1974 | if (DFG::shouldDumpDisassembly()) { |
1975 | dataLog(*this, " will be jettisoned because of the following dead references:\n" ); |
1976 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1977 | for (auto& transition : dfgCommon->transitions) { |
1978 | JSCell* origin = transition.m_codeOrigin.get(); |
1979 | JSCell* from = transition.m_from.get(); |
1980 | JSCell* to = transition.m_to.get(); |
1981 | if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from)) |
1982 | continue; |
1983 | dataLog(" Transition under " , RawPointer(origin), ", " , RawPointer(from), " -> " , RawPointer(to), ".\n" ); |
1984 | } |
1985 | for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { |
1986 | JSCell* weak = dfgCommon->weakReferences[i].get(); |
1987 | if (vm.heap.isMarked(weak)) |
1988 | continue; |
1989 | dataLog(" Weak reference " , RawPointer(weak), ".\n" ); |
1990 | } |
1991 | } |
1992 | } |
1993 | #endif // ENABLE(DFG_JIT) |
1994 | |
1995 | DeferGCForAWhile deferGC(*heap()); |
1996 | |
1997 | // We want to accomplish two things here: |
1998 | // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it |
1999 | // we should OSR exit at the top of the next bytecode instruction after the return. |
2000 | // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock. |
2001 | |
2002 | #if ENABLE(DFG_JIT) |
2003 | if (JITCode::isOptimizingJIT(jitType())) |
2004 | jitCode()->dfgCommon()->clearWatchpoints(); |
2005 | |
2006 | if (reason != Profiler::JettisonDueToOldAge) { |
2007 | Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get(); |
2008 | if (UNLIKELY(compilation)) |
2009 | compilation->setJettisonReason(reason, detail); |
2010 | |
2011 | // This accomplishes (1), and does its own book-keeping about whether it has already happened. |
2012 | if (!jitCode()->dfgCommon()->invalidate()) { |
2013 | // We've already been invalidated. |
2014 | RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))); |
2015 | return; |
2016 | } |
2017 | } |
2018 | |
2019 | if (DFG::shouldDumpDisassembly()) |
2020 | dataLog(" Did invalidate " , *this, "\n" ); |
2021 | |
2022 | // Count the reoptimization if that's what the user wanted. |
2023 | if (mode == CountReoptimization) { |
2024 | // FIXME: Maybe this should call alternative(). |
2025 | // https://bugs.webkit.org/show_bug.cgi?id=123677 |
2026 | baselineAlternative()->countReoptimization(); |
2027 | if (DFG::shouldDumpDisassembly()) |
2028 | dataLog(" Did count reoptimization for " , *this, "\n" ); |
2029 | } |
2030 | |
2031 | if (this != replacement()) { |
2032 | // This means that we were never the entrypoint. This can happen for OSR entry code |
2033 | // blocks. |
2034 | return; |
2035 | } |
2036 | |
2037 | if (alternative()) |
2038 | alternative()->optimizeAfterWarmUp(); |
2039 | |
2040 | if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps) |
2041 | tallyFrequentExitSites(); |
2042 | #endif // ENABLE(DFG_JIT) |
2043 | |
2044 | // Jettison can happen during GC. We don't want to install code to a dead executable |
2045 | // because that would add a dead object to the remembered set. |
2046 | if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())) |
2047 | return; |
2048 | |
2049 | #if ENABLE(JIT) |
2050 | { |
2051 | ConcurrentJSLocker locker(m_lock); |
2052 | if (JITData* jitData = m_jitData.get()) { |
2053 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
2054 | callLinkInfo->setClearedByJettison(); |
2055 | } |
2056 | } |
2057 | #endif |
2058 | |
2059 | // This accomplishes (2). |
2060 | ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind()); |
2061 | |
2062 | #if ENABLE(DFG_JIT) |
2063 | if (DFG::shouldDumpDisassembly()) |
2064 | dataLog(" Did install baseline version of " , *this, "\n" ); |
2065 | #endif // ENABLE(DFG_JIT) |
2066 | } |
2067 | |
2068 | JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) |
2069 | { |
2070 | auto* inlineCallFrame = codeOrigin.inlineCallFrame(); |
2071 | if (!inlineCallFrame) |
2072 | return globalObject(); |
2073 | return inlineCallFrame->baselineCodeBlock->globalObject(); |
2074 | } |
2075 | |
2076 | class RecursionCheckFunctor { |
2077 | public: |
2078 | RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck) |
2079 | : m_startCallFrame(startCallFrame) |
2080 | , m_codeBlock(codeBlock) |
2081 | , m_depthToCheck(depthToCheck) |
2082 | , m_foundStartCallFrame(false) |
2083 | , m_didRecurse(false) |
2084 | { } |
2085 | |
2086 | StackVisitor::Status operator()(StackVisitor& visitor) const |
2087 | { |
2088 | CallFrame* currentCallFrame = visitor->callFrame(); |
2089 | |
2090 | if (currentCallFrame == m_startCallFrame) |
2091 | m_foundStartCallFrame = true; |
2092 | |
2093 | if (m_foundStartCallFrame) { |
2094 | if (visitor->callFrame()->codeBlock() == m_codeBlock) { |
2095 | m_didRecurse = true; |
2096 | return StackVisitor::Done; |
2097 | } |
2098 | |
2099 | if (!m_depthToCheck--) |
2100 | return StackVisitor::Done; |
2101 | } |
2102 | |
2103 | return StackVisitor::Continue; |
2104 | } |
2105 | |
2106 | bool didRecurse() const { return m_didRecurse; } |
2107 | |
2108 | private: |
2109 | CallFrame* m_startCallFrame; |
2110 | CodeBlock* m_codeBlock; |
2111 | mutable unsigned m_depthToCheck; |
2112 | mutable bool m_foundStartCallFrame; |
2113 | mutable bool m_didRecurse; |
2114 | }; |
2115 | |
2116 | void CodeBlock::noticeIncomingCall(ExecState* callerFrame) |
2117 | { |
2118 | CodeBlock* callerCodeBlock = callerFrame->codeBlock(); |
2119 | |
2120 | if (Options::verboseCallLink()) |
2121 | dataLog("Noticing call link from " , pointerDump(callerCodeBlock), " to " , *this, "\n" ); |
2122 | |
2123 | #if ENABLE(DFG_JIT) |
2124 | if (!m_shouldAlwaysBeInlined) |
2125 | return; |
2126 | |
2127 | if (!callerCodeBlock) { |
2128 | m_shouldAlwaysBeInlined = false; |
2129 | if (Options::verboseCallLink()) |
2130 | dataLog(" Clearing SABI because caller is native.\n" ); |
2131 | return; |
2132 | } |
2133 | |
2134 | if (!hasBaselineJITProfiling()) |
2135 | return; |
2136 | |
2137 | if (!DFG::mightInlineFunction(this)) |
2138 | return; |
2139 | |
2140 | if (!canInline(capabilityLevelState())) |
2141 | return; |
2142 | |
2143 | if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) { |
2144 | m_shouldAlwaysBeInlined = false; |
2145 | if (Options::verboseCallLink()) |
2146 | dataLog(" Clearing SABI because caller is too large.\n" ); |
2147 | return; |
2148 | } |
2149 | |
2150 | if (callerCodeBlock->jitType() == JITType::InterpreterThunk) { |
2151 | // If the caller is still in the interpreter, then we can't expect inlining to |
2152 | // happen anytime soon. Assume it's profitable to optimize it separately. This |
2153 | // ensures that a function is SABI only if it is called no more frequently than |
2154 | // any of its callers. |
2155 | m_shouldAlwaysBeInlined = false; |
2156 | if (Options::verboseCallLink()) |
2157 | dataLog(" Clearing SABI because caller is in LLInt.\n" ); |
2158 | return; |
2159 | } |
2160 | |
2161 | if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { |
2162 | m_shouldAlwaysBeInlined = false; |
2163 | if (Options::verboseCallLink()) |
dataLog(" Clearing SABI because caller was already optimized.\n");
2165 | return; |
2166 | } |
2167 | |
2168 | if (callerCodeBlock->codeType() != FunctionCode) { |
// If the caller is either eval or global code, assume that it won't be
2170 | // optimized anytime soon. For eval code this is particularly true since we |
2171 | // delay eval optimization by a *lot*. |
2172 | m_shouldAlwaysBeInlined = false; |
2173 | if (Options::verboseCallLink()) |
2174 | dataLog(" Clearing SABI because caller is not a function.\n" ); |
2175 | return; |
2176 | } |
2177 | |
2178 | // Recursive calls won't be inlined. |
2179 | RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth()); |
2180 | vm()->topCallFrame->iterate(functor); |
2181 | |
2182 | if (functor.didRecurse()) { |
2183 | if (Options::verboseCallLink()) |
2184 | dataLog(" Clearing SABI because recursion was detected.\n" ); |
2185 | m_shouldAlwaysBeInlined = false; |
2186 | return; |
2187 | } |
2188 | |
2189 | if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) { |
2190 | dataLog("In call from " , FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to " , *this, ": caller's DFG capability level is not set.\n" ); |
2191 | CRASH(); |
2192 | } |
2193 | |
2194 | if (canCompile(callerCodeBlock->capabilityLevelState())) |
2195 | return; |
2196 | |
2197 | if (Options::verboseCallLink()) |
2198 | dataLog(" Clearing SABI because the caller is not a DFG candidate.\n" ); |
2199 | |
2200 | m_shouldAlwaysBeInlined = false; |
2201 | #endif |
2202 | } |
2203 | |
2204 | unsigned CodeBlock::reoptimizationRetryCounter() const |
2205 | { |
2206 | #if ENABLE(JIT) |
2207 | ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); |
2208 | return m_reoptimizationRetryCounter; |
2209 | #else |
2210 | return 0; |
2211 | #endif // ENABLE(JIT) |
2212 | } |
2213 | |
2214 | #if !ENABLE(C_LOOP) |
2215 | const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const |
2216 | { |
2217 | #if ENABLE(JIT) |
2218 | if (auto* jitData = m_jitData.get()) { |
2219 | if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get()) |
2220 | return registers; |
2221 | } |
2222 | #endif |
2223 | return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters(); |
2224 | } |
2225 | |
2226 | |
2227 | static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters) |
2228 | { |
2229 | |
2230 | return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register)); |
2231 | |
2232 | } |
2233 | |
2234 | size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() |
2235 | { |
2236 | return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters()); |
2237 | } |
2238 | |
2239 | size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters() |
2240 | { |
2241 | return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size()); |
2242 | } |
2243 | #endif |
2244 | |
2245 | #if ENABLE(JIT) |
2246 | |
2247 | void CodeBlock::countReoptimization() |
2248 | { |
2249 | m_reoptimizationRetryCounter++; |
2250 | if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax()) |
2251 | m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax(); |
2252 | } |
2253 | |
2254 | unsigned CodeBlock::numberOfDFGCompiles() |
2255 | { |
2256 | ASSERT(JITCode::isBaselineCode(jitType())); |
2257 | if (Options::testTheFTL()) { |
2258 | if (m_didFailFTLCompilation) |
2259 | return 1000000; |
2260 | return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter; |
2261 | } |
2262 | CodeBlock* replacement = this->replacement(); |
2263 | return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter; |
2264 | } |
2265 | |
2266 | int32_t CodeBlock::codeTypeThresholdMultiplier() const |
2267 | { |
2268 | if (codeType() == EvalCode) |
2269 | return Options::evalThresholdMultiplier(); |
2270 | |
2271 | return 1; |
2272 | } |
2273 | |
2274 | double CodeBlock::optimizationThresholdScalingFactor() |
2275 | { |
2276 | // This expression arises from doing a least-squares fit of |
2277 | // |
2278 | // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d |
2279 | // |
2280 | // against the data points: |
2281 | // |
2282 | // x F[x_] |
2283 | // 10 0.9 (smallest reasonable code block) |
2284 | // 200 1.0 (typical small-ish code block) |
2285 | // 320 1.2 (something I saw in 3d-cube that I wanted to optimize) |
2286 | // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize) |
2287 | // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort) |
2288 | // 10000 6.0 (similar to above) |
2289 | // |
2290 | // I achieve the minimization using the following Mathematica code: |
2291 | // |
2292 | // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d |
2293 | // |
2294 | // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}} |
2295 | // |
2296 | // solution = |
2297 | // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples), |
2298 | // {a, b, c, d}][[2]] |
2299 | // |
2300 | // And the code below (to initialize a, b, c, d) is generated by: |
2301 | // |
2302 | // Print["const double " <> ToString[#[[1]]] <> " = " <> |
2303 | // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution |
2304 | // |
2305 | // We've long known the following to be true: |
2306 | // - Small code blocks are cheap to optimize and so we should do it sooner rather |
2307 | // than later. |
2308 | // - Large code blocks are expensive to optimize and so we should postpone doing so, |
2309 | // and sometimes have a large enough threshold that we never optimize them. |
2310 | // - The difference in cost is not totally linear because (a) just invoking the |
2311 | // DFG incurs some base cost and (b) for large code blocks there is enough slop |
2312 | // in the correlation between instruction count and the actual compilation cost |
2313 | // that for those large blocks, the instruction count should not have a strong |
2314 | // influence on our threshold. |
2315 | // |
2316 | // I knew the goals but I didn't know how to achieve them; so I picked an interesting |
2317 | // example where the heuristics were right (code block in 3d-cube with instruction |
2318 | // count 320, which got compiled early as it should have been) and one where they were |
2319 | // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive |
2320 | // to compile and didn't run often enough to warrant compilation in my opinion), and |
2321 | // then threw in additional data points that represented my own guess of what our |
2322 | // heuristics should do for some round-numbered examples. |
2323 | // |
2324 | // The expression to which I decided to fit the data arose because I started with an |
2325 | // affine function, and then did two things: put the linear part in an Abs to ensure |
2326 | // that the fit didn't end up choosing a negative value of c (which would result in |
2327 | // the function turning over and going negative for large x) and I threw in a Sqrt |
// term because Sqrt represents my intuition that the function should be more sensitive
2329 | // to small changes in small values of x, but less sensitive when x gets large. |
2330 | |
2331 | // Note that the current fit essentially eliminates the linear portion of the |
2332 | // expression (c == 0.0). |
2333 | const double a = 0.061504; |
2334 | const double b = 1.02406; |
2335 | const double c = 0.0; |
2336 | const double d = 0.825914; |
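// Illustrative evaluation (not part of the original fit): a bytecode cost of 320 gives
// d + a * sqrt(320 + b) ~= 0.826 + 0.0615 * sqrt(321.02) ~= 1.93 before the code type
// multiplier; the fitted curve passes near, not exactly through, the sample points above.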
2337 | |
2338 | double bytecodeCost = this->bytecodeCost(); |
2339 | |
2340 | ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense. |
2341 | |
2342 | double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost; |
2343 | |
2344 | result *= codeTypeThresholdMultiplier(); |
2345 | |
2346 | if (Options::verboseOSR()) { |
2347 | dataLog( |
2348 | *this, ": bytecode cost is " , bytecodeCost, |
2349 | ", scaling execution counter by " , result, " * " , codeTypeThresholdMultiplier(), |
2350 | "\n" ); |
2351 | } |
2352 | return result; |
2353 | } |
2354 | |
2355 | static int32_t clipThreshold(double threshold) |
2356 | { |
2357 | if (threshold < 1.0) |
2358 | return 1; |
2359 | |
2360 | if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max())) |
2361 | return std::numeric_limits<int32_t>::max(); |
2362 | |
2363 | return static_cast<int32_t>(threshold); |
2364 | } |
2365 | |
2366 | int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold) |
2367 | { |
2368 | return clipThreshold( |
2369 | static_cast<double>(desiredThreshold) * |
2370 | optimizationThresholdScalingFactor() * |
2371 | (1 << reoptimizationRetryCounter())); |
2372 | } |
2373 | |
2374 | bool CodeBlock::checkIfOptimizationThresholdReached() |
2375 | { |
2376 | #if ENABLE(DFG_JIT) |
2377 | if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) { |
2378 | if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode)) |
2379 | == DFG::Worklist::Compiled) { |
2380 | optimizeNextInvocation(); |
2381 | return true; |
2382 | } |
2383 | } |
2384 | #endif |
2385 | |
2386 | return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this); |
2387 | } |
2388 | |
2389 | #if ENABLE(DFG_JIT) |
2390 | auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction |
2391 | { |
2392 | DFG::OSRExitBase& exit = exitState.exit; |
2393 | if (!exitKindMayJettison(exit.m_kind)) { |
2394 | // FIXME: We may want to notice that we're frequently exiting |
2395 | // at an op_catch that we didn't compile an entrypoint for, and |
2396 | // then trigger a reoptimization of this CodeBlock: |
2397 | // https://bugs.webkit.org/show_bug.cgi?id=175842 |
2398 | return OptimizeAction::None; |
2399 | } |
2400 | |
2401 | exit.m_count++; |
2402 | m_osrExitCounter++; |
2403 | |
2404 | CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock; |
2405 | ASSERT(baselineCodeBlock == baselineAlternative()); |
2406 | if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold())) |
2407 | return OptimizeAction::ReoptimizeNow; |
2408 | |
2409 | // We want to figure out if there's a possibility that we're in a loop. For the outermost |
2410 | // code block in the inline stack, we handle this appropriately by having the loop OSR trigger |
2411 | // check the exit count of the replacement of the CodeBlock from which we are OSRing. The |
2412 | // problem is the inlined functions, which might also have loops, but whose baseline versions |
2413 | // don't know where to look for the exit count. Figure out if those loops are severe enough |
2414 | // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger. |
2415 | // Otherwise, we should use the normal reoptimization trigger. |
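// Put differently: didTryToEnterInLoop on an executable records that loop OSR entry was
// attempted, so scanning the inline stack for it is our proxy for "some inlined frame is
// loopy enough to matter", since those baselines have no exit counts of their own to consult.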
2416 | |
2417 | bool didTryToEnterInLoop = false; |
2418 | for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) { |
2419 | if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) { |
2420 | didTryToEnterInLoop = true; |
2421 | break; |
2422 | } |
2423 | } |
2424 | |
2425 | uint32_t exitCountThreshold = didTryToEnterInLoop |
2426 | ? exitCountThresholdForReoptimizationFromLoop() |
2427 | : exitCountThresholdForReoptimization(); |
2428 | |
2429 | if (m_osrExitCounter > exitCountThreshold) |
2430 | return OptimizeAction::ReoptimizeNow; |
2431 | |
// Too few failures. Adjust the execution counter so that we only try to optimize again after a while.
2433 | baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold); |
2434 | return OptimizeAction::None; |
2435 | } |
2436 | #endif |
2437 | |
2438 | void CodeBlock::optimizeNextInvocation() |
2439 | { |
2440 | if (Options::verboseOSR()) |
2441 | dataLog(*this, ": Optimizing next invocation.\n" ); |
2442 | m_jitExecuteCounter.setNewThreshold(0, this); |
2443 | } |
2444 | |
2445 | void CodeBlock::dontOptimizeAnytimeSoon() |
2446 | { |
2447 | if (Options::verboseOSR()) |
2448 | dataLog(*this, ": Not optimizing anytime soon.\n" ); |
2449 | m_jitExecuteCounter.deferIndefinitely(); |
2450 | } |
2451 | |
2452 | void CodeBlock::optimizeAfterWarmUp() |
2453 | { |
2454 | if (Options::verboseOSR()) |
2455 | dataLog(*this, ": Optimizing after warm-up.\n" ); |
2456 | #if ENABLE(DFG_JIT) |
2457 | m_jitExecuteCounter.setNewThreshold( |
2458 | adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this); |
2459 | #endif |
2460 | } |
2461 | |
2462 | void CodeBlock::optimizeAfterLongWarmUp() |
2463 | { |
2464 | if (Options::verboseOSR()) |
2465 | dataLog(*this, ": Optimizing after long warm-up.\n" ); |
2466 | #if ENABLE(DFG_JIT) |
2467 | m_jitExecuteCounter.setNewThreshold( |
2468 | adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this); |
2469 | #endif |
2470 | } |
2471 | |
2472 | void CodeBlock::optimizeSoon() |
2473 | { |
2474 | if (Options::verboseOSR()) |
2475 | dataLog(*this, ": Optimizing soon.\n" ); |
2476 | #if ENABLE(DFG_JIT) |
2477 | m_jitExecuteCounter.setNewThreshold( |
2478 | adjustedCounterValue(Options::thresholdForOptimizeSoon()), this); |
2479 | #endif |
2480 | } |
2481 | |
2482 | void CodeBlock::forceOptimizationSlowPathConcurrently() |
2483 | { |
2484 | if (Options::verboseOSR()) |
2485 | dataLog(*this, ": Forcing slow path concurrently.\n" ); |
2486 | m_jitExecuteCounter.forceSlowPathConcurrently(); |
2487 | } |
2488 | |
2489 | #if ENABLE(DFG_JIT) |
2490 | void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result) |
2491 | { |
2492 | JITType type = jitType(); |
2493 | if (type != JITType::BaselineJIT) { |
2494 | dataLog(*this, ": expected to have baseline code but have " , type, "\n" ); |
2495 | CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type)); |
2496 | } |
2497 | |
2498 | CodeBlock* replacement = this->replacement(); |
2499 | bool hasReplacement = (replacement && replacement != this); |
2500 | if ((result == CompilationSuccessful) != hasReplacement) { |
2501 | dataLog(*this, ": we have result = " , result, " but " ); |
2502 | if (replacement == this) |
2503 | dataLog("we are our own replacement.\n" ); |
2504 | else |
2505 | dataLog("our replacement is " , pointerDump(replacement), "\n" ); |
2506 | RELEASE_ASSERT_NOT_REACHED(); |
2507 | } |
2508 | |
2509 | switch (result) { |
2510 | case CompilationSuccessful: |
2511 | RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType())); |
2512 | optimizeNextInvocation(); |
2513 | return; |
2514 | case CompilationFailed: |
2515 | dontOptimizeAnytimeSoon(); |
2516 | return; |
2517 | case CompilationDeferred: |
2518 | // We'd like to do dontOptimizeAnytimeSoon() but we cannot because |
2519 | // forceOptimizationSlowPathConcurrently() is inherently racy. It won't |
2520 | // necessarily guarantee anything. So, we make sure that even if that |
2521 | // function ends up being a no-op, we still eventually retry and realize |
2522 | // that we have optimized code ready. |
2523 | optimizeAfterWarmUp(); |
2524 | return; |
2525 | case CompilationInvalidated: |
2526 | // Retry with exponential backoff. |
2527 | countReoptimization(); |
2528 | optimizeAfterWarmUp(); |
2529 | return; |
2530 | } |
2531 | |
2532 | dataLog("Unrecognized result: " , static_cast<int>(result), "\n" ); |
2533 | RELEASE_ASSERT_NOT_REACHED(); |
2534 | } |
2535 | |
2536 | #endif |
2537 | |
2538 | uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold) |
2539 | { |
2540 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2541 | // Compute this the lame way so we don't saturate. This is called infrequently |
2542 | // enough that this loop won't hurt us. |
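// For example, a desired threshold of 100 with a retry counter of 3 doubles to 800; the
// newResult < result check detects the shift wrapping around and saturates to UINT32_MAX.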
2543 | unsigned result = desiredThreshold; |
2544 | for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) { |
2545 | unsigned newResult = result << 1; |
2546 | if (newResult < result) |
2547 | return std::numeric_limits<uint32_t>::max(); |
2548 | result = newResult; |
2549 | } |
2550 | return result; |
2551 | } |
2552 | |
2553 | uint32_t CodeBlock::exitCountThresholdForReoptimization() |
2554 | { |
2555 | return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier()); |
2556 | } |
2557 | |
2558 | uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop() |
2559 | { |
2560 | return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier()); |
2561 | } |
2562 | |
2563 | bool CodeBlock::shouldReoptimizeNow() |
2564 | { |
2565 | return osrExitCounter() >= exitCountThresholdForReoptimization(); |
2566 | } |
2567 | |
2568 | bool CodeBlock::shouldReoptimizeFromLoopNow() |
2569 | { |
2570 | return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop(); |
2571 | } |
2572 | #endif |
2573 | |
2574 | ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset) |
2575 | { |
2576 | auto instruction = instructions().at(bytecodeOffset); |
2577 | switch (instruction->opcodeID()) { |
2578 | #define CASE(Op) \ |
2579 | case Op::opcodeID: \ |
2580 | return &instruction->as<Op>().metadata(this).m_arrayProfile; |
2581 | |
2582 | FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE) |
2583 | #undef CASE |
2584 | |
2585 | case OpGetById::opcodeID: { |
2586 | auto bytecode = instruction->as<OpGetById>(); |
2587 | auto& metadata = bytecode.metadata(this); |
2588 | if (metadata.m_mode == GetByIdMode::ArrayLength) |
2589 | return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile; |
2590 | break; |
2591 | } |
2592 | default: |
2593 | break; |
2594 | } |
2595 | |
2596 | return nullptr; |
2597 | } |
2598 | |
2599 | ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset) |
2600 | { |
2601 | ConcurrentJSLocker locker(m_lock); |
2602 | return getArrayProfile(locker, bytecodeOffset); |
2603 | } |
2604 | |
2605 | #if ENABLE(DFG_JIT) |
2606 | Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins() |
2607 | { |
2608 | return m_jitCode->dfgCommon()->codeOrigins; |
2609 | } |
2610 | |
2611 | size_t CodeBlock::numberOfDFGIdentifiers() const |
2612 | { |
2613 | if (!JITCode::isOptimizingJIT(jitType())) |
2614 | return 0; |
2615 | |
2616 | return m_jitCode->dfgCommon()->dfgIdentifiers.size(); |
2617 | } |
2618 | |
2619 | const Identifier& CodeBlock::identifier(int index) const |
2620 | { |
2621 | size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers(); |
2622 | if (static_cast<unsigned>(index) < unlinkedIdentifiers) |
2623 | return m_unlinkedCode->identifier(index); |
2624 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2625 | return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers]; |
2626 | } |
2627 | #endif // ENABLE(DFG_JIT) |
2628 | |
2629 | void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles) |
2630 | { |
2631 | ConcurrentJSLocker locker(m_lock); |
2632 | |
2633 | numberOfLiveNonArgumentValueProfiles = 0; |
2634 | numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full. |
2635 | |
2636 | forEachValueProfile([&](ValueProfile& profile) { |
2637 | unsigned numSamples = profile.totalNumberOfSamples(); |
2638 | if (numSamples > ValueProfile::numberOfBuckets) |
2639 | numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight. |
2640 | numberOfSamplesInProfiles += numSamples; |
2641 | if (profile.m_bytecodeOffset < 0) { |
2642 | profile.computeUpdatedPrediction(locker); |
2643 | return; |
2644 | } |
2645 | if (profile.numberOfSamples() || profile.m_prediction != SpecNone) |
2646 | numberOfLiveNonArgumentValueProfiles++; |
2647 | profile.computeUpdatedPrediction(locker); |
2648 | }); |
2649 | |
2650 | if (auto* rareData = m_rareData.get()) { |
2651 | for (auto& profileBucket : rareData->m_catchProfiles) { |
2652 | profileBucket->forEach([&] (ValueProfileAndOperand& profile) { |
2653 | profile.m_profile.computeUpdatedPrediction(locker); |
2654 | }); |
2655 | } |
2656 | } |
2657 | |
2658 | #if ENABLE(DFG_JIT) |
2659 | lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker); |
2660 | #endif |
2661 | } |
2662 | |
2663 | void CodeBlock::updateAllValueProfilePredictions() |
2664 | { |
2665 | unsigned ignoredValue1, ignoredValue2; |
2666 | updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2); |
2667 | } |
2668 | |
2669 | void CodeBlock::updateAllArrayPredictions() |
2670 | { |
2671 | ConcurrentJSLocker locker(m_lock); |
2672 | |
2673 | forEachArrayProfile([&](ArrayProfile& profile) { |
2674 | profile.computeUpdatedPrediction(locker, this); |
2675 | }); |
2676 | |
2677 | forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) { |
2678 | profile.updateProfile(); |
2679 | }); |
2680 | } |
2681 | |
2682 | void CodeBlock::updateAllPredictions() |
2683 | { |
2684 | updateAllValueProfilePredictions(); |
2685 | updateAllArrayPredictions(); |
2686 | } |
2687 | |
2688 | bool CodeBlock::shouldOptimizeNow() |
2689 | { |
2690 | if (Options::verboseOSR()) |
2691 | dataLog("Considering optimizing " , *this, "...\n" ); |
2692 | |
2693 | if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay()) |
2694 | return true; |
2695 | |
2696 | updateAllArrayPredictions(); |
2697 | |
2698 | unsigned numberOfLiveNonArgumentValueProfiles; |
2699 | unsigned numberOfSamplesInProfiles; |
2700 | updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles); |
2701 | |
2702 | if (Options::verboseOSR()) { |
2703 | dataLogF( |
2704 | "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n" , |
2705 | (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(), |
2706 | numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(), |
2707 | (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(), |
2708 | numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles()); |
2709 | } |
2710 | |
2711 | if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate()) |
2712 | && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate()) |
2713 | && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay()) |
2714 | return true; |
2715 | |
2716 | ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max()); |
2717 | m_optimizationDelayCounter++; |
2718 | optimizeAfterWarmUp(); |
2719 | return false; |
2720 | } |
2721 | |
2722 | #if ENABLE(DFG_JIT) |
2723 | void CodeBlock::tallyFrequentExitSites() |
2724 | { |
2725 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2726 | ASSERT(alternative()->jitType() == JITType::BaselineJIT); |
2727 | |
2728 | CodeBlock* profiledBlock = alternative(); |
2729 | |
2730 | switch (jitType()) { |
2731 | case JITType::DFGJIT: { |
2732 | DFG::JITCode* jitCode = m_jitCode->dfg(); |
2733 | for (auto& exit : jitCode->osrExit) |
2734 | exit.considerAddingAsFrequentExitSite(profiledBlock); |
2735 | break; |
2736 | } |
2737 | |
2738 | #if ENABLE(FTL_JIT) |
2739 | case JITType::FTLJIT: { |
2740 | // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit |
2741 | // vector contains a totally different type, that just so happens to behave like |
2742 | // DFG::JITCode::osrExit. |
2743 | FTL::JITCode* jitCode = m_jitCode->ftl(); |
2744 | for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) { |
2745 | FTL::OSRExit& exit = jitCode->osrExit[i]; |
2746 | exit.considerAddingAsFrequentExitSite(profiledBlock); |
2747 | } |
2748 | break; |
2749 | } |
2750 | #endif |
2751 | |
2752 | default: |
2753 | RELEASE_ASSERT_NOT_REACHED(); |
2754 | break; |
2755 | } |
2756 | } |
2757 | #endif // ENABLE(DFG_JIT) |
2758 | |
2759 | void CodeBlock::notifyLexicalBindingUpdate() |
2760 | { |
    // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment,
    // so this early return should be removed once that is fixed.
    // https://bugs.webkit.org/show_bug.cgi?id=193347
2763 | if (scriptMode() == JSParserScriptMode::Module) |
2764 | return; |
2765 | JSGlobalObject* globalObject = m_globalObject.get(); |
2766 | JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope()); |
2767 | SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable(); |
2768 | |
2769 | ConcurrentJSLocker locker(m_lock); |
2770 | |
2771 | auto isShadowed = [&] (UniquedStringImpl* uid) { |
2772 | ConcurrentJSLocker locker(symbolTable->m_lock); |
2773 | return symbolTable->contains(locker, uid); |
2774 | }; |
2775 | |
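    // Illustrative example of the shadowing this loop detects:
    //
    //     this.x = 1; // script A: plain property of the global object
    //     let x = 2;  // script B: global lexical binding now shadows it
    //
    // Any op_resolve_scope that resolved |x| as a GlobalProperty must re-resolve
    // on a now-shadowed name, which we force by zeroing its binding epoch below.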
2776 | const InstructionStream& instructionStream = instructions(); |
2777 | for (const auto& instruction : instructionStream) { |
2778 | OpcodeID opcodeID = instruction->opcodeID(); |
2779 | switch (opcodeID) { |
2780 | case op_resolve_scope: { |
2781 | auto bytecode = instruction->as<OpResolveScope>(); |
2782 | auto& metadata = bytecode.metadata(this); |
2783 | ResolveType originalResolveType = metadata.m_resolveType; |
2784 | if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) { |
2785 | const Identifier& ident = identifier(bytecode.m_var); |
2786 | if (isShadowed(ident.impl())) |
2787 | metadata.m_globalLexicalBindingEpoch = 0; |
2788 | else |
2789 | metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch(); |
2790 | } |
2791 | break; |
2792 | } |
2793 | default: |
2794 | break; |
2795 | } |
2796 | } |
2797 | } |
2798 | |
2799 | #if ENABLE(VERBOSE_VALUE_PROFILE) |
2800 | void CodeBlock::dumpValueProfiles() |
2801 | { |
2802 | dataLog("ValueProfile for " , *this, ":\n" ); |
2803 | forEachValueProfile([](ValueProfile& profile) { |
2804 | if (profile.m_bytecodeOffset < 0) { |
2805 | ASSERT(profile.m_bytecodeOffset == -1); |
2806 | dataLogF(" arg = %u: " , i); |
2807 | } else |
2808 | dataLogF(" bc = %d: " , profile.m_bytecodeOffset); |
2809 | if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) { |
2810 | dataLogF("<empty>\n" ); |
2811 | continue; |
2812 | } |
2813 | profile.dump(WTF::dataFile()); |
2814 | dataLogF("\n" ); |
2815 | }); |
2816 | dataLog("RareCaseProfile for " , *this, ":\n" ); |
2817 | if (auto* jitData = m_jitData.get()) { |
2818 | for (RareCaseProfile* profile : jitData->m_rareCaseProfiles) |
2819 | dataLogF(" bc = %d: %u\n" , profile->m_bytecodeOffset, profile->m_counter); |
2820 | } |
2821 | } |
2822 | #endif // ENABLE(VERBOSE_VALUE_PROFILE) |
2823 | |
2824 | unsigned CodeBlock::frameRegisterCount() |
2825 | { |
2826 | switch (jitType()) { |
2827 | case JITType::InterpreterThunk: |
2828 | return LLInt::frameRegisterCountFor(this); |
2829 | |
2830 | #if ENABLE(JIT) |
2831 | case JITType::BaselineJIT: |
2832 | return JIT::frameRegisterCountFor(this); |
2833 | #endif // ENABLE(JIT) |
2834 | |
2835 | #if ENABLE(DFG_JIT) |
2836 | case JITType::DFGJIT: |
2837 | case JITType::FTLJIT: |
2838 | return jitCode()->dfgCommon()->frameRegisterCount; |
2839 | #endif // ENABLE(DFG_JIT) |
2840 | |
2841 | default: |
2842 | RELEASE_ASSERT_NOT_REACHED(); |
2843 | return 0; |
2844 | } |
2845 | } |
2846 | |
2847 | int CodeBlock::stackPointerOffset() |
2848 | { |
2849 | return virtualRegisterForLocal(frameRegisterCount() - 1).offset(); |
2850 | } |
2851 | |
2852 | size_t CodeBlock::predictedMachineCodeSize() |
2853 | { |
2854 | VM* vm = m_vm; |
2855 | // This will be called from CodeBlock::CodeBlock before either m_vm or the |
2856 | // instructions have been initialized. It's OK to return 0 because what will really |
2857 | // matter is the recomputation of this value when the slow path is triggered. |
2858 | if (!vm) |
2859 | return 0; |
2860 | |
2861 | if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT) |
        return 0; // It's as good a prediction as we'll get.
2863 | |
    // Be conservative: estimate mean plus one standard deviation, which (for a
    // roughly normal distribution) overestimates about 84% of the time.
2865 | double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() + |
2866 | vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation(); |
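    // Worked example with hypothetical numbers: a mean of 4.5 machine code bytes
    // per unit of bytecode cost and a standard deviation of 1.5 yield a
    // multiplier of 6.0, so a block with bytecodeCost() == 200 predicts 1200 bytes.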
2867 | |
    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
    // here is OK, since this whole method is just a heuristic.
2870 | if (multiplier < 0 || multiplier > 1000) |
2871 | return 0; |
2872 | |
2873 | double doubleResult = multiplier * bytecodeCost(); |
2874 | |
2875 | // Be even more paranoid: silently reject values that won't fit into a size_t. If |
2876 | // the function is so huge that we can't even fit it into virtual memory then we |
2877 | // should probably have some other guards in place to prevent us from even getting |
2878 | // to this point. |
2879 | if (doubleResult > std::numeric_limits<size_t>::max()) |
2880 | return 0; |
2881 | |
2882 | return static_cast<size_t>(doubleResult); |
2883 | } |
2884 | |
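// Best-effort reverse mapping from a VirtualRegister to a source-level name.
// Illustrative outputs: a local tracked by a symbol table in the constant pool
// yields its identifier, the this register yields "this", argument 0 yields
// "arguments[  0]" (padded to width 3), and anything else yields the empty string.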
2885 | String CodeBlock::nameForRegister(VirtualRegister virtualRegister) |
2886 | { |
2887 | for (auto& constantRegister : m_constantRegisters) { |
2888 | if (constantRegister.get().isEmpty()) |
2889 | continue; |
2890 | if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) { |
2891 | ConcurrentJSLocker locker(symbolTable->m_lock); |
2892 | auto end = symbolTable->end(locker); |
2893 | for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) { |
2894 | if (ptr->value.varOffset() == VarOffset(virtualRegister)) { |
2895 | // FIXME: This won't work from the compilation thread. |
2896 | // https://bugs.webkit.org/show_bug.cgi?id=115300 |
2897 | return ptr->key.get(); |
2898 | } |
2899 | } |
2900 | } |
2901 | } |
2902 | if (virtualRegister == thisRegister()) |
2903 | return "this"_s ; |
2904 | if (virtualRegister.isArgument()) |
2905 | return makeString("arguments[" , pad(' ', 3, virtualRegister.toArgument()), ']'); |
2906 | |
2907 | return emptyString(); |
2908 | } |
2909 | |
2910 | ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset) |
2911 | { |
2912 | auto instruction = instructions().at(bytecodeOffset); |
2913 | switch (instruction->opcodeID()) { |
2914 | |
2915 | #define CASE(Op) \ |
2916 | case Op::opcodeID: \ |
2917 | return &instruction->as<Op>().metadata(this).m_profile; |
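    // For instance, CASE(OpGetById) expands to (roughly):
    //
    //     case OpGetById::opcodeID:
    //         return &instruction->as<OpGetById>().metadata(this).m_profile;
    //
    // OpGetById is just one illustrative opcode; the actual set is whatever
    // FOR_EACH_OPCODE_WITH_VALUE_PROFILE supplies.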
2918 | |
2919 | FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE) |
2920 | |
2921 | #undef CASE |
2922 | |
2923 | default: |
2924 | return nullptr; |
2925 | |
2926 | } |
2927 | } |
2928 | |
2929 | SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) |
2930 | { |
2931 | if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset)) |
2932 | return valueProfile->computeUpdatedPrediction(locker); |
2933 | return SpecNone; |
2934 | } |
2935 | |
2936 | ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset) |
2937 | { |
2938 | return *tryGetValueProfileForBytecodeOffset(bytecodeOffset); |
2939 | } |
2940 | |
2941 | void CodeBlock::validate() |
2942 | { |
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
2944 | |
2945 | FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0); |
2946 | |
2947 | if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) { |
2948 | beginValidationDidFail(); |
2949 | dataLog(" Wrong number of bits in result!\n" ); |
2950 | dataLog(" Result: " , liveAtHead, "\n" ); |
2951 | dataLog(" Bit count: " , liveAtHead.numBits(), "\n" ); |
2952 | endValidationDidFail(); |
2953 | } |
2954 | |
2955 | for (unsigned i = m_numCalleeLocals; i--;) { |
2956 | VirtualRegister reg = virtualRegisterForLocal(i); |
2957 | |
2958 | if (liveAtHead[i]) { |
2959 | beginValidationDidFail(); |
2960 | dataLog(" Variable " , reg, " is expected to be dead.\n" ); |
2961 | dataLog(" Result: " , liveAtHead, "\n" ); |
2962 | endValidationDidFail(); |
2963 | } |
2964 | } |
2965 | |
2966 | const InstructionStream& instructionStream = instructions(); |
2967 | for (const auto& instruction : instructionStream) { |
2968 | OpcodeID opcode = instruction->opcodeID(); |
2969 | if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) { |
            if (opcode == op_catch || opcode == op_enter) {
                // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
                // inside of a try block because they are responsible for bootstrapping state. And they
                // are never allowed to throw an exception because of this. We rely on this when compiling
                // in the DFG. Because an entrypoint never throws, the bytecode generator will never
                // allow one inside a try block.
                beginValidationDidFail();
                dataLog("    entrypoint not allowed inside a try block.");
2978 | endValidationDidFail(); |
2979 | } |
2980 | } |
2981 | } |
2982 | } |
2983 | |
2984 | void CodeBlock::beginValidationDidFail() |
2985 | { |
2986 | dataLog("Validation failure in " , *this, ":\n" ); |
2987 | dataLog("\n" ); |
2988 | } |
2989 | |
2990 | void CodeBlock::endValidationDidFail() |
2991 | { |
2992 | dataLog("\n" ); |
2993 | dumpBytecode(); |
2994 | dataLog("\n" ); |
2995 | dataLog("Validation failure.\n" ); |
2996 | RELEASE_ASSERT_NOT_REACHED(); |
2997 | } |
2998 | |
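// Debugger support: optimized (DFG/FTL) code cannot honor breakpoints or
// stepping, so installing either jettisons the optimized code and lets execution
// fall back to the lower tiers.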
2999 | void CodeBlock::addBreakpoint(unsigned numBreakpoints) |
3000 | { |
3001 | m_numBreakpoints += numBreakpoints; |
3002 | ASSERT(m_numBreakpoints); |
3003 | if (JITCode::isOptimizingJIT(jitType())) |
3004 | jettison(Profiler::JettisonDueToDebuggerBreakpoint); |
3005 | } |
3006 | |
3007 | void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode) |
3008 | { |
3009 | m_steppingMode = mode; |
3010 | if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType())) |
3011 | jettison(Profiler::JettisonDueToDebuggerStepping); |
3012 | } |
3013 | |
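// Jump displacements that do not fit a narrow bytecode operand are stored out of
// line in the unlinked code, keyed by the branch's bytecode offset. Illustrative
// example: a jump at bytecode offset 10 with a stored displacement of 200 targets
// the instruction at offset 210.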
3014 | int CodeBlock::outOfLineJumpOffset(const Instruction* pc) |
3015 | { |
3016 | int offset = bytecodeOffset(pc); |
3017 | return m_unlinkedCode->outOfLineJumpOffset(offset); |
3018 | } |
3019 | |
3020 | const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc) |
3021 | { |
3022 | int offset = bytecodeOffset(pc); |
3023 | int target = m_unlinkedCode->outOfLineJumpOffset(offset); |
3024 | return instructions().at(offset + target).ptr(); |
3025 | } |
3026 | |
3027 | ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset) |
3028 | { |
3029 | return arithProfileForPC(instructions().at(bytecodeOffset).ptr()); |
3030 | } |
3031 | |
3032 | ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc) |
3033 | { |
3034 | switch (pc->opcodeID()) { |
3035 | case op_negate: |
3036 | return &pc->as<OpNegate>().metadata(this).m_arithProfile; |
3037 | case op_add: |
3038 | return &pc->as<OpAdd>().metadata(this).m_arithProfile; |
3039 | case op_mul: |
3040 | return &pc->as<OpMul>().metadata(this).m_arithProfile; |
3041 | case op_sub: |
3042 | return &pc->as<OpSub>().metadata(this).m_arithProfile; |
3043 | case op_div: |
3044 | return &pc->as<OpDiv>().metadata(this).m_arithProfile; |
3045 | default: |
3046 | break; |
3047 | } |
3048 | |
3049 | return nullptr; |
3050 | } |
3051 | |
3052 | bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset) |
3053 | { |
3054 | if (!hasBaselineJITProfiling()) |
3055 | return false; |
3056 | ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset); |
3057 | if (!profile) |
3058 | return false; |
3059 | return profile->tookSpecialFastPath(); |
3060 | } |
3061 | |
3062 | #if ENABLE(JIT) |
3063 | DFG::CapabilityLevel CodeBlock::capabilityLevel() |
3064 | { |
3065 | DFG::CapabilityLevel result = computeCapabilityLevel(); |
3066 | m_capabilityLevelState = result; |
3067 | return result; |
3068 | } |
3069 | #endif |
3070 | |
3071 | void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler() |
3072 | { |
3073 | if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets()) |
3074 | return; |
3075 | const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); |
3076 | for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { |
3077 | // Because op_profile_control_flow is emitted at the beginning of every basic block, finding |
3078 | // the next op_profile_control_flow will give us the text range of a single basic block. |
3079 | size_t startIdx = bytecodeOffsets[i]; |
3080 | auto instruction = instructions().at(startIdx); |
3081 | RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow); |
3082 | auto bytecode = instruction->as<OpProfileControlFlow>(); |
3083 | auto& metadata = bytecode.metadata(this); |
3084 | int basicBlockStartOffset = bytecode.m_textOffset; |
3085 | int basicBlockEndOffset; |
3086 | if (i + 1 < offsetsLength) { |
3087 | size_t endIdx = bytecodeOffsets[i + 1]; |
3088 | auto endInstruction = instructions().at(endIdx); |
3089 | RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow); |
3090 | basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1; |
3091 | } else { |
3092 | basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace. |
3093 | basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before. |
3094 | } |
3095 | |
3096 | // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more |
3097 | // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than |
3098 | // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node |
3099 | // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different |
3100 | // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript |
3101 | // program. The condition: |
3102 | // (basicBlockEndOffset < basicBlockStartOffset) |
3103 | // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic |
3104 | // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These |
3105 | // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same |
3106 | // internal data structure, so if any of them execute, it will record the same textual basic block in the |
3107 | // JavaScript program as executing. |
3108 | // At the bytecode level, this situation looks like: |
3109 | // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset) |
3110 | // ... |
3111 | // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m). |
3112 | // ... |
3113 | // m: op_profile_control_flow |
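        // In source terms (illustrative), a finally clause such as:
        //     try { f(); } finally { g(); }
        // has its body emitted once for the normal completion path and once for
        // the exceptional path, which is one way such duplicated bytecode basic
        // blocks arise.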
3114 | if (basicBlockEndOffset < basicBlockStartOffset) { |
3115 | RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. |
3116 | metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); |
3117 | continue; |
3118 | } |
3119 | |
3120 | BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset); |
3121 | |
3122 | // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset] |
3123 | // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation. |
3124 | // This is necessary because in the original source text of a JavaScript program, |
        // function literals form new basic block boundaries, but they aren't represented
3126 | // inside the CodeBlock's instruction stream. |
3127 | auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) { |
3128 | const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable(); |
3129 | int functionStart = executable->typeProfilingStartOffset(); |
3130 | int functionEnd = executable->typeProfilingEndOffset(); |
3131 | if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset) |
3132 | basicBlockLocation->insertGap(functionStart, functionEnd); |
3133 | }; |
3134 | |
3135 | for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls) |
3136 | insertFunctionGaps(executable); |
3137 | for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs) |
3138 | insertFunctionGaps(executable); |
3139 | |
3140 | metadata.m_basicBlockLocation = basicBlockLocation; |
3141 | } |
3142 | } |
3143 | |
3144 | #if ENABLE(JIT) |
3145 | void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) |
3146 | { |
3147 | ConcurrentJSLocker locker(m_lock); |
3148 | ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map); |
3149 | } |
3150 | |
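// Best-effort mapping from a machine PC back to a CodeOrigin: consult the
// PCToCodeOriginMap first, then any inline-cache stubs containing the PC, and
// finally ask the JIT code itself.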
3151 | Optional<CodeOrigin> CodeBlock::findPC(void* pc) |
3152 | { |
3153 | { |
3154 | ConcurrentJSLocker locker(m_lock); |
3155 | if (auto* jitData = m_jitData.get()) { |
3156 | if (jitData->m_pcToCodeOriginMap) { |
3157 | if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc)) |
3158 | return codeOrigin; |
3159 | } |
3160 | |
3161 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) { |
3162 | if (stubInfo->containsPC(pc)) |
3163 | return Optional<CodeOrigin>(stubInfo->codeOrigin); |
3164 | } |
3165 | } |
3166 | } |
3167 | |
3168 | if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc)) |
3169 | return codeOrigin; |
3170 | |
3171 | return WTF::nullopt; |
3172 | } |
3173 | #endif // ENABLE(JIT) |
3174 | |
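// How a CallSiteIndex decodes depends on the tier: for LLInt/Baseline it holds
// the bytecode offset directly on 64-bit and an Instruction pointer on 32-bit;
// for the DFG/FTL it keys into the CodeOrigin table.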
3175 | Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex) |
3176 | { |
3177 | Optional<unsigned> bytecodeOffset; |
3178 | JITType jitType = this->jitType(); |
3179 | if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) { |
3180 | #if USE(JSVALUE64) |
3181 | bytecodeOffset = callSiteIndex.bits(); |
3182 | #else |
3183 | Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits()); |
3184 | bytecodeOffset = this->bytecodeOffset(instruction); |
3185 | #endif |
3186 | } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) { |
3187 | #if ENABLE(DFG_JIT) |
3188 | RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex)); |
3189 | CodeOrigin origin = codeOrigin(callSiteIndex); |
3190 | bytecodeOffset = origin.bytecodeIndex(); |
3191 | #else |
3192 | RELEASE_ASSERT_NOT_REACHED(); |
3193 | #endif |
3194 | } |
3195 | |
3196 | return bytecodeOffset; |
3197 | } |
3198 | |
3199 | int32_t CodeBlock::thresholdForJIT(int32_t threshold) |
3200 | { |
3201 | switch (unlinkedCodeBlock()->didOptimize()) { |
3202 | case MixedTriState: |
3203 | return threshold; |
3204 | case FalseTriState: |
3205 | return threshold * 4; |
3206 | case TrueTriState: |
3207 | return threshold / 2; |
3208 | } |
3209 | ASSERT_NOT_REACHED(); |
3210 | return threshold; |
3211 | } |
3212 | |
3213 | void CodeBlock::jitAfterWarmUp() |
3214 | { |
3215 | m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this); |
3216 | } |
3217 | |
3218 | void CodeBlock::jitSoon() |
3219 | { |
3220 | m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this); |
3221 | } |
3222 | |
3223 | bool CodeBlock::hasInstalledVMTrapBreakpoints() const |
3224 | { |
3225 | #if ENABLE(SIGNAL_BASED_VM_TRAPS) |
3226 | // This function may be called from a signal handler. We need to be |
3227 | // careful to not call anything that is not signal handler safe, e.g. |
3228 | // we should not perturb the refCount of m_jitCode. |
3229 | if (!JITCode::isOptimizingJIT(jitType())) |
3230 | return false; |
3231 | return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints(); |
3232 | #else |
3233 | return false; |
3234 | #endif |
3235 | } |
3236 | |
3237 | bool CodeBlock::installVMTrapBreakpoints() |
3238 | { |
3239 | #if ENABLE(SIGNAL_BASED_VM_TRAPS) |
3240 | // This function may be called from a signal handler. We need to be |
3241 | // careful to not call anything that is not signal handler safe, e.g. |
3242 | // we should not perturb the refCount of m_jitCode. |
3243 | if (!JITCode::isOptimizingJIT(jitType())) |
3244 | return false; |
3245 | auto& commonData = *m_jitCode->dfgCommon(); |
3246 | commonData.installVMTrapBreakpoints(this); |
3247 | return true; |
3248 | #else |
3249 | UNREACHABLE_FOR_PLATFORM(); |
3250 | return false; |
3251 | #endif |
3252 | } |
3253 | |
3254 | void CodeBlock::dumpMathICStats() |
3255 | { |
3256 | #if ENABLE(MATH_IC_STATS) |
3257 | double numAdds = 0.0; |
3258 | double totalAddSize = 0.0; |
3259 | double numMuls = 0.0; |
3260 | double totalMulSize = 0.0; |
3261 | double numNegs = 0.0; |
3262 | double totalNegSize = 0.0; |
3263 | double numSubs = 0.0; |
3264 | double totalSubSize = 0.0; |
3265 | |
3266 | auto countICs = [&] (CodeBlock* codeBlock) { |
3267 | if (auto* jitData = codeBlock->m_jitData.get()) { |
3268 | for (JITAddIC* addIC : jitData->m_addICs) { |
3269 | numAdds++; |
3270 | totalAddSize += addIC->codeSize(); |
3271 | } |
3272 | |
3273 | for (JITMulIC* mulIC : jitData->m_mulICs) { |
3274 | numMuls++; |
3275 | totalMulSize += mulIC->codeSize(); |
3276 | } |
3277 | |
3278 | for (JITNegIC* negIC : jitData->m_negICs) { |
3279 | numNegs++; |
3280 | totalNegSize += negIC->codeSize(); |
3281 | } |
3282 | |
3283 | for (JITSubIC* subIC : jitData->m_subICs) { |
3284 | numSubs++; |
3285 | totalSubSize += subIC->codeSize(); |
3286 | } |
3287 | } |
3288 | }; |
3289 | heap()->forEachCodeBlock(countICs); |
3290 | |
3291 | dataLog("Num Adds: " , numAdds, "\n" ); |
3292 | dataLog("Total Add size in bytes: " , totalAddSize, "\n" ); |
3293 | dataLog("Average Add size: " , totalAddSize / numAdds, "\n" ); |
3294 | dataLog("\n" ); |
3295 | dataLog("Num Muls: " , numMuls, "\n" ); |
3296 | dataLog("Total Mul size in bytes: " , totalMulSize, "\n" ); |
3297 | dataLog("Average Mul size: " , totalMulSize / numMuls, "\n" ); |
3298 | dataLog("\n" ); |
3299 | dataLog("Num Negs: " , numNegs, "\n" ); |
3300 | dataLog("Total Neg size in bytes: " , totalNegSize, "\n" ); |
3301 | dataLog("Average Neg size: " , totalNegSize / numNegs, "\n" ); |
3302 | dataLog("\n" ); |
3303 | dataLog("Num Subs: " , numSubs, "\n" ); |
3304 | dataLog("Total Sub size in bytes: " , totalSubSize, "\n" ); |
3305 | dataLog("Average Sub size: " , totalSubSize / numSubs, "\n" ); |
3306 | |
3307 | dataLog("-----------------------\n" ); |
3308 | #endif |
3309 | } |
3310 | |
3311 | void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock) |
3312 | { |
3313 | Printer::setPrinter(record, toCString(codeBlock)); |
3314 | } |
3315 | |
3316 | } // namespace JSC |
3317 | |
3318 | namespace WTF { |
3319 | |
3320 | void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock) |
3321 | { |
3322 | if (UNLIKELY(!codeBlock)) { |
3323 | out.print("<null codeBlock>" ); |
3324 | return; |
3325 | } |
3326 | out.print(*codeBlock); |
3327 | } |
3328 | |
3329 | } // namespace WTF |
3330 | |