1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGByteCodeParser.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "ArithProfile.h"
32#include "ArrayConstructor.h"
33#include "BasicBlockLocation.h"
34#include "BuiltinNames.h"
35#include "BytecodeStructs.h"
36#include "CallLinkStatus.h"
37#include "CodeBlock.h"
38#include "CodeBlockWithJITType.h"
39#include "CommonSlowPaths.h"
40#include "DFGAbstractHeap.h"
41#include "DFGArrayMode.h"
42#include "DFGCFG.h"
43#include "DFGCapabilities.h"
44#include "DFGClobberize.h"
45#include "DFGClobbersExitState.h"
46#include "DFGGraph.h"
47#include "DFGJITCode.h"
48#include "FunctionCodeBlock.h"
49#include "GetByIdStatus.h"
50#include "Heap.h"
51#include "InByIdStatus.h"
52#include "InstanceOfStatus.h"
53#include "JSCInlines.h"
54#include "JSFixedArray.h"
55#include "JSImmutableButterfly.h"
56#include "JSModuleEnvironment.h"
57#include "JSModuleNamespaceObject.h"
58#include "NumberConstructor.h"
59#include "ObjectConstructor.h"
60#include "OpcodeInlines.h"
61#include "PreciseJumpTargets.h"
62#include "PutByIdFlags.h"
63#include "PutByIdStatus.h"
64#include "RegExpPrototype.h"
65#include "StackAlignment.h"
66#include "StringConstructor.h"
67#include "StructureStubInfo.h"
68#include "SymbolConstructor.h"
69#include "Watchdog.h"
70#include <wtf/CommaPrinter.h>
71#include <wtf/HashMap.h>
72#include <wtf/MathExtras.h>
73#include <wtf/SetForScope.h>
74#include <wtf/StdLibExtras.h>
75
76namespace JSC { namespace DFG {
77
78namespace DFGByteCodeParserInternal {
79#ifdef NDEBUG
80static const bool verbose = false;
81#else
82static const bool verbose = true;
83#endif
84} // namespace DFGByteCodeParserInternal
85
86#define VERBOSE_LOG(...) do { \
87if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88dataLog(__VA_ARGS__); \
89} while (false)
90
91// === ByteCodeParser ===
92//
93// This class is used to compile the dataflow graph from a CodeBlock.
94class ByteCodeParser {
95public:
96 ByteCodeParser(Graph& graph)
97 : m_vm(&graph.m_vm)
98 , m_codeBlock(graph.m_codeBlock)
99 , m_profiledBlock(graph.m_profiledBlock)
100 , m_graph(graph)
101 , m_currentBlock(0)
102 , m_currentIndex(0)
103 , m_constantUndefined(graph.freeze(jsUndefined()))
104 , m_constantNull(graph.freeze(jsNull()))
105 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106 , m_constantOne(graph.freeze(jsNumber(1)))
107 , m_numArguments(m_codeBlock->numParameters())
108 , m_numLocals(m_codeBlock->numCalleeLocals())
109 , m_parameterSlots(0)
110 , m_numPassedVarArgs(0)
111 , m_inlineStackTop(0)
112 , m_currentInstruction(0)
113 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
114 {
115 ASSERT(m_profiledBlock);
116 }
117
118 // Parse a full CodeBlock of bytecode.
119 void parse();
120
121private:
122 struct InlineStackEntry;
123
124 // Just parse from m_currentIndex to the end of the current CodeBlock.
125 void parseCodeBlock();
126
127 void ensureLocals(unsigned newNumLocals)
128 {
129 VERBOSE_LOG(" ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130 if (newNumLocals <= m_numLocals)
131 return;
132 m_numLocals = newNumLocals;
133 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134 m_graph.block(i)->ensureLocals(newNumLocals);
135 }
136
137 // Helper for min and max.
138 template<typename ChecksFunctor>
139 bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
140
141 void refineStatically(CallLinkStatus&, Node* callTarget);
142 // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143 // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144 // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145 // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146 // than to move the right index all the way to the treatment of op_ret.
147 BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148 BasicBlock* allocateUntargetableBlock();
    // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once; the conversion can never go in the other direction.
150 void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
151 void addJumpTo(BasicBlock*);
152 void addJumpTo(unsigned bytecodeIndex);
153 // Handle calls. This resolves issues surrounding inlining and intrinsics.
154 enum Terminality { Terminal, NonTerminal };
155 Terminality handleCall(
156 VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158 SpeculatedType prediction);
159 template<typename CallOp>
160 Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161 template<typename CallOp>
162 Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
    void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
164 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165 Node* getArgumentCount();
166 template<typename ChecksFunctor>
167 bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170 bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171 unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172 enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173 CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174 CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175 template<typename ChecksFunctor>
176 void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178 template<typename ChecksFunctor>
179 bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180 template<typename ChecksFunctor>
181 bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182 template<typename ChecksFunctor>
183 bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184 template<typename ChecksFunctor>
185 bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186 template<typename ChecksFunctor>
187 bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190 bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191 bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
192
193 template<typename Bytecode>
194 void handlePutByVal(Bytecode, unsigned instructionSize);
195 template <typename Bytecode>
196 void handlePutAccessorById(NodeType, Bytecode);
197 template <typename Bytecode>
198 void handlePutAccessorByVal(NodeType, Bytecode);
199 template <typename Bytecode>
200 void handleNewFunc(NodeType, Bytecode);
201 template <typename Bytecode>
202 void handleNewFuncExp(NodeType, Bytecode);
203
204 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206 ObjectPropertyCondition presenceLike(
207 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
208
209 // Attempt to watch the presence of a property. It will watch that the property is present in the same
210 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211 // Returns true if this all works out.
212 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
214
215 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216 template<typename VariantType>
217 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
218
219 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
220
221 template<typename Op>
222 void parseGetById(const Instruction*);
223 void handleGetById(
224 VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
225 void emitPutById(
226 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
227 void handlePutById(
228 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
        bool isDirect, unsigned instructionSize);
230
231 // Either register a watchpoint or emit a check for this condition. Returns false if the
232 // condition no longer holds, and therefore no reasonable check can be emitted.
233 bool check(const ObjectPropertyCondition&);
234
235 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
236
237 // Either register a watchpoint or emit a check for this condition. It must be a Presence
238 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239 // Emits code for the loaded value that the condition guards, and returns a node containing
240 // the loaded value. Returns null if the condition no longer holds.
241 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
244
245 // Calls check() for each condition in the set: that is, it either emits checks or registers
246 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247 // conditions are no longer checkable, returns false.
248 bool check(const ObjectPropertyConditionSet&);
249
250 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251 // base. Does a combination of watchpoint registration and check emission to guard the
252 // conditions, and emits code to load the value from the slot base. Returns a node containing
253 // the loaded value. Returns null if any of the conditions were no longer checkable.
254 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
256
257 void prepareToParseBlock();
258 void clearCaches();
259
260 // Parse a single basic block of bytecode instructions.
261 void parseBlock(unsigned limit);
262 // Link block successors.
263 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264 void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
265
266 VariableAccessData* newVariableAccessData(VirtualRegister operand)
267 {
268 ASSERT(!operand.isConstant());
269
270 m_graph.m_variableAccessData.append(VariableAccessData(operand));
271 return &m_graph.m_variableAccessData.last();
272 }
273
274 // Get/Set the operands/result of a bytecode instruction.
275 Node* getDirect(VirtualRegister operand)
276 {
277 ASSERT(!operand.isConstant());
278
279 // Is this an argument?
280 if (operand.isArgument())
281 return getArgument(operand);
282
283 // Must be a local.
284 return getLocal(operand);
285 }
286
287 Node* get(VirtualRegister operand)
288 {
289 if (operand.isConstant()) {
290 unsigned constantIndex = operand.toConstantIndex();
291 unsigned oldSize = m_constants.size();
292 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294 JSValue value = codeBlock.getConstant(operand.offset());
295 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296 if (constantIndex >= oldSize) {
297 m_constants.grow(constantIndex + 1);
298 for (unsigned i = oldSize; i < m_constants.size(); ++i)
299 m_constants[i] = nullptr;
300 }
301
302 Node* constantNode = nullptr;
303 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
305 else
306 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307 m_constants[constantIndex] = constantNode;
308 }
309 ASSERT(m_constants[constantIndex]);
310 return m_constants[constantIndex];
311 }
312
313 if (inlineCallFrame()) {
314 if (!inlineCallFrame()->isClosureCall) {
315 JSFunction* callee = inlineCallFrame()->calleeConstant();
316 if (operand.offset() == CallFrameSlot::callee)
317 return weakJSConstant(callee);
318 }
319 } else if (operand.offset() == CallFrameSlot::callee) {
320 // We have to do some constant-folding here because this enables CreateThis folding. Note
321 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322 // case if the function is a singleton then we already know it.
323 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324 InferredValue* singleton = executable->singletonFunction();
325 if (JSValue value = singleton->inferredValue()) {
326 m_graph.watchpoints().addLazily(singleton);
327 JSFunction* function = jsCast<JSFunction*>(value);
328 return weakJSConstant(function);
329 }
330 }
331 return addToGraph(GetCallee);
332 }
333
334 return getDirect(m_inlineStackTop->remapOperand(operand));
335 }
336
337 enum SetMode {
338 // A normal set which follows a two-phase commit that spans code origins. During
339 // the current code origin it issues a MovHint, and at the start of the next
340 // code origin there will be a SetLocal. If the local needs flushing, the second
341 // SetLocal will be preceded with a Flush.
342 NormalSet,
343
344 // A set where the SetLocal happens immediately and there is still a Flush. This
345 // is relevant when assigning to a local in tricky situations for the delayed
346 // SetLocal logic but where we know that we have not performed any side effects
347 // within this code origin. This is a safe replacement for NormalSet anytime we
348 // know that we have not yet performed side effects in this code origin.
349 ImmediateSetWithFlush,
350
351 // A set where the SetLocal happens immediately and we do not Flush it even if
352 // this is a local that is marked as needing it. This is relevant when
353 // initializing locals at the top of a function.
354 ImmediateNakedSet
355 };
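    // setDirect() emits the MovHint for the assignment right away and then either queues the
    // SetLocal (NormalSet) to be issued at the start of the next code origin, or executes it
    // immediately (the Immediate* modes). Exiting is disallowed until the queued SetLocals have
    // run, since the OSR exit state no longer matches the bytecode state.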
356 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
357 {
358 addToGraph(MovHint, OpInfo(operand.offset()), value);
359
360 // We can't exit anymore because our OSR exit state has changed.
361 m_exitOK = false;
362
363 DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
364
365 if (setMode == NormalSet) {
366 m_setLocalQueue.append(delayed);
367 return nullptr;
368 }
369
370 return delayed.execute(this);
371 }
372
373 void processSetLocalQueue()
374 {
375 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
376 m_setLocalQueue[i].execute(this);
377 m_setLocalQueue.shrink(0);
378 }
379
380 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
381 {
382 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
383 }
384
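    // Attach the lazy-operand value profile's prediction for this (bytecode index, operand) pair
    // to the GetLocal's VariableAccessData, so later phases can speculate on the local's type.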
385 Node* injectLazyOperandSpeculation(Node* node)
386 {
387 ASSERT(node->op() == GetLocal);
388 ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
389 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
390 LazyOperandValueProfileKey key(m_currentIndex, node->local());
391 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
392 node->variableAccessData()->predict(prediction);
393 return node;
394 }
395
396 // Used in implementing get/set, above, where the operand is a local variable.
397 Node* getLocal(VirtualRegister operand)
398 {
399 unsigned local = operand.toLocal();
400
401 Node* node = m_currentBlock->variablesAtTail.local(local);
402
403 // This has two goals: 1) link together variable access datas, and 2)
404 // try to avoid creating redundant GetLocals. (1) is required for
405 // correctness - no other phase will ensure that block-local variable
406 // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.
408
409 VariableAccessData* variable;
410
411 if (node) {
412 variable = node->variableAccessData();
413
414 switch (node->op()) {
415 case GetLocal:
416 return node;
417 case SetLocal:
418 return node->child1().node();
419 default:
420 break;
421 }
422 } else
423 variable = newVariableAccessData(operand);
424
425 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
426 m_currentBlock->variablesAtTail.local(local) = node;
427 return node;
428 }
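    // Used in implementing get/set, above, where the operand is a local variable. Flushes the
    // local first if it is aliased to an argument or is the scope register, then records
    // exit-profile-driven hints (structure check / array check hoisting failures) on the
    // variable before emitting the SetLocal.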
429 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
430 {
431 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
432
433 unsigned local = operand.toLocal();
434
435 if (setMode != ImmediateNakedSet) {
436 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
437 if (argumentPosition)
438 flushDirect(operand, argumentPosition);
439 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
440 flush(operand);
441 }
442
443 VariableAccessData* variableAccessData = newVariableAccessData(operand);
444 variableAccessData->mergeStructureCheckHoistingFailed(
445 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
446 variableAccessData->mergeCheckArrayHoistingFailed(
447 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
448 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
449 m_currentBlock->variablesAtTail.local(local) = node;
450 return node;
451 }
452
453 // Used in implementing get/set, above, where the operand is an argument.
454 Node* getArgument(VirtualRegister operand)
455 {
456 unsigned argument = operand.toArgument();
457 ASSERT(argument < m_numArguments);
458
459 Node* node = m_currentBlock->variablesAtTail.argument(argument);
460
461 VariableAccessData* variable;
462
463 if (node) {
464 variable = node->variableAccessData();
465
466 switch (node->op()) {
467 case GetLocal:
468 return node;
469 case SetLocal:
470 return node->child1().node();
471 default:
472 break;
473 }
474 } else
475 variable = newVariableAccessData(operand);
476
477 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
478 m_currentBlock->variablesAtTail.argument(argument) = node;
479 return node;
480 }
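    // As above, but for stores to arguments; also merges exit-profile information into the
    // argument's VariableAccessData before emitting the SetLocal.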
481 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
482 {
483 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
484
485 unsigned argument = operand.toArgument();
486 ASSERT(argument < m_numArguments);
487
488 VariableAccessData* variableAccessData = newVariableAccessData(operand);
489
490 // Always flush arguments, except for 'this'. If 'this' is created by us,
491 // then make sure that it's never unboxed.
492 if (argument || m_graph.needsFlushedThis()) {
493 if (setMode != ImmediateNakedSet)
494 flushDirect(operand);
495 }
496
497 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
498 variableAccessData->mergeShouldNeverUnbox(true);
499
500 variableAccessData->mergeStructureCheckHoistingFailed(
501 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
502 variableAccessData->mergeCheckArrayHoistingFailed(
503 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
504 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
505 m_currentBlock->variablesAtTail.argument(argument) = node;
506 return node;
507 }
508
509 ArgumentPosition* findArgumentPositionForArgument(int argument)
510 {
511 InlineStackEntry* stack = m_inlineStackTop;
512 while (stack->m_inlineCallFrame)
513 stack = stack->m_caller;
514 return stack->m_argumentPositions[argument];
515 }
516
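    // Maps a local slot back to an inlined call frame's argument, if the operand falls within
    // the argument range of some frame on the current inline stack. Returns null otherwise.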
517 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
518 {
519 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
520 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
521 if (!inlineCallFrame)
522 break;
523 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
524 continue;
525 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
526 continue;
527 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
528 return stack->m_argumentPositions[argument];
529 }
530 return 0;
531 }
532
533 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
534 {
535 if (operand.isArgument())
536 return findArgumentPositionForArgument(operand.toArgument());
537 return findArgumentPositionForLocal(operand);
538 }
539
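    // Flush the arguments of one call frame level (plus the callee for closure calls and the
    // argument count for varargs frames), and the machine code block's scope register if needed.
    // Argument operands are remapped to machine registers before being handed to the callback.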
540 template<typename AddFlushDirectFunc>
541 void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
542 {
543 int numArguments;
544 if (inlineCallFrame) {
545 ASSERT(!m_graph.hasDebuggerEnabled());
546 numArguments = inlineCallFrame->argumentsWithFixup.size();
547 if (inlineCallFrame->isClosureCall)
548 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
549 if (inlineCallFrame->isVarargs())
550 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
551 } else
552 numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
553
554 for (unsigned argument = numArguments; argument--;)
555 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
556
557 if (m_graph.needsScopeRegister())
558 addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
559 }
560
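    // For a terminal, walk the entire inline stack: flush the arguments of every frame and keep
    // every bytecode-live local visible to OSR exit via PhantomLocal.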
561 template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
562 void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
563 {
564 origin.walkUpInlineStack(
565 [&] (CodeOrigin origin) {
566 unsigned bytecodeIndex = origin.bytecodeIndex();
567 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
568 flushImpl(inlineCallFrame, addFlushDirect);
569
570 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
571 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
572 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
573
574 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
575 if (livenessAtBytecode[local])
576 addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
577 }
578 });
579 }
580
581 void flush(VirtualRegister operand)
582 {
583 flushDirect(m_inlineStackTop->remapOperand(operand));
584 }
585
586 void flushDirect(VirtualRegister operand)
587 {
588 flushDirect(operand, findArgumentPosition(operand));
589 }
590
591 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
592 {
593 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
594 }
595
596 template<NodeType nodeType>
597 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
598 {
599 ASSERT(!operand.isConstant());
600
601 Node* node = m_currentBlock->variablesAtTail.operand(operand);
602
603 VariableAccessData* variable;
604
605 if (node)
606 variable = node->variableAccessData();
607 else
608 variable = newVariableAccessData(operand);
609
610 node = addToGraph(nodeType, OpInfo(variable));
611 m_currentBlock->variablesAtTail.operand(operand) = node;
612 if (argumentPosition)
613 argumentPosition->addVariable(variable);
614 }
615
616 void phantomLocalDirect(VirtualRegister operand)
617 {
618 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
619 }
620
621 void flush(InlineStackEntry* inlineStackEntry)
622 {
623 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
624 flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
625 }
626
627 void flushForTerminal()
628 {
629 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
630 auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
631 flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
632 }
633
634 void flushForReturn()
635 {
636 flush(m_inlineStackTop);
637 }
638
639 void flushIfTerminal(SwitchData& data)
640 {
641 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
642 return;
643
644 for (unsigned i = data.cases.size(); i--;) {
645 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
646 return;
647 }
648
649 flushForTerminal();
650 }
651
652 // Assumes that the constant should be strongly marked.
653 Node* jsConstant(JSValue constantValue)
654 {
655 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
656 }
657
658 Node* weakJSConstant(JSValue constantValue)
659 {
660 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
661 }
662
663 // Helper functions to get/set the this value.
664 Node* getThis()
665 {
666 return get(m_inlineStackTop->m_codeBlock->thisRegister());
667 }
668
669 void setThis(Node* value)
670 {
671 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
672 }
673
674 InlineCallFrame* inlineCallFrame()
675 {
676 return m_inlineStackTop->m_inlineCallFrame;
677 }
678
679 bool allInlineFramesAreTailCalls()
680 {
681 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
682 }
683
684 CodeOrigin currentCodeOrigin()
685 {
686 return CodeOrigin(m_currentIndex, inlineCallFrame());
687 }
688
689 NodeOrigin currentNodeOrigin()
690 {
691 CodeOrigin semantic;
692 CodeOrigin forExit;
693
694 if (m_currentSemanticOrigin.isSet())
695 semantic = m_currentSemanticOrigin;
696 else
697 semantic = currentCodeOrigin();
698
699 forExit = currentCodeOrigin();
700
701 return NodeOrigin(semantic, forExit, m_exitOK);
702 }
703
704 BranchData* branchData(unsigned taken, unsigned notTaken)
705 {
706 // We assume that branches originating from bytecode always have a fall-through. We
707 // use this assumption to avoid checking for the creation of terminal blocks.
708 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
709 BranchData* data = m_graph.m_branchData.add();
710 *data = BranchData::withBytecodeIndices(taken, notTaken);
711 return data;
712 }
713
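    // All nodes go through here: append to the current block, track whether any ForceOSRExit was
    // emitted, and invalidate m_exitOK if the node clobbers the exit state.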
714 Node* addToGraph(Node* node)
715 {
716 VERBOSE_LOG(" appended ", node, " ", Graph::opName(node->op()), "\n");
717
718 m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
719
720 m_currentBlock->append(node);
721 if (clobbersExitState(m_graph, node))
722 m_exitOK = false;
723 return node;
724 }
725
726 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
727 {
728 Node* result = m_graph.addNode(
729 op, currentNodeOrigin(), Edge(child1), Edge(child2),
730 Edge(child3));
731 return addToGraph(result);
732 }
733 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
734 {
735 Node* result = m_graph.addNode(
736 op, currentNodeOrigin(), child1, child2, child3);
737 return addToGraph(result);
738 }
739 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
740 {
741 Node* result = m_graph.addNode(
742 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
743 Edge(child3));
744 return addToGraph(result);
745 }
746 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
747 {
748 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
749 return addToGraph(result);
750 }
751 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
752 {
753 Node* result = m_graph.addNode(
754 op, currentNodeOrigin(), info1, info2,
755 Edge(child1), Edge(child2), Edge(child3));
756 return addToGraph(result);
757 }
758 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
759 {
760 Node* result = m_graph.addNode(
761 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
762 return addToGraph(result);
763 }
764
765 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
766 {
767 Node* result = m_graph.addNode(
768 Node::VarArg, op, currentNodeOrigin(), info1, info2,
769 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
770 addToGraph(result);
771
772 m_numPassedVarArgs = 0;
773
774 return result;
775 }
776
777 void addVarArgChild(Node* child)
778 {
779 m_graph.m_varArgChildren.append(Edge(child));
780 m_numPassedVarArgs++;
781 }
782
783 void addVarArgChild(Edge child)
784 {
785 m_graph.m_varArgChildren.append(child);
786 m_numPassedVarArgs++;
787 }
788
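    // Build the variadic children for a call node (callee first, then the arguments read from
    // the stack slots given by registerOffset) and grow m_parameterSlots so the stack frame can
    // accommodate the outgoing call.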
789 Node* addCallWithoutSettingResult(
790 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
791 OpInfo prediction)
792 {
793 addVarArgChild(callee);
794 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
795
796 if (parameterSlots > m_parameterSlots)
797 m_parameterSlots = parameterSlots;
798
799 for (int i = 0; i < argCount; ++i)
800 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
801
802 return addToGraph(Node::VarArg, op, opInfo, prediction);
803 }
804
805 Node* addCall(
806 VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
807 SpeculatedType prediction)
808 {
809 if (op == TailCall) {
810 if (allInlineFramesAreTailCalls())
811 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
812 op = TailCallInlinedCaller;
813 }
814
815
816 Node* call = addCallWithoutSettingResult(
817 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
818 if (result.isValid())
819 set(result, call);
820 return call;
821 }
822
823 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
824 {
825 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
826 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
        // object's structure as soon as we make it a weakJSConstant.
828 Node* objectNode = weakJSConstant(object);
829 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
830 return objectNode;
831 }
832
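    // Result prediction for the instruction at the given bytecode index, normally taken from the
    // baseline value profile. For inlined tail calls, which have no profiling of their own
    // result, walk up to the nearest non-tail caller and use its prediction instead.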
833 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
834 {
835 auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
836 {
837 SpeculatedType prediction;
838 {
839 ConcurrentJSLocker locker(codeBlock->m_lock);
840 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
841 }
842 auto* fuzzerAgent = m_vm->fuzzerAgent();
843 if (UNLIKELY(fuzzerAgent))
844 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
845 return prediction;
846 };
847
848 SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
849 if (prediction != SpecNone)
850 return prediction;
851
852 // If we have no information about the values this
853 // node generates, we check if by any chance it is
854 // a tail call opcode. In that case, we walk up the
855 // inline frames to find a call higher in the call
856 // chain and use its prediction. If we only have
857 // inlined tail call frames, we use SpecFullTop
858 // to avoid a spurious OSR exit.
859 auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
860 OpcodeID opcodeID = instruction->opcodeID();
861
862 switch (opcodeID) {
863 case op_tail_call:
864 case op_tail_call_varargs:
865 case op_tail_call_forward_arguments: {
866 // Things should be more permissive to us returning BOTTOM instead of TOP here.
867 // Currently, this will cause us to Force OSR exit. This is bad because returning
868 // TOP will cause anything that transitively touches this speculated type to
869 // also become TOP during prediction propagation.
870 // https://bugs.webkit.org/show_bug.cgi?id=164337
871 if (!inlineCallFrame())
872 return SpecFullTop;
873
874 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
875 if (!codeOrigin)
876 return SpecFullTop;
877
878 InlineStackEntry* stack = m_inlineStackTop;
879 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
880 stack = stack->m_caller;
881
882 return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
883 }
884
885 default:
886 return SpecNone;
887 }
888
889 RELEASE_ASSERT_NOT_REACHED();
890 return SpecNone;
891 }
892
893 SpeculatedType getPrediction(unsigned bytecodeIndex)
894 {
895 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
896
897 if (prediction == SpecNone) {
898 // We have no information about what values this node generates. Give up
899 // on executing this code, since we're likely to do more damage than good.
900 addToGraph(ForceOSRExit);
901 }
902
903 return prediction;
904 }
905
906 SpeculatedType getPredictionWithoutOSRExit()
907 {
908 return getPredictionWithoutOSRExit(m_currentIndex);
909 }
910
911 SpeculatedType getPrediction()
912 {
913 return getPrediction(m_currentIndex);
914 }
915
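    // Compute the ArrayMode to use for the current instruction from its baseline ArrayProfile,
    // refreshing the profile's prediction under the baseline code block's lock first.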
916 ArrayMode getArrayMode(Array::Action action)
917 {
918 CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
919 ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
920 return getArrayMode(*profile, action);
921 }
922
923 ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
924 {
925 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
926 profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
927 bool makeSafe = profile.outOfBounds(locker);
928 return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
929 }
930
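    // Decorate an arithmetic node with "may overflow" / "may produce -0" / "may produce a
    // double, non-numeric or BigInt result" flags, based on prior OSR exits and the baseline
    // ArithProfile, so later phases choose sufficiently conservative speculation.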
931 Node* makeSafe(Node* node)
932 {
933 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
934 node->mergeFlags(NodeMayOverflowInt32InDFG);
935 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
936 node->mergeFlags(NodeMayNegZeroInDFG);
937
938 if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
939 return node;
940
941 {
942 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
943 if (arithProfile) {
944 switch (node->op()) {
945 case ArithAdd:
946 case ArithSub:
947 case ValueAdd:
948 if (arithProfile->didObserveDouble())
949 node->mergeFlags(NodeMayHaveDoubleResult);
950 if (arithProfile->didObserveNonNumeric())
951 node->mergeFlags(NodeMayHaveNonNumericResult);
952 if (arithProfile->didObserveBigInt())
953 node->mergeFlags(NodeMayHaveBigIntResult);
954 break;
955
956 case ValueMul:
957 case ArithMul: {
958 if (arithProfile->didObserveInt52Overflow())
959 node->mergeFlags(NodeMayOverflowInt52);
960 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
961 node->mergeFlags(NodeMayOverflowInt32InBaseline);
962 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
963 node->mergeFlags(NodeMayNegZeroInBaseline);
964 if (arithProfile->didObserveDouble())
965 node->mergeFlags(NodeMayHaveDoubleResult);
966 if (arithProfile->didObserveNonNumeric())
967 node->mergeFlags(NodeMayHaveNonNumericResult);
968 if (arithProfile->didObserveBigInt())
969 node->mergeFlags(NodeMayHaveBigIntResult);
970 break;
971 }
972 case ValueNegate:
973 case ArithNegate: {
974 if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
975 node->mergeFlags(NodeMayHaveDoubleResult);
976 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
977 node->mergeFlags(NodeMayNegZeroInBaseline);
978 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
979 node->mergeFlags(NodeMayOverflowInt32InBaseline);
980 if (arithProfile->didObserveNonNumeric())
981 node->mergeFlags(NodeMayHaveNonNumericResult);
982 if (arithProfile->didObserveBigInt())
983 node->mergeFlags(NodeMayHaveBigIntResult);
984 break;
985 }
986
987 default:
988 break;
989 }
990 }
991 }
992
993 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
994 switch (node->op()) {
995 case UInt32ToNumber:
996 case ArithAdd:
997 case ArithSub:
998 case ValueAdd:
999 case ValueMod:
1000 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1001 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1002 break;
1003
1004 default:
1005 break;
1006 }
1007 }
1008
1009 return node;
1010 }
1011
1012 Node* makeDivSafe(Node* node)
1013 {
1014 ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1015
1016 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1017 node->mergeFlags(NodeMayOverflowInt32InDFG);
1018 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1019 node->mergeFlags(NodeMayNegZeroInDFG);
1020
1021 // The main slow case counter for op_div in the old JIT counts only when
1022 // the operands are not numbers. We don't care about that since we already
1023 // have speculations in place that take care of that separately. We only
1024 // care about when the outcome of the division is not an integer, which
1025 // is what the special fast case counter tells us.
1026
1027 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1028 return node;
1029
1030 // FIXME: It might be possible to make this more granular.
1031 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1032
1033 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1034 if (arithProfile->didObserveBigInt())
1035 node->mergeFlags(NodeMayHaveBigIntResult);
1036
1037 return node;
1038 }
1039
1040 void noticeArgumentsUse()
1041 {
1042 // All of the arguments in this function need to be formatted as JSValues because we will
1043 // load from them in a random-access fashion and we don't want to have to switch on
1044 // format.
1045
1046 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1047 argument->mergeShouldNeverUnbox(true);
1048 }
1049
1050 bool needsDynamicLookup(ResolveType, OpcodeID);
1051
1052 VM* m_vm;
1053 CodeBlock* m_codeBlock;
1054 CodeBlock* m_profiledBlock;
1055 Graph& m_graph;
1056
1057 // The current block being generated.
1058 BasicBlock* m_currentBlock;
1059 // The bytecode index of the current instruction being generated.
1060 unsigned m_currentIndex;
    // The semantic origin of the current node, if different from the current index.
1062 CodeOrigin m_currentSemanticOrigin;
1063 // True if it's OK to OSR exit right now.
1064 bool m_exitOK { false };
1065
1066 FrozenValue* m_constantUndefined;
1067 FrozenValue* m_constantNull;
1068 FrozenValue* m_constantNaN;
1069 FrozenValue* m_constantOne;
1070 Vector<Node*, 16> m_constants;
1071
1072 HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1073
1074 // The number of arguments passed to the function.
1075 unsigned m_numArguments;
1076 // The number of locals (vars + temporaries) used in the function.
1077 unsigned m_numLocals;
1078 // The number of slots (in units of sizeof(Register)) that we need to
1079 // preallocate for arguments to outgoing calls from this frame. This
1080 // number includes the CallFrame slots that we initialize for the callee
1081 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1082 // This number is 0 if and only if this function is a leaf.
1083 unsigned m_parameterSlots;
1084 // The number of var args passed to the next var arg node.
1085 unsigned m_numPassedVarArgs;
1086
1087 struct InlineStackEntry {
1088 ByteCodeParser* m_byteCodeParser;
1089
1090 CodeBlock* m_codeBlock;
1091 CodeBlock* m_profiledBlock;
1092 InlineCallFrame* m_inlineCallFrame;
1093
1094 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1095
1096 QueryableExitProfile m_exitProfile;
1097
1098 // Remapping of identifier and constant numbers from the code block being
1099 // inlined (inline callee) to the code block that we're inlining into
1100 // (the machine code block, which is the transitive, though not necessarily
1101 // direct, caller).
1102 Vector<unsigned> m_identifierRemap;
1103 Vector<unsigned> m_switchRemap;
1104
1105 // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1106 // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1107 Vector<BasicBlock*> m_unlinkedBlocks;
1108
1109 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1110 // cannot have two blocks that have the same bytecodeBegin.
1111 Vector<BasicBlock*> m_blockLinkingTargets;
1112
1113 // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
1114 BasicBlock* m_continuationBlock;
1115
1116 VirtualRegister m_returnValue;
1117
1118 // Speculations about variable types collected from the profiled code block,
1119 // which are based on OSR exit profiles that past DFG compilations of this
1120 // code block had gathered.
1121 LazyOperandValueProfileParser m_lazyOperands;
1122
1123 ICStatusMap m_baselineMap;
1124 ICStatusContext m_optimizedContext;
1125
1126 // Pointers to the argument position trackers for this slice of code.
1127 Vector<ArgumentPosition*> m_argumentPositions;
1128
1129 InlineStackEntry* m_caller;
1130
1131 InlineStackEntry(
1132 ByteCodeParser*,
1133 CodeBlock*,
1134 CodeBlock* profiledBlock,
1135 JSFunction* callee, // Null if this is a closure call.
1136 VirtualRegister returnValueVR,
1137 VirtualRegister inlineCallFrameStart,
1138 int argumentCountIncludingThis,
1139 InlineCallFrame::Kind,
1140 BasicBlock* continuationBlock);
1141
1142 ~InlineStackEntry();
1143
1144 VirtualRegister remapOperand(VirtualRegister operand) const
1145 {
1146 if (!m_inlineCallFrame)
1147 return operand;
1148
1149 ASSERT(!operand.isConstant());
1150
1151 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1152 }
1153 };
1154
1155 InlineStackEntry* m_inlineStackTop;
1156
1157 ICStatusContextStack m_icContextStack;
1158
1159 struct DelayedSetLocal {
1160 CodeOrigin m_origin;
1161 VirtualRegister m_operand;
1162 Node* m_value;
1163 SetMode m_setMode;
1164
1165 DelayedSetLocal() { }
1166 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1167 : m_origin(origin)
1168 , m_operand(operand)
1169 , m_value(value)
1170 , m_setMode(setMode)
1171 {
1172 RELEASE_ASSERT(operand.isValid());
1173 }
1174
1175 Node* execute(ByteCodeParser* parser)
1176 {
1177 if (m_operand.isArgument())
1178 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1179 return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1180 }
1181 };
1182
1183 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1184
1185 const Instruction* m_currentInstruction;
1186 bool m_hasDebuggerEnabled;
1187 bool m_hasAnyForceOSRExits { false };
1188};
1189
1190BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1191{
1192 ASSERT(bytecodeIndex != UINT_MAX);
1193 Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1194 BasicBlock* blockPtr = block.ptr();
1195 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1196 if (m_inlineStackTop->m_blockLinkingTargets.size())
1197 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1198 m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1199 m_graph.appendBlock(WTFMove(block));
1200 return blockPtr;
1201}
1202
1203BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1204{
1205 Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1206 BasicBlock* blockPtr = block.ptr();
1207 m_graph.appendBlock(WTFMove(block));
1208 return blockPtr;
1209}
1210
1211void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1212{
1213 RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1214 block->bytecodeBegin = bytecodeIndex;
1215 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1216 if (m_inlineStackTop->m_blockLinkingTargets.size())
1217 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1218 m_inlineStackTop->m_blockLinkingTargets.append(block);
1219}
1220
1221void ByteCodeParser::addJumpTo(BasicBlock* block)
1222{
1223 ASSERT(!m_currentBlock->terminal());
1224 Node* jumpNode = addToGraph(Jump);
1225 jumpNode->targetBlock() = block;
1226 m_currentBlock->didLink();
1227}
1228
1229void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1230{
1231 ASSERT(!m_currentBlock->terminal());
1232 addToGraph(Jump, OpInfo(bytecodeIndex));
1233 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1234}
1235
1236template<typename CallOp>
1237ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1238{
1239 auto bytecode = pc->as<CallOp>();
1240 Node* callTarget = get(bytecode.m_callee);
1241 int registerOffset = -static_cast<int>(bytecode.m_argv);
1242
1243 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1244 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1245 m_inlineStackTop->m_baselineMap, m_icContextStack);
1246
1247 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1248
1249 return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1250 bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1251}
1252
1253void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1254{
1255 if (callTarget->isCellConstant())
1256 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1257}
1258
1259ByteCodeParser::Terminality ByteCodeParser::handleCall(
1260 VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1261 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1262 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1263{
1264 ASSERT(registerOffset <= 0);
1265
1266 refineStatically(callLinkStatus, callTarget);
1267
1268 VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1269
1270 // If we have profiling information about this call, and it did not behave too polymorphically,
1271 // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1272 if (callLinkStatus.canOptimize()) {
1273 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1274
1275 VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1276 auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1277 argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1278 if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1279 return Terminal;
1280 if (optimizationResult == CallOptimizationResult::Inlined) {
1281 if (UNLIKELY(m_graph.compilation()))
1282 m_graph.compilation()->noticeInlinedCall();
1283 return NonTerminal;
1284 }
1285 }
1286
1287 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1288 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1289 return callNode->op() == TailCall ? Terminal : NonTerminal;
1290}
1291
1292template<typename CallOp>
1293ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1294{
1295 auto bytecode = pc->as<CallOp>();
1296 int firstFreeReg = bytecode.m_firstFree.offset();
1297 int firstVarArgOffset = bytecode.m_firstVarArg;
1298
1299 SpeculatedType prediction = getPrediction();
1300
1301 Node* callTarget = get(bytecode.m_callee);
1302
1303 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1304 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1305 m_inlineStackTop->m_baselineMap, m_icContextStack);
1306 refineStatically(callLinkStatus, callTarget);
1307
1308 VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1309
1310 if (callLinkStatus.canOptimize()) {
1311 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1312
1313 if (handleVarargsInlining(callTarget, bytecode.m_dst,
1314 callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1315 firstVarArgOffset, op,
1316 InlineCallFrame::varargsKindFor(callMode))) {
1317 if (UNLIKELY(m_graph.compilation()))
1318 m_graph.compilation()->noticeInlinedCall();
1319 return NonTerminal;
1320 }
1321 }
1322
1323 CallVarargsData* data = m_graph.m_callVarargsData.add();
1324 data->firstVarArgOffset = firstVarArgOffset;
1325
1326 Node* thisChild = get(bytecode.m_thisValue);
1327 Node* argumentsChild = nullptr;
1328 if (op != TailCallForwardVarargs)
1329 argumentsChild = get(bytecode.m_arguments);
1330
1331 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1332 if (allInlineFramesAreTailCalls()) {
1333 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1334 return Terminal;
1335 }
1336 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1337 }
1338
1339 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1340 if (bytecode.m_dst.isValid())
1341 set(bytecode.m_dst, call);
1342 return NonTerminal;
1343}
1344
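// Guard that the call target is the callee the profiling saw: a CheckCell against the callee cell
// itself, or against its executable (via GetExecutable) for closure calls. The 'this' argument is
// kept alive with a Phantom.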
1345void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1346{
1347 Node* thisArgument;
1348 if (thisArgumentReg.isValid())
1349 thisArgument = get(thisArgumentReg);
1350 else
1351 thisArgument = nullptr;
1352
1353 JSCell* calleeCell;
1354 Node* callTargetForCheck;
1355 if (callee.isClosureCall()) {
1356 calleeCell = callee.executable();
1357 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1358 } else {
1359 calleeCell = callee.nonExecutableCallee();
1360 callTargetForCheck = callTarget;
1361 }
1362
1363 ASSERT(calleeCell);
1364 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1365 if (thisArgument)
1366 addToGraph(Phantom, thisArgument);
1367}
1368
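// The argument count is a compile-time constant for non-varargs inlined frames; otherwise it has
// to be loaded from the call frame with GetArgumentCountIncludingThis.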
1369Node* ByteCodeParser::getArgumentCount()
1370{
1371 Node* argumentCount;
1372 if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1373 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1374 else
1375 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1376 return argumentCount;
1377}
1378
1379void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1380{
1381 for (int i = 0; i < argumentCountIncludingThis; ++i)
1382 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1383}
1384
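// Try to turn a recursive tail call into a jump back to a matching frame already on the inline
// stack (just past its op_enter): check the callee, flush as if at a terminal, store the callee
// and arguments into the target frame's registers, re-initialize its locals to undefined, and
// then jump. Returns false if no suitable frame is found.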
1385template<typename ChecksFunctor>
1386bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1387{
1388 if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1389 return false;
1390
1391 auto targetExecutable = callVariant.executable();
1392 InlineStackEntry* stackEntry = m_inlineStackTop;
1393 do {
1394 if (targetExecutable != stackEntry->executable())
1395 continue;
1396 VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
1397
1398 if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1399 // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1400 // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1401 if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1402 continue;
1403 } else {
1404 // We are in the machine code entry (i.e. the original caller).
1405 // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1406 if (argumentCountIncludingThis > m_codeBlock->numParameters())
1407 return false;
1408 }
1409
1410 // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1411 // Check if this is the same callee that we try to inline here.
1412 if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1413 if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1414 continue;
1415 }
1416
1417 // We must add a check that the profiling information was correct and that the target of this call is what we thought.
1418 emitFunctionCheckIfNeeded();
1419 // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1420 flushForTerminal();
1421
1422 // We must set the callee to the right value
1423 if (stackEntry->m_inlineCallFrame) {
1424 if (stackEntry->m_inlineCallFrame->isClosureCall)
1425 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1426 } else
1427 addToGraph(SetCallee, callTargetNode);
1428
1429 // We must set the arguments to the right values
1430 if (!stackEntry->m_inlineCallFrame)
1431 addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1432 int argIndex = 0;
1433 for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1434 Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1435 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1436 }
1437 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1438 for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1439 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1440
1441 // We must repeat the work of op_enter here as we will jump right after it.
1442 // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1443 for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1444 setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1445
1446 // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1447 unsigned oldIndex = m_currentIndex;
1448 auto oldStackTop = m_inlineStackTop;
1449 m_inlineStackTop = stackEntry;
1450 m_currentIndex = opcodeLengths[op_enter];
1451 m_exitOK = true;
1452 processSetLocalQueue();
1453 m_currentIndex = oldIndex;
1454 m_inlineStackTop = oldStackTop;
1455 m_exitOK = false;
1456
1457 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1458 RELEASE_ASSERT(entryBlockPtr);
1459 addJumpTo(*entryBlockPtr);
1460 return true;
1461 // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1462 } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1463
1464 // The tail call was not recursive
1465 return false;
1466}
1467
1468unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1469{
1470 CallMode callMode = InlineCallFrame::callModeFor(kind);
1471 CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1472 VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1473
1474 if (m_hasDebuggerEnabled) {
1475 VERBOSE_LOG(" Failing because the debugger is in use.\n");
1476 return UINT_MAX;
1477 }
1478
1479 FunctionExecutable* executable = callee.functionExecutable();
1480 if (!executable) {
1481 VERBOSE_LOG(" Failing because there is no function executable.\n");
1482 return UINT_MAX;
1483 }
1484
1485 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1486 // being an inline candidate? We might not have a code block (1) if code was thrown away,
1487 // (2) if we simply hadn't actually made this call yet, or (3) the code is a builtin function and
1488 // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1489 // to inline it if we had a static proof of what was being called; this might happen for example
1490 // if you call a global function, where watchpointing gives us static information. Overall,
1491 // it's a rare case because we expect that any hot callees would have already been compiled.
1492 CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1493 if (!codeBlock) {
1494 VERBOSE_LOG(" Failing because no code block available.\n");
1495 return UINT_MAX;
1496 }
1497
1498 if (!Options::useArityFixupInlining()) {
1499 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1500 VERBOSE_LOG(" Failing because of arity mismatch.\n");
1501 return UINT_MAX;
1502 }
1503 }
1504
1505 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1506 codeBlock, specializationKind, callee.isClosureCall());
1507 VERBOSE_LOG(" Call mode: ", callMode, "\n");
1508 VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
1509 VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
1510 VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1511 VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1512 VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1513 VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1514 if (!canInline(capabilityLevel)) {
1515 VERBOSE_LOG(" Failing because the function is not inlineable.\n");
1516 return UINT_MAX;
1517 }
1518
1519 // Check if the caller is already too large. We do this check here because that's just
1520 // where we happen to also have the callee's code block, and we want that for the
1521 // purpose of unsetting SABI.
1522 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1523 codeBlock->m_shouldAlwaysBeInlined = false;
1524 VERBOSE_LOG(" Failing because the caller is too large.\n");
1525 return UINT_MAX;
1526 }
1527
1528 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1529 // this function.
1530 // https://bugs.webkit.org/show_bug.cgi?id=127627
1531
1532 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1533 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1534 // haven't gotten to Baseline yet. Consider not inlining these functions.
1535 // https://bugs.webkit.org/show_bug.cgi?id=145503
1536
1537 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1538 // too many levels? If either of these are detected, then don't inline. We adjust our
1539 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1540
1541 unsigned depth = 0;
1542 unsigned recursion = 0;
1543
1544 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1545 ++depth;
1546 if (depth >= Options::maximumInliningDepth()) {
1547 VERBOSE_LOG(" Failing because depth exceeded.\n");
1548 return UINT_MAX;
1549 }
1550
1551 if (entry->executable() == executable) {
1552 ++recursion;
1553 if (recursion >= Options::maximumInliningRecursion()) {
1554 VERBOSE_LOG(" Failing because recursion detected.\n");
1555 return UINT_MAX;
1556 }
1557 }
1558 }
1559
1560 VERBOSE_LOG(" Inlining should be possible.\n");
1561
1562 // It might be possible to inline.
1563 return codeBlock->bytecodeCost();
1564}
1565
1566template<typename ChecksFunctor>
1567void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1568{
1569 const Instruction* savedCurrentInstruction = m_currentInstruction;
1570 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1571
1572 ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1573
1574 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1575 insertChecks(codeBlock);
1576
1577 // FIXME: Don't flush constants!
1578
1579 // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment into account,
1580 // numberOfStackPaddingSlots does. Consider the following case:
1581 //
1582 // before: [ ... ][arg0][header]
1583 // after: [ ... ][ext ][arg1][arg0][header]
1584 //
1585 // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1586 // We insert extra slots to align the stack.
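    // At the JS level this corresponds to calling a function with fewer arguments than it declares,
    // e.g. foo(42) for function foo(a, b) { ... } (an illustrative sketch, assuming
    // stackAlignmentRegisters() == 2): arity fixup must add the missing slot, and an extra slot may
    // also be reserved purely to keep the padded frame aligned (the [ext ] slot in the diagram above).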
1587 int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1588 int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1589 ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1590 int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1591
1592 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1593
1594 ensureLocals(
1595 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1596 CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1597
1598 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1599
1600 if (result.isValid())
1601 result = m_inlineStackTop->remapOperand(result);
1602
1603 VariableAccessData* calleeVariable = nullptr;
1604 if (callee.isClosureCall()) {
1605 Node* calleeSet = set(
1606 VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1607
1608 calleeVariable = calleeSet->variableAccessData();
1609 calleeVariable->mergeShouldNeverUnbox(true);
1610 }
1611
1612 InlineStackEntry* callerStackTop = m_inlineStackTop;
1613 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1614 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1615
1616 // This is where the actual inlining really happens.
1617 unsigned oldIndex = m_currentIndex;
1618 m_currentIndex = 0;
1619
1620 switch (kind) {
1621 case InlineCallFrame::GetterCall:
1622 case InlineCallFrame::SetterCall: {
1623 // When inlining getter and setter calls, we set up a stack frame that does not appear in the bytecode.
1624 // Because inlining can switch on the executable, we could have a graph like this.
1625 //
1626 // BB#0
1627 // ...
1628 // 30: GetSetter
1629 // 31: MovHint(loc10)
1630 // 32: SetLocal(loc10)
1631 // 33: MovHint(loc9)
1632 // 34: SetLocal(loc9)
1633 // ...
1634 // 37: GetExecutable(@30)
1635 // ...
1636 // 41: Switch(@37)
1637 //
1638 // BB#2
1639 // 42: GetLocal(loc12, bc#7 of caller)
1640 // ...
1641 // --> callee: loc9 and loc10 are arguments of callee.
1642 // ...
1643 // <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1644 //
1645 // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1646 // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1647 // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
1648 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1649 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1650 Node* value = getDirect(argumentToGet);
1651 addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1652 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1653 }
1654 break;
1655 }
1656 default:
1657 break;
1658 }
1659
1660 if (arityFixupCount) {
1661 // Note: we do arity fixup in two phases:
1662 // 1. We get all the values we need and MovHint them to the expected locals.
1663 // 2. We SetLocal them after that. This way, if we exit, the callee's
1664 // frame is already set up. If any SetLocal exits, we have a valid exit state.
1665 // This is required because if we didn't do this in two phases, we may exit in
1666 // the middle of arity fixup from the callee's CodeOrigin. This is unsound because exited
1667 // code does not perform arity fixup, so the remaining necessary fixups would not be executed.
1668 // For example, consider if we need to pad two args:
1669 // [arg3][arg2][arg1][arg0]
1670 // [fix ][fix ][arg3][arg2][arg1][arg0]
1671 // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1672 // for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1673 // [arg3][arg2][arg1][arg2][arg1][arg0]
1674 // Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1675 // And the callee would then just end up thinking its arguments are:
1676 // [fix ][fix ][arg3][arg2][arg1][arg0]
1677 // which is incorrect.
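        // (Illustrative JS-level sketch of the situation above: calling a function that declares more
        // parameters than are passed, e.g. f(x) where f is declared function f(a, b, c) { ... }; the
        // missing parameters are padded with undefined, and the two phases described above keep the
        // exit state valid while that padding happens.)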
1678
1679 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1680 // The stack needs to be aligned due to the JS calling convention. Thus, there is a hole if the argument count is not aligned.
1681 // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Since this argument
1682 // count does not satisfy the stack alignment requirement, we have already inserted an extra slot.
1683 //
1684 // before: [ ... ][ext ][arg1][arg0][header]
1685 //
1686 // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1687 // At that time, we can simply use this extra slot. So the fixed-up stack is the following.
1688 //
1689 // before: [ ... ][ext ][arg1][arg0][header]
1690 // after: [ ... ][arg2][arg1][arg0][header]
1691 //
1692 // In such cases, we do not need to move frames.
1693 if (registerOffsetAfterFixup != registerOffset) {
1694 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1695 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1696 Node* value = getDirect(argumentToGet);
1697 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1698 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1699 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1700 }
1701 }
1702 for (int index = 0; index < arityFixupCount; ++index) {
1703 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1704 addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1705 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1706 }
1707
1708 // At this point, it's OK to OSR exit because we finished setting up
1709 // our callee's frame. We emit an ExitOK below.
1710 }
1711
1712 // At this point, it's again OK to OSR exit.
1713 m_exitOK = true;
1714 addToGraph(ExitOK);
1715
1716 processSetLocalQueue();
1717
1718 InlineVariableData inlineVariableData;
1719 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1720 inlineVariableData.argumentPositionStart = argumentPositionStart;
1721 inlineVariableData.calleeVariable = 0;
1722
1723 RELEASE_ASSERT(
1724 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1725 == callee.isClosureCall());
1726 if (callee.isClosureCall()) {
1727 RELEASE_ASSERT(calleeVariable);
1728 inlineVariableData.calleeVariable = calleeVariable;
1729 }
1730
1731 m_graph.m_inlineVariableData.append(inlineVariableData);
1732
1733 parseCodeBlock();
1734 clearCaches(); // Reset our state now that we're back to the outer code.
1735
1736 m_currentIndex = oldIndex;
1737 m_exitOK = false;
1738
1739 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1740
1741 // Most functions have at least one op_ret and thus set up the continuation block.
1742 // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1743 if (inlineStackEntry.m_continuationBlock)
1744 m_currentBlock = inlineStackEntry.m_continuationBlock;
1745 else
1746 m_currentBlock = allocateUntargetableBlock();
1747 ASSERT(!m_currentBlock->terminal());
1748
1749 prepareToParseBlock();
1750 m_currentInstruction = savedCurrentInstruction;
1751}
1752
1753ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1754{
1755 VERBOSE_LOG(" Considering callee ", callee, "\n");
1756
1757 bool didInsertChecks = false;
1758 auto insertChecksWithAccounting = [&] () {
1759 if (needsToCheckCallee)
1760 emitFunctionChecks(callee, callTargetNode, thisArgument);
1761 didInsertChecks = true;
1762 };
1763
1764 if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1765 RELEASE_ASSERT(didInsertChecks);
1766 return CallOptimizationResult::OptimizedToJump;
1767 }
1768 RELEASE_ASSERT(!didInsertChecks);
1769
1770 if (!inliningBalance)
1771 return CallOptimizationResult::DidNothing;
1772
1773 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1774
1775 auto endSpecialCase = [&] () {
1776 RELEASE_ASSERT(didInsertChecks);
1777 addToGraph(Phantom, callTargetNode);
1778 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1779 inliningBalance--;
1780 if (continuationBlock) {
1781 m_currentIndex = nextOffset;
1782 m_exitOK = true;
1783 processSetLocalQueue();
1784 addJumpTo(continuationBlock);
1785 }
1786 };
1787
1788 if (InternalFunction* function = callee.internalFunction()) {
1789 if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1790 endSpecialCase();
1791 return CallOptimizationResult::Inlined;
1792 }
1793 RELEASE_ASSERT(!didInsertChecks);
1794 return CallOptimizationResult::DidNothing;
1795 }
1796
1797 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1798 if (intrinsic != NoIntrinsic) {
1799 if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1800 endSpecialCase();
1801 return CallOptimizationResult::Inlined;
1802 }
1803 RELEASE_ASSERT(!didInsertChecks);
1804 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1805 }
1806
1807 if (Options::useDOMJIT()) {
1808 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1809 if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1810 endSpecialCase();
1811 return CallOptimizationResult::Inlined;
1812 }
1813 RELEASE_ASSERT(!didInsertChecks);
1814 }
1815 }
1816
1817 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1818 if (myInliningCost > inliningBalance)
1819 return CallOptimizationResult::DidNothing;
1820
1821 auto insertCheck = [&] (CodeBlock*) {
1822 if (needsToCheckCallee)
1823 emitFunctionChecks(callee, callTargetNode, thisArgument);
1824 };
1825 inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1826 inliningBalance -= myInliningCost;
1827 return CallOptimizationResult::Inlined;
1828}
1829
1830bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1831 const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1832 VirtualRegister argumentsArgument, unsigned argumentsOffset,
1833 NodeType callOp, InlineCallFrame::Kind kind)
1834{
1835 VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
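    // (Illustrative: this path covers call sites that the bytecode lowers to varargs calls, e.g.
    // f(...args) or forwarding-style sites; a plain f(x, y) goes through the non-varargs inlining
    // path instead.)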
1836 if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1837 VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1838 return false;
1839 }
1840 if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1841 VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1842 return false;
1843 }
1844
1845 CallVariant callVariant = callLinkStatus[0];
1846
1847 unsigned mandatoryMinimum;
1848 if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1849 mandatoryMinimum = functionExecutable->parameterCount();
1850 else
1851 mandatoryMinimum = 0;
1852
1853 // includes "this"
1854 unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1855
1856 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1857 if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1858 VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1859 return false;
1860 }
1861
1862 int registerOffset = firstFreeReg + 1;
1863 registerOffset -= maxNumArguments; // includes "this"
1864 registerOffset -= CallFrame::headerSizeInRegisters;
1865 registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
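    // (Arithmetic sketch, assuming stackAlignmentRegisters() == 2: a raw registerOffset of -13 becomes
    // -roundUpToMultipleOf(2, 13) == -14, i.e. the new frame start is pushed down to the next aligned slot.)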
1866
1867 Vector<VirtualRegister> setArgumentMaybes;
1868
1869 auto insertChecks = [&] (CodeBlock* codeBlock) {
1870 emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1871
1872 int remappedRegisterOffset =
1873 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1874
1875 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1876
1877 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1878 int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1879
1880 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1881 data->start = VirtualRegister(remappedArgumentStart + 1);
1882 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1883 data->offset = argumentsOffset;
1884 data->limit = maxNumArguments;
1885 data->mandatoryMinimum = mandatoryMinimum;
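        // Roughly (an illustrative reading of these fields, for a site like f(...args) where f is
        // declared function f(a, b) { ... }): start is the first register the loaded arguments are
        // written to, count is the register that receives the loaded argument count, offset is how
        // many leading elements of the arguments source to skip, limit caps the number of arguments
        // we can handle here (the slots reserved above), and mandatoryMinimum (2 in this sketch) is
        // the declared parameter count below which missing arguments are filled with undefined.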
1886
1887 if (callOp == TailCallForwardVarargs)
1888 addToGraph(ForwardVarargs, OpInfo(data));
1889 else
1890 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1891
1892 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1893 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1894 // callTargetNode because the other 2 are still in use and alive at this point.
1895 addToGraph(Phantom, callTargetNode);
1896
1897 // In DFG IR before SSA, we cannot insert control flow between the
1898 // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1899 // SSA. Fortunately, we also have other reasons for not inserting control flow
1900 // before SSA.
1901
1902 VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1903 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1904 // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1905 // mostly just a formality.
1906 countVariable->predict(SpecInt32Only);
1907 countVariable->mergeIsProfitableToUnbox(true);
1908 Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1909 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1910
1911 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1912 unsigned numSetArguments = 0;
1913 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1914 VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1915 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1916
1917 // For a while it had been my intention to do things like this inside the
1918 // prediction injection phase. But in this case it's really best to do it here,
1919 // because it's here that we have access to the variable access datas for the
1920 // inlining we're about to do.
1921 //
1922 // Something else that's interesting here is that we'd really love to get
1923 // predictions from the arguments loaded at the callsite, rather than the
1924 // arguments received inside the callee. But that probably won't matter for most
1925 // calls.
1926 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1927 ConcurrentJSLocker locker(codeBlock->m_lock);
1928 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1929 variable->predict(profile.computeUpdatedPrediction(locker));
1930 }
1931
1932 Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1933 if (numSetArguments >= mandatoryMinimum && Options::useMaximalFlushInsertionPhase())
1934 setArgumentMaybes.append(variable->local());
1935 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1936 ++numSetArguments;
1937 }
1938 };
1939
1940 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1941 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1942 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1943 // and there are no callsite value profiles, and native functions won't have callee value profiles for
1944 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1945 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1946 // calling LoadVarargs twice.
1947 inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1948
1949 for (VirtualRegister reg : setArgumentMaybes)
1950 setDirect(reg, jsConstant(jsUndefined()), ImmediateNakedSet);
1951
1952 VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1953 return true;
1954}
1955
1956unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1957{
1958 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1959 if (specializationKind == CodeForConstruct)
1960 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1961 if (callLinkStatus.isClosureCall())
1962 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1963 return inliningBalance;
1964}
1965
1966ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1967 Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1968 int registerOffset, VirtualRegister thisArgument,
1969 int argumentCountIncludingThis,
1970 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1971{
1972 VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1973
1974 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1975 unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1976
1977 // First check if we can avoid creating control flow. Our inliner does some CFG
1978 // simplification on the fly and this helps reduce compile times, but we can only leverage
1979 // this in cases where we don't need control flow diamonds to check the callee.
1980 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1981 return handleCallVariant(
1982 callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1983 argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1984 }
1985
1986 // We need to create some kind of switch over callee. For now we only do this if we believe that
1987 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1988 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1989 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1990 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1991 // also.
1992 if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1993 VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1994 return CallOptimizationResult::DidNothing;
1995 }
1996
1997 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1998 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1999 // it has no idea.
2000 if (!Options::usePolymorphicCallInliningForNonStubStatus()
2001 && !callLinkStatus.isBasedOnStub()) {
2002 VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
2003 return CallOptimizationResult::DidNothing;
2004 }
2005
2006 bool allAreClosureCalls = true;
2007 bool allAreDirectCalls = true;
2008 for (unsigned i = callLinkStatus.size(); i--;) {
2009 if (callLinkStatus[i].isClosureCall())
2010 allAreDirectCalls = false;
2011 else
2012 allAreClosureCalls = false;
2013 }
2014
2015 Node* thingToSwitchOn;
2016 if (allAreDirectCalls)
2017 thingToSwitchOn = callTargetNode;
2018 else if (allAreClosureCalls)
2019 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2020 else {
2021 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2022 // where it would be beneficial. It might be best to handle these cases as if all calls were
2023 // closure calls.
2024 // https://bugs.webkit.org/show_bug.cgi?id=136020
2025 VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2026 return CallOptimizationResult::DidNothing;
2027 }
2028
2029 VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2030
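    // (Illustrative sketch of the kind of site this handles: a call like handler(x) where profiling
    // saw handler bound to a few different functions, say fooHandler and barHandler; we switch on the
    // callee, or on its executable for closure calls, and inline each case into its own block, all of
    // which merge at the continuation block.)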
2031 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2032 // store the callee so that it will be accessible to all of the blocks we're about to create. We
2033 // get away with doing an immediate-set here because we wouldn't have performed any side effects
2034 // yet.
2035 VERBOSE_LOG("Register offset: ", registerOffset);
2036 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2037 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2038 VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2039 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2040
2041 // It's OK to exit right now, even though we set some locals. That's because those locals are not
2042 // user-visible.
2043 m_exitOK = true;
2044 addToGraph(ExitOK);
2045
2046 SwitchData& data = *m_graph.m_switchData.add();
2047 data.kind = SwitchCell;
2048 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2049 m_currentBlock->didLink();
2050
2051 BasicBlock* continuationBlock = allocateUntargetableBlock();
2052 VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2053
2054 // We may force this true if we give up on inlining any of the edges.
2055 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2056
2057 VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2058
2059 unsigned oldOffset = m_currentIndex;
2060 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2061 m_currentIndex = oldOffset;
2062 BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2063 m_currentBlock = calleeEntryBlock;
2064 prepareToParseBlock();
2065
2066 // At the top of each switch case, we can exit.
2067 m_exitOK = true;
2068
2069 Node* myCallTargetNode = getDirect(calleeReg);
2070
2071 auto inliningResult = handleCallVariant(
2072 myCallTargetNode, result, callLinkStatus[i], registerOffset,
2073 thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2074 inliningBalance, continuationBlock, false);
2075
2076 if (inliningResult == CallOptimizationResult::DidNothing) {
2077 // That failed so we let the block die. Nothing interesting should have been added to
2078 // the block. We also give up on inlining any of the (less frequent) callees.
2079 ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2080 m_graph.killBlockAndItsContents(m_currentBlock);
2081 m_graph.m_blocks.removeLast();
2082 VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2083
2084 // The fact that inlining failed means we need a slow path.
2085 couldTakeSlowPath = true;
2086 break;
2087 }
2088
2089 JSCell* thingToCaseOn;
2090 if (allAreDirectCalls)
2091 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2092 else {
2093 ASSERT(allAreClosureCalls);
2094 thingToCaseOn = callLinkStatus[i].executable();
2095 }
2096 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2097 VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2098 }
2099
2100 // Slow path block
2101 m_currentBlock = allocateUntargetableBlock();
2102 m_currentIndex = oldOffset;
2103 m_exitOK = true;
2104 data.fallThrough = BranchTarget(m_currentBlock);
2105 prepareToParseBlock();
2106 Node* myCallTargetNode = getDirect(calleeReg);
2107 if (couldTakeSlowPath) {
2108 addCall(
2109 result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2110 registerOffset, prediction);
2111 VERBOSE_LOG("We added a call in the slow path\n");
2112 } else {
2113 addToGraph(CheckBadCell);
2114 addToGraph(Phantom, myCallTargetNode);
2115 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2116
2117 set(result, addToGraph(BottomValue));
2118 VERBOSE_LOG("couldTakeSlowPath was false\n");
2119 }
2120
2121 m_currentIndex = nextOffset;
2122 m_exitOK = true; // Origin changed, so it's fine to exit again.
2123 processSetLocalQueue();
2124
2125 if (Node* terminal = m_currentBlock->terminal())
2126 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2127 else {
2128 addJumpTo(continuationBlock);
2129 }
2130
2131 prepareToParseBlock();
2132
2133 m_currentIndex = oldOffset;
2134 m_currentBlock = continuationBlock;
2135 m_exitOK = true;
2136
2137 VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2138 return CallOptimizationResult::Inlined;
2139}
2140
2141template<typename ChecksFunctor>
2142bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2143{
2144 ASSERT(op == ArithMin || op == ArithMax);
2145
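    // For reference (JS spec behavior): Math.max() with no arguments is -Infinity and Math.min() is
    // +Infinity, which is what the one-argument-including-this case below materializes; with a single
    // argument the spec result is ToNumber(argument), and the NumberUse Phantom below speculates the
    // argument is already a number so it can be returned as-is.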
2146 if (argumentCountIncludingThis == 1) {
2147 insertChecks();
2148 double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2149 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2150 return true;
2151 }
2152
2153 if (argumentCountIncludingThis == 2) {
2154 insertChecks();
2155 Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2156 addToGraph(Phantom, Edge(resultNode, NumberUse));
2157 set(result, resultNode);
2158 return true;
2159 }
2160
2161 if (argumentCountIncludingThis == 3) {
2162 insertChecks();
2163 set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2164 return true;
2165 }
2166
2167 // Don't handle >=3 arguments for now.
2168 return false;
2169}
2170
2171template<typename ChecksFunctor>
2172bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2173{
2174 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
2175
2176 if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2177 return false;
2178
2179 // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2180 // it would only benefit intrinsics called as setters, like if you do:
2181 //
2182 // o.__defineSetter__("foo", Math.pow)
2183 //
2184 // Which is extremely amusing, but probably not worth optimizing.
2185 if (!result.isValid())
2186 return false;
2187
2188 bool didSetResult = false;
2189 auto setResult = [&] (Node* node) {
2190 RELEASE_ASSERT(!didSetResult);
2191 set(result, node);
2192 didSetResult = true;
2193 };
2194
2195 auto inlineIntrinsic = [&] {
2196 switch (intrinsic) {
2197
2198 // Intrinsic Functions:
2199
2200 case AbsIntrinsic: {
2201 if (argumentCountIncludingThis == 1) { // Math.abs()
2202 insertChecks();
2203 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2204 return true;
2205 }
2206
2207 if (!MacroAssembler::supportsFloatingPointAbs())
2208 return false;
2209
2210 insertChecks();
2211 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2212 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2213 node->mergeFlags(NodeMayOverflowInt32InDFG);
2214 setResult(node);
2215 return true;
2216 }
2217
2218 case MinIntrinsic:
2219 case MaxIntrinsic:
2220 if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2221 didSetResult = true;
2222 return true;
2223 }
2224 return false;
2225
2226#define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2227 case capitalizedName##Intrinsic:
2228 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2229#undef DFG_ARITH_UNARY
2230 {
2231 if (argumentCountIncludingThis == 1) {
2232 insertChecks();
2233 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2234 return true;
2235 }
2236 Arith::UnaryType type = Arith::UnaryType::Sin;
2237 switch (intrinsic) {
2238#define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2239 case capitalizedName##Intrinsic: \
2240 type = Arith::UnaryType::capitalizedName; \
2241 break;
2242 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2243#undef DFG_ARITH_UNARY
2244 default:
2245 RELEASE_ASSERT_NOT_REACHED();
2246 }
2247 insertChecks();
2248 setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2249 return true;
2250 }
2251
2252 case FRoundIntrinsic:
2253 case SqrtIntrinsic: {
2254 if (argumentCountIncludingThis == 1) {
2255 insertChecks();
2256 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2257 return true;
2258 }
2259
2260 NodeType nodeType = Unreachable;
2261 switch (intrinsic) {
2262 case FRoundIntrinsic:
2263 nodeType = ArithFRound;
2264 break;
2265 case SqrtIntrinsic:
2266 nodeType = ArithSqrt;
2267 break;
2268 default:
2269 RELEASE_ASSERT_NOT_REACHED();
2270 }
2271 insertChecks();
2272 setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2273 return true;
2274 }
2275
2276 case PowIntrinsic: {
2277 if (argumentCountIncludingThis < 3) {
2278 // Math.pow() and Math.pow(x) return NaN.
2279 insertChecks();
2280 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2281 return true;
2282 }
2283 insertChecks();
2284 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2285 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2286 setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2287 return true;
2288 }
2289
2290 case ArrayPushIntrinsic: {
2291#if USE(JSVALUE32_64)
2292 if (isX86()) {
2293 if (argumentCountIncludingThis > 2)
2294 return false;
2295 }
2296#endif
2297
2298 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2299 return false;
2300
2301 ArrayMode arrayMode = getArrayMode(Array::Write);
2302 if (!arrayMode.isJSArray())
2303 return false;
2304 switch (arrayMode.type()) {
2305 case Array::Int32:
2306 case Array::Double:
2307 case Array::Contiguous:
2308 case Array::ArrayStorage: {
2309 insertChecks();
2310
2311 addVarArgChild(nullptr); // For storage.
2312 for (int i = 0; i < argumentCountIncludingThis; ++i)
2313 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2314 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2315 setResult(arrayPush);
2316 return true;
2317 }
2318
2319 default:
2320 return false;
2321 }
2322 }
2323
2324 case ArraySliceIntrinsic: {
2325#if USE(JSVALUE32_64)
2326 if (isX86()) {
2327 // There aren't enough registers for this to be done easily.
2328 return false;
2329 }
2330#endif
2331 if (argumentCountIncludingThis < 1)
2332 return false;
2333
2334 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2335 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2336 return false;
2337
2338 ArrayMode arrayMode = getArrayMode(Array::Read);
2339 if (!arrayMode.isJSArray())
2340 return false;
2341
2342 if (!arrayMode.isJSArrayWithOriginalStructure())
2343 return false;
2344
2345 switch (arrayMode.type()) {
2346 case Array::Double:
2347 case Array::Int32:
2348 case Array::Contiguous: {
2349 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2350
2351 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2352 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2353
2354 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2355 // https://bugs.webkit.org/show_bug.cgi?id=173171
2356 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2357 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2358 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2359 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2360 && globalObject->arrayPrototypeChainIsSane()) {
2361
2362 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2363 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2364 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2365 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2366
2367 insertChecks();
2368
2369 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2370 // We do a few things here to prove that we aren't skipping side effects in an observable way:
2371 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2372 // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2373 // that if we're an original array structure). (We can relax this in the future by using
2374 // TryGetById and CheckCell.)
2375 //
2376 // 2. We check that the array we're calling slice on has the same global object as the lexical
2377 // global object that this code is running in. This requirement is necessary because we setup the
2378 // watchpoints above on the lexical global object. This means that code that calls slice on
2379 // arrays produced by other global objects won't get this optimization. We could relax this
2380 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2381 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2382 //
2383 // 3. By proving we're an original array structure, we guarantee that the incoming array
2384 // isn't a subclass of Array.
2385
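                // As an illustrative, non-exhaustive sketch, user code like either of the following would
                // defeat this fast path (by firing the species watchpoint, or by failing the structure
                // check at run time) and send us down the generic slice path instead:
                //
                //     Array.prototype.constructor = function NotArray() {};
                //     class SubArray extends Array {}; (new SubArray).slice();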
2386 StructureSet structureSet;
2387 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2388 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2389 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2390 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2391 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2392 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2393 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2394
2395 addVarArgChild(array);
2396 if (argumentCountIncludingThis >= 2)
2397 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2398 if (argumentCountIncludingThis >= 3)
2399 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2400 addVarArgChild(addToGraph(GetButterfly, array));
2401
2402 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2403 setResult(arraySlice);
2404 return true;
2405 }
2406
2407 return false;
2408 }
2409 default:
2410 return false;
2411 }
2412
2413 RELEASE_ASSERT_NOT_REACHED();
2414 return false;
2415 }
2416
2417 case ArrayIndexOfIntrinsic: {
2418 if (argumentCountIncludingThis < 2)
2419 return false;
2420
2421 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2422 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2423 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2424 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2425 return false;
2426
2427 ArrayMode arrayMode = getArrayMode(Array::Read);
2428 if (!arrayMode.isJSArray())
2429 return false;
2430
2431 if (!arrayMode.isJSArrayWithOriginalStructure())
2432 return false;
2433
2434 // We do not want to convert arrays into one type just to perform indexOf.
2435 if (arrayMode.doesConversion())
2436 return false;
2437
2438 switch (arrayMode.type()) {
2439 case Array::Double:
2440 case Array::Int32:
2441 case Array::Contiguous: {
2442 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2443
2444 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2445 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2446
2447 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2448 // https://bugs.webkit.org/show_bug.cgi?id=173171
2449 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2450 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2451 && globalObject->arrayPrototypeChainIsSane()) {
2452
2453 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2454 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2455
2456 insertChecks();
2457
2458 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2459 addVarArgChild(array);
2460 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2461 if (argumentCountIncludingThis >= 3)
2462 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2463 addVarArgChild(nullptr);
2464
2465 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2466 setResult(node);
2467 return true;
2468 }
2469
2470 return false;
2471 }
2472 default:
2473 return false;
2474 }
2475
2476 RELEASE_ASSERT_NOT_REACHED();
2477 return false;
2478
2479 }
2480
2481 case ArrayPopIntrinsic: {
2482 if (argumentCountIncludingThis != 1)
2483 return false;
2484
2485 ArrayMode arrayMode = getArrayMode(Array::Write);
2486 if (!arrayMode.isJSArray())
2487 return false;
2488 switch (arrayMode.type()) {
2489 case Array::Int32:
2490 case Array::Double:
2491 case Array::Contiguous:
2492 case Array::ArrayStorage: {
2493 insertChecks();
2494 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2495 setResult(arrayPop);
2496 return true;
2497 }
2498
2499 default:
2500 return false;
2501 }
2502 }
2503
2504 case AtomicsAddIntrinsic:
2505 case AtomicsAndIntrinsic:
2506 case AtomicsCompareExchangeIntrinsic:
2507 case AtomicsExchangeIntrinsic:
2508 case AtomicsIsLockFreeIntrinsic:
2509 case AtomicsLoadIntrinsic:
2510 case AtomicsOrIntrinsic:
2511 case AtomicsStoreIntrinsic:
2512 case AtomicsSubIntrinsic:
2513 case AtomicsXorIntrinsic: {
2514 if (!is64Bit())
2515 return false;
2516
2517 NodeType op = LastNodeType;
2518 Array::Action action = Array::Write;
2519 unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2520 switch (intrinsic) {
2521 case AtomicsAddIntrinsic:
2522 op = AtomicsAdd;
2523 numArgs = 3;
2524 break;
2525 case AtomicsAndIntrinsic:
2526 op = AtomicsAnd;
2527 numArgs = 3;
2528 break;
2529 case AtomicsCompareExchangeIntrinsic:
2530 op = AtomicsCompareExchange;
2531 numArgs = 4;
2532 break;
2533 case AtomicsExchangeIntrinsic:
2534 op = AtomicsExchange;
2535 numArgs = 3;
2536 break;
2537 case AtomicsIsLockFreeIntrinsic:
2538 // This gets no backing store, but it needs no special logic since it also does
2539 // not need varargs.
2540 op = AtomicsIsLockFree;
2541 numArgs = 1;
2542 break;
2543 case AtomicsLoadIntrinsic:
2544 op = AtomicsLoad;
2545 numArgs = 2;
2546 action = Array::Read;
2547 break;
2548 case AtomicsOrIntrinsic:
2549 op = AtomicsOr;
2550 numArgs = 3;
2551 break;
2552 case AtomicsStoreIntrinsic:
2553 op = AtomicsStore;
2554 numArgs = 3;
2555 break;
2556 case AtomicsSubIntrinsic:
2557 op = AtomicsSub;
2558 numArgs = 3;
2559 break;
2560 case AtomicsXorIntrinsic:
2561 op = AtomicsXor;
2562 numArgs = 3;
2563 break;
2564 default:
2565 RELEASE_ASSERT_NOT_REACHED();
2566 break;
2567 }
2568
2569 if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2570 return false;
2571
2572 insertChecks();
2573
2574 Vector<Node*, 3> args;
2575 for (unsigned i = 0; i < numArgs; ++i)
2576 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2577
2578 Node* resultNode;
2579 if (numArgs + 1 <= 3) {
2580 while (args.size() < 3)
2581 args.append(nullptr);
2582 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2583 } else {
2584 for (Node* node : args)
2585 addVarArgChild(node);
2586 addVarArgChild(nullptr);
2587 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2588 }
2589
2590 setResult(resultNode);
2591 return true;
2592 }
2593
2594 case ParseIntIntrinsic: {
2595 if (argumentCountIncludingThis < 2)
2596 return false;
2597
2598 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2599 return false;
2600
2601 insertChecks();
2602 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2603 Node* parseInt;
2604 if (argumentCountIncludingThis == 2)
2605 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2606 else {
2607 ASSERT(argumentCountIncludingThis > 2);
2608 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2609 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2610 }
2611 setResult(parseInt);
2612 return true;
2613 }
2614
2615 case CharCodeAtIntrinsic: {
2616 if (argumentCountIncludingThis != 2)
2617 return false;
2618
2619 insertChecks();
2620 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2621 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2622 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2623
2624 setResult(charCode);
2625 return true;
2626 }
2627
2628 case CharAtIntrinsic: {
2629 if (argumentCountIncludingThis != 2)
2630 return false;
2631
2632 insertChecks();
2633 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2634 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2635 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2636
2637 setResult(charCode);
2638 return true;
2639 }
2640 case Clz32Intrinsic: {
2641 insertChecks();
2642 if (argumentCountIncludingThis == 1)
2643 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2644 else {
2645 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2646 setResult(addToGraph(ArithClz32, operand));
2647 }
2648 return true;
2649 }
2650 case FromCharCodeIntrinsic: {
2651 if (argumentCountIncludingThis != 2)
2652 return false;
2653
2654 insertChecks();
2655 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2656 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2657
2658 setResult(charCode);
2659
2660 return true;
2661 }
2662
2663 case RegExpExecIntrinsic: {
2664 if (argumentCountIncludingThis != 2)
2665 return false;
2666
2667 insertChecks();
2668 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2669 setResult(regExpExec);
2670
2671 return true;
2672 }
2673
2674 case RegExpTestIntrinsic:
2675 case RegExpTestFastIntrinsic: {
2676 if (argumentCountIncludingThis != 2)
2677 return false;
2678
2679 if (intrinsic == RegExpTestIntrinsic) {
2680 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2681 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2682 return false;
2683
2684 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2685 Structure* regExpStructure = globalObject->regExpStructure();
2686 m_graph.registerStructure(regExpStructure);
2687 ASSERT(regExpStructure->storedPrototype().isObject());
2688 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2689
2690 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2691 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2692
2693 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2694 JSValue currentProperty;
2695 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2696 return false;
2697
2698 return currentProperty == primordialProperty;
2699 };
2700
2701 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2702 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2703 return false;
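                // (Illustrative: this is the guard against user code such as
                //      RegExp.prototype.exec = function() { return null; };
                // if the primordial exec has been replaced, we refuse to inline and leave the call generic.)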
2704
2705 // Check that regExpObject is actually a RegExp object.
2706 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2707 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2708
2709 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2710 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2711 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2712 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2713 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2714 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2715 }
2716
2717 insertChecks();
2718 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2719 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2720 setResult(regExpExec);
2721
2722 return true;
2723 }
2724
2725 case RegExpMatchFastIntrinsic: {
2726 RELEASE_ASSERT(argumentCountIncludingThis == 2);
2727
2728 insertChecks();
2729 Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2730 setResult(regExpMatch);
2731 return true;
2732 }
2733
2734 case ObjectCreateIntrinsic: {
2735 if (argumentCountIncludingThis != 2)
2736 return false;
2737
2738 insertChecks();
2739 setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2740 return true;
2741 }
2742
2743 case ObjectGetPrototypeOfIntrinsic: {
2744 if (argumentCountIncludingThis != 2)
2745 return false;
2746
2747 insertChecks();
2748 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2749 return true;
2750 }
2751
2752 case ObjectIsIntrinsic: {
2753 if (argumentCountIncludingThis < 3)
2754 return false;
2755
2756 insertChecks();
2757 setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2758 return true;
2759 }
2760
2761 case ObjectKeysIntrinsic: {
2762 if (argumentCountIncludingThis < 2)
2763 return false;
2764
2765 insertChecks();
2766 setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2767 return true;
2768 }
2769
2770 case ReflectGetPrototypeOfIntrinsic: {
2771 if (argumentCountIncludingThis != 2)
2772 return false;
2773
2774 insertChecks();
2775 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2776 return true;
2777 }
2778
2779 case IsTypedArrayViewIntrinsic: {
2780 ASSERT(argumentCountIncludingThis == 2);
2781
2782 insertChecks();
2783 setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2784 return true;
2785 }
2786
2787 case StringPrototypeValueOfIntrinsic: {
2788 insertChecks();
2789 Node* value = get(virtualRegisterForArgument(0, registerOffset));
2790 setResult(addToGraph(StringValueOf, value));
2791 return true;
2792 }
2793
2794 case StringPrototypeReplaceIntrinsic: {
2795 if (argumentCountIncludingThis != 3)
2796 return false;
2797
2798 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2799 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2800 return false;
2801
2802 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2803 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2804 return false;
2805
2806 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2807 Structure* regExpStructure = globalObject->regExpStructure();
2808 m_graph.registerStructure(regExpStructure);
2809 ASSERT(regExpStructure->storedPrototype().isObject());
2810 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2811
2812 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2813 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2814
2815 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2816 JSValue currentProperty;
2817 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2818 return false;
2819
2820 return currentProperty == primordialProperty;
2821 };
2822
2823 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2824 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2825 return false;
2826
2827 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2828 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2829 return false;
2830
2831 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2832 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2833 return false;
2834
2835 // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2836 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2837 return false;
2838
2839 insertChecks();
2840
2841 Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2842 setResult(resultNode);
2843 return true;
2844 }
2845
2846 case StringPrototypeReplaceRegExpIntrinsic: {
2847 if (argumentCountIncludingThis != 3)
2848 return false;
2849
2850 insertChecks();
2851 Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2852 setResult(resultNode);
2853 return true;
2854 }
2855
2856 case RoundIntrinsic:
2857 case FloorIntrinsic:
2858 case CeilIntrinsic:
2859 case TruncIntrinsic: {
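// With no argument, these Math functions operate on undefined, which coerces to NaN, so the result folds to the NaN constant.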
2860 if (argumentCountIncludingThis == 1) {
2861 insertChecks();
2862 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2863 return true;
2864 }
2865 insertChecks();
2866 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2867 NodeType op;
2868 if (intrinsic == RoundIntrinsic)
2869 op = ArithRound;
2870 else if (intrinsic == FloorIntrinsic)
2871 op = ArithFloor;
2872 else if (intrinsic == CeilIntrinsic)
2873 op = ArithCeil;
2874 else {
2875 ASSERT(intrinsic == TruncIntrinsic);
2876 op = ArithTrunc;
2877 }
2878 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2879 setResult(roundNode);
2880 return true;
2881 }
2882 case IMulIntrinsic: {
2883 if (argumentCountIncludingThis != 3)
2884 return false;
2885 insertChecks();
2886 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2887 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2888 Node* left = get(leftOperand);
2889 Node* right = get(rightOperand);
2890 setResult(addToGraph(ArithIMul, left, right));
2891 return true;
2892 }
2893
2894 case RandomIntrinsic: {
2895 if (argumentCountIncludingThis != 1)
2896 return false;
2897 insertChecks();
2898 setResult(addToGraph(ArithRandom));
2899 return true;
2900 }
2901
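// The intrinsics below appear to be test-only hooks (e.g. functions exposed through the jsc shell) that let stress tests observe which tier the code compiled in or coerce value representations.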
2902 case DFGTrueIntrinsic: {
2903 insertChecks();
2904 setResult(jsConstant(jsBoolean(true)));
2905 return true;
2906 }
2907
2908 case FTLTrueIntrinsic: {
2909 insertChecks();
2910 setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2911 return true;
2912 }
2913
2914 case OSRExitIntrinsic: {
2915 insertChecks();
2916 addToGraph(ForceOSRExit);
2917 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2918 return true;
2919 }
2920
2921 case IsFinalTierIntrinsic: {
2922 insertChecks();
2923 setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2924 return true;
2925 }
2926
2927 case SetInt32HeapPredictionIntrinsic: {
2928 insertChecks();
2929 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2930 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2931 if (node->hasHeapPrediction())
2932 node->setHeapPrediction(SpecInt32Only);
2933 }
2934 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2935 return true;
2936 }
2937
2938 case CheckInt32Intrinsic: {
2939 insertChecks();
2940 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2941 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2942 addToGraph(Phantom, Edge(node, Int32Use));
2943 }
2944 setResult(jsConstant(jsBoolean(true)));
2945 return true;
2946 }
2947
2948 case FiatInt52Intrinsic: {
2949 if (argumentCountIncludingThis != 2)
2950 return false;
2951 insertChecks();
2952 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2953 if (enableInt52())
2954 setResult(addToGraph(FiatInt52, get(operand)));
2955 else
2956 setResult(get(operand));
2957 return true;
2958 }
2959
2960 case JSMapGetIntrinsic: {
2961 if (argumentCountIncludingThis != 2)
2962 return false;
2963
2964 insertChecks();
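// Map.prototype.get lowers to: normalize the key (canonicalizing cases like -0 vs. +0), hash it, find the map's bucket for that key, and load the stored value out of the bucket.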
2965 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2966 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2967 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2968 Node* hash = addToGraph(MapHash, normalizedKey);
2969 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2970 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2971 setResult(resultNode);
2972 return true;
2973 }
2974
2975 case JSSetHasIntrinsic:
2976 case JSMapHasIntrinsic: {
2977 if (argumentCountIncludingThis != 2)
2978 return false;
2979
2980 insertChecks();
2981 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2982 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2983 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2984 Node* hash = addToGraph(MapHash, normalizedKey);
2985 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2986 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
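// GetMapBucket returns a distinguished sentinel bucket when the key is not present, so comparing the bucket against the frozen sentinel and negating the result yields the 'has' answer.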
2987 JSCell* sentinel = nullptr;
2988 if (intrinsic == JSMapHasIntrinsic)
2989 sentinel = m_vm->sentinelMapBucket();
2990 else
2991 sentinel = m_vm->sentinelSetBucket();
2992
2993 FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2994 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2995 Node* resultNode = addToGraph(LogicalNot, invertedResult);
2996 setResult(resultNode);
2997 return true;
2998 }
2999
3000 case JSSetAddIntrinsic: {
3001 if (argumentCountIncludingThis != 2)
3002 return false;
3003
3004 insertChecks();
3005 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3006 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3007 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3008 Node* hash = addToGraph(MapHash, normalizedKey);
3009 addToGraph(SetAdd, base, normalizedKey, hash);
3010 setResult(base);
3011 return true;
3012 }
3013
3014 case JSMapSetIntrinsic: {
3015 if (argumentCountIncludingThis != 3)
3016 return false;
3017
3018 insertChecks();
3019 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3020 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3021 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3022
3023 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3024 Node* hash = addToGraph(MapHash, normalizedKey);
3025
3026 addVarArgChild(base);
3027 addVarArgChild(normalizedKey);
3028 addVarArgChild(value);
3029 addVarArgChild(hash);
3030 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
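// Per spec, Map.prototype.set returns the map itself, so the result of the call is the base.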
3031 setResult(base);
3032 return true;
3033 }
3034
3035 case JSSetBucketHeadIntrinsic:
3036 case JSMapBucketHeadIntrinsic: {
3037 ASSERT(argumentCountIncludingThis == 2);
3038
3039 insertChecks();
3040 Node* map = get(virtualRegisterForArgument(1, registerOffset));
3041 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3042 Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3043 setResult(resultNode);
3044 return true;
3045 }
3046
3047 case JSSetBucketNextIntrinsic:
3048 case JSMapBucketNextIntrinsic: {
3049 ASSERT(argumentCountIncludingThis == 2);
3050
3051 insertChecks();
3052 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3053 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3054 Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3055 setResult(resultNode);
3056 return true;
3057 }
3058
3059 case JSSetBucketKeyIntrinsic:
3060 case JSMapBucketKeyIntrinsic: {
3061 ASSERT(argumentCountIncludingThis == 2);
3062
3063 insertChecks();
3064 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3065 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3066 Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3067 setResult(resultNode);
3068 return true;
3069 }
3070
3071 case JSMapBucketValueIntrinsic: {
3072 ASSERT(argumentCountIncludingThis == 2);
3073
3074 insertChecks();
3075 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3076 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3077 setResult(resultNode);
3078 return true;
3079 }
3080
3081 case JSWeakMapGetIntrinsic: {
3082 if (argumentCountIncludingThis != 2)
3083 return false;
3084
3085 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3086 return false;
3087
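// WeakMap keys must be objects, hence the Check with ObjectUse on the key. WeakMapGet yields either the stored value or the empty value when the key is absent; ExtractValueFromWeakMapGet converts the empty value to undefined.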
3088 insertChecks();
3089 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3090 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3091 addToGraph(Check, Edge(key, ObjectUse));
3092 Node* hash = addToGraph(MapHash, key);
3093 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3094 Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3095
3096 setResult(resultNode);
3097 return true;
3098 }
3099
3100 case JSWeakMapHasIntrinsic: {
3101 if (argumentCountIncludingThis != 2)
3102 return false;
3103
3104 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3105 return false;
3106
3107 insertChecks();
3108 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3109 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3110 addToGraph(Check, Edge(key, ObjectUse));
3111 Node* hash = addToGraph(MapHash, key);
3112 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3113 Node* invertedResult = addToGraph(IsEmpty, holder);
3114 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3115
3116 setResult(resultNode);
3117 return true;
3118 }
3119
3120 case JSWeakSetHasIntrinsic: {
3121 if (argumentCountIncludingThis != 2)
3122 return false;
3123
3124 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3125 return false;
3126
3127 insertChecks();
3128 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3129 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3130 addToGraph(Check, Edge(key, ObjectUse));
3131 Node* hash = addToGraph(MapHash, key);
3132 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3133 Node* invertedResult = addToGraph(IsEmpty, holder);
3134 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3135
3136 setResult(resultNode);
3137 return true;
3138 }
3139
3140 case JSWeakSetAddIntrinsic: {
3141 if (argumentCountIncludingThis != 2)
3142 return false;
3143
3144 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3145 return false;
3146
3147 insertChecks();
3148 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3149 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3150 addToGraph(Check, Edge(key, ObjectUse));
3151 Node* hash = addToGraph(MapHash, key);
3152 addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3153 setResult(base);
3154 return true;
3155 }
3156
3157 case JSWeakMapSetIntrinsic: {
3158 if (argumentCountIncludingThis != 3)
3159 return false;
3160
3161 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3162 return false;
3163
3164 insertChecks();
3165 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3166 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3167 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3168
3169 addToGraph(Check, Edge(key, ObjectUse));
3170 Node* hash = addToGraph(MapHash, key);
3171
3172 addVarArgChild(Edge(base, WeakMapObjectUse));
3173 addVarArgChild(Edge(key, ObjectUse));
3174 addVarArgChild(Edge(value));
3175 addVarArgChild(Edge(hash, Int32Use));
3176 addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
3177 setResult(base);
3178 return true;
3179 }
3180
3181 case DataViewGetInt8:
3182 case DataViewGetUint8:
3183 case DataViewGetInt16:
3184 case DataViewGetUint16:
3185 case DataViewGetInt32:
3186 case DataViewGetUint32:
3187 case DataViewGetFloat32:
3188 case DataViewGetFloat64: {
3189 if (!is64Bit())
3190 return false;
3191
3192 // To inline data view accesses, we assume the architecture we're running on:
3193 // - Is little endian.
3194 // - Allows unaligned loads/stores without crashing.
3195
3196 if (argumentCountIncludingThis < 2)
3197 return false;
3198 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3199 return false;
3200
3201 insertChecks();
3202
3203 uint8_t byteSize;
3204 NodeType op = DataViewGetInt;
3205 bool isSigned = false;
3206 switch (intrinsic) {
3207 case DataViewGetInt8:
3208 isSigned = true;
3209 FALLTHROUGH;
3210 case DataViewGetUint8:
3211 byteSize = 1;
3212 break;
3213
3214 case DataViewGetInt16:
3215 isSigned = true;
3216 FALLTHROUGH;
3217 case DataViewGetUint16:
3218 byteSize = 2;
3219 break;
3220
3221 case DataViewGetInt32:
3222 isSigned = true;
3223 FALLTHROUGH;
3224 case DataViewGetUint32:
3225 byteSize = 4;
3226 break;
3227
3228 case DataViewGetFloat32:
3229 byteSize = 4;
3230 op = DataViewGetFloat;
3231 break;
3232 case DataViewGetFloat64:
3233 byteSize = 8;
3234 op = DataViewGetFloat;
3235 break;
3236 default:
3237 RELEASE_ASSERT_NOT_REACHED();
3238 }
3239
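// Per the DataView spec, the littleEndian flag defaults to false (big-endian) when omitted. If the flag is a constant we bake the endianness into DataViewData and drop the child edge; otherwise we keep the child and pick the endianness at runtime.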
3240 TriState isLittleEndian = MixedTriState;
3241 Node* littleEndianChild = nullptr;
3242 if (byteSize > 1) {
3243 if (argumentCountIncludingThis < 3)
3244 isLittleEndian = FalseTriState;
3245 else {
3246 littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3247 if (littleEndianChild->hasConstant()) {
3248 JSValue constant = littleEndianChild->constant()->value();
3249 isLittleEndian = constant.pureToBoolean();
3250 if (isLittleEndian != MixedTriState)
3251 littleEndianChild = nullptr;
3252 } else
3253 isLittleEndian = MixedTriState;
3254 }
3255 }
3256
3257 DataViewData data { };
3258 data.isLittleEndian = isLittleEndian;
3259 data.isSigned = isSigned;
3260 data.byteSize = byteSize;
3261
3262 setResult(
3263 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
3264 return true;
3265 }
3266
3267 case DataViewSetInt8:
3268 case DataViewSetUint8:
3269 case DataViewSetInt16:
3270 case DataViewSetUint16:
3271 case DataViewSetInt32:
3272 case DataViewSetUint32:
3273 case DataViewSetFloat32:
3274 case DataViewSetFloat64: {
3275 if (!is64Bit())
3276 return false;
3277
3278 if (argumentCountIncludingThis < 3)
3279 return false;
3280
3281 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3282 return false;
3283
3284 insertChecks();
3285
3286 uint8_t byteSize;
3287 bool isFloatingPoint = false;
3288 bool isSigned = false;
3289 switch (intrinsic) {
3290 case DataViewSetInt8:
3291 isSigned = true;
3292 FALLTHROUGH;
3293 case DataViewSetUint8:
3294 byteSize = 1;
3295 break;
3296
3297 case DataViewSetInt16:
3298 isSigned = true;
3299 FALLTHROUGH;
3300 case DataViewSetUint16:
3301 byteSize = 2;
3302 break;
3303
3304 case DataViewSetInt32:
3305 isSigned = true;
3306 FALLTHROUGH;
3307 case DataViewSetUint32:
3308 byteSize = 4;
3309 break;
3310
3311 case DataViewSetFloat32:
3312 isFloatingPoint = true;
3313 byteSize = 4;
3314 break;
3315 case DataViewSetFloat64:
3316 isFloatingPoint = true;
3317 byteSize = 8;
3318 break;
3319 default:
3320 RELEASE_ASSERT_NOT_REACHED();
3321 }
3322
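// Endianness is handled the same way as in the DataView get case above, except the littleEndian flag is argument 3.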
3323 TriState isLittleEndian = MixedTriState;
3324 Node* littleEndianChild = nullptr;
3325 if (byteSize > 1) {
3326 if (argumentCountIncludingThis < 4)
3327 isLittleEndian = FalseTriState;
3328 else {
3329 littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3330 if (littleEndianChild->hasConstant()) {
3331 JSValue constant = littleEndianChild->constant()->value();
3332 isLittleEndian = constant.pureToBoolean();
3333 if (isLittleEndian != MixedTriState)
3334 littleEndianChild = nullptr;
3335 } else
3336 isLittleEndian = MixedTriState;
3337 }
3338 }
3339
3340 DataViewData data { };
3341 data.isLittleEndian = isLittleEndian;
3342 data.isSigned = isSigned;
3343 data.byteSize = byteSize;
3344 data.isFloatingPoint = isFloatingPoint;
3345
3346 addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3347 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3348 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3349 addVarArgChild(littleEndianChild);
3350
3351 addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3352 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
3353 return true;
3354 }
3355
3356 case HasOwnPropertyIntrinsic: {
3357 if (argumentCountIncludingThis != 2)
3358 return false;
3359
3360 // This can be racy; that's fine. We know that once we observe that the cache has been
3361 // created, it will never be destroyed until the VM is destroyed. It's unlikely that
3362 // we'd ever get to the point where we inline this as an intrinsic without the
3363 // cache having been created; however, it's possible if we always throw exceptions inside
3364 // hasOwnProperty.
3365 if (!m_vm->hasOwnPropertyCache())
3366 return false;
3367
3368 insertChecks();
3369 Node* object = get(virtualRegisterForArgument(0, registerOffset));
3370 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3371 Node* resultNode = addToGraph(HasOwnProperty, object, key);
3372 setResult(resultNode);
3373 return true;
3374 }
3375
3376 case StringPrototypeSliceIntrinsic: {
3377 if (argumentCountIncludingThis < 2)
3378 return false;
3379
3380 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3381 return false;
3382
3383 insertChecks();
3384 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3385 Node* start = get(virtualRegisterForArgument(1, registerOffset));
3386 Node* end = nullptr;
3387 if (argumentCountIncludingThis > 2)
3388 end = get(virtualRegisterForArgument(2, registerOffset));
3389 Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3390 setResult(resultNode);
3391 return true;
3392 }
3393
3394 case StringPrototypeToLowerCaseIntrinsic: {
3395 if (argumentCountIncludingThis != 1)
3396 return false;
3397
3398 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3399 return false;
3400
3401 insertChecks();
3402 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3403 Node* resultNode = addToGraph(ToLowerCase, thisString);
3404 setResult(resultNode);
3405 return true;
3406 }
3407
3408 case NumberPrototypeToStringIntrinsic: {
3409 if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
3410 return false;
3411
3412 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3413 return false;
3414
3415 insertChecks();
3416 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3417 if (argumentCountIncludingThis == 1) {
3418 Node* resultNode = addToGraph(ToString, thisNumber);
3419 setResult(resultNode);
3420 } else {
3421 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3422 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3423 setResult(resultNode);
3424 }
3425 return true;
3426 }
3427
3428 case NumberIsIntegerIntrinsic: {
3429 if (argumentCountIncludingThis < 2)
3430 return false;
3431
3432 insertChecks();
3433 Node* input = get(virtualRegisterForArgument(1, registerOffset));
3434 Node* resultNode = addToGraph(NumberIsInteger, input);
3435 setResult(resultNode);
3436 return true;
3437 }
3438
3439 case CPUMfenceIntrinsic:
3440 case CPURdtscIntrinsic:
3441 case CPUCpuidIntrinsic:
3442 case CPUPauseIntrinsic: {
3443#if CPU(X86_64)
3444 if (!m_graph.m_plan.isFTL())
3445 return false;
3446 insertChecks();
3447 setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3448 return true;
3449#else
3450 return false;
3451#endif
3452 }
3453
3454 default:
3455 return false;
3456 }
3457 };
3458
3459 if (inlineIntrinsic()) {
3460 RELEASE_ASSERT(didSetResult);
3461 return true;
3462 }
3463
3464 return false;
3465}
3466
3467template<typename ChecksFunctor>
3468bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3469{
3470 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3471 return false;
3472 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3473 return false;
3474
3475 // FIXME: Currently, we only support functions that take up to 2 arguments.
3476 // Eventually, we should extend this. But possibly, 2 or 3 arguments can cover typical use cases.
3477 // https://bugs.webkit.org/show_bug.cgi?id=164346
3478 ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
3479
3480 insertChecks();
3481 addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3482 return true;
3483}
3484
3485
3486template<typename ChecksFunctor>
3487bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3488{
3489 switch (variant.intrinsic()) {
3490 case TypedArrayByteLengthIntrinsic: {
3491 insertChecks();
3492
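// byteLength is the element count shifted left by log2(elementSize); every structure in the set is a typed array with the same element size, so the shift amount is a compile-time constant.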
3493 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3494 Array::Type arrayType = toArrayType(type);
3495 size_t logSize = logElementSize(type);
3496
3497 variant.structureSet().forEach([&] (Structure* structure) {
3498 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3499 ASSERT(logSize == logElementSize(curType));
3500 arrayType = refineTypedArrayType(arrayType, curType);
3501 ASSERT(arrayType != Array::Generic);
3502 });
3503
3504 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3505
3506 if (!logSize) {
3507 set(result, lengthNode);
3508 return true;
3509 }
3510
3511 // We can use a BitLShift here because typed arrays will never have a byteLength
3512 // that overflows int32.
3513 Node* shiftNode = jsConstant(jsNumber(logSize));
3514 set(result, addToGraph(BitLShift, lengthNode, shiftNode));
3515
3516 return true;
3517 }
3518
3519 case TypedArrayLengthIntrinsic: {
3520 insertChecks();
3521
3522 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3523 Array::Type arrayType = toArrayType(type);
3524
3525 variant.structureSet().forEach([&] (Structure* structure) {
3526 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3527 arrayType = refineTypedArrayType(arrayType, curType);
3528 ASSERT(arrayType != Array::Generic);
3529 });
3530
3531 set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3532
3533 return true;
3534
3535 }
3536
3537 case TypedArrayByteOffsetIntrinsic: {
3538 insertChecks();
3539
3540 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3541 Array::Type arrayType = toArrayType(type);
3542
3543 variant.structureSet().forEach([&] (Structure* structure) {
3544 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3545 arrayType = refineTypedArrayType(arrayType, curType);
3546 ASSERT(arrayType != Array::Generic);
3547 });
3548
3549 set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
3550
3551 return true;
3552 }
3553
3554 case UnderscoreProtoIntrinsic: {
3555 insertChecks();
3556
3557 bool canFold = !variant.structureSet().isEmpty();
3558 JSValue prototype;
3559 variant.structureSet().forEach([&] (Structure* structure) {
3560 auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3561 MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3562 if (getPrototypeMethod != defaultGetPrototype) {
3563 canFold = false;
3564 return;
3565 }
3566
3567 if (structure->hasPolyProto()) {
3568 canFold = false;
3569 return;
3570 }
3571 if (!prototype)
3572 prototype = structure->storedPrototype();
3573 else if (prototype != structure->storedPrototype())
3574 canFold = false;
3575 });
3576
3577 // OK, exactly one prototype was found. We perform constant folding here.
3578 // This information is important for a super constructor call to get a constant new.target.
3579 if (prototype && canFold) {
3580 set(result, weakJSConstant(prototype));
3581 return true;
3582 }
3583
3584 set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));
3585 return true;
3586 }
3587
3588 default:
3589 return false;
3590 }
3591 RELEASE_ASSERT_NOT_REACHED();
3592}
3593
3594static void blessCallDOMGetter(Node* node)
3595{
3596 DOMJIT::CallDOMGetterSnippet* snippet = node->callDOMGetterData()->snippet;
3597 if (snippet && !snippet->effect.mustGenerate())
3598 node->clearFlags(NodeMustGenerate);
3599}
3600
3601bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
3602{
3603 if (!variant.domAttribute())
3604 return false;
3605
3606 auto domAttribute = variant.domAttribute().value();
3607
3608 // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
3609 // since replacing a CustomGetterSetter always incurs a Structure transition.
3610 if (!check(variant.conditionSet()))
3611 return false;
3612 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode);
3613
3614 // We do not need to emit a CheckCell here. When the custom accessor is replaced with a different one, a Structure transition occurs.
3615 addToGraph(CheckSubClass, OpInfo(domAttribute.classInfo), thisNode);
3616
3617 bool wasSeenInJIT = true;
3618 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), GetByIdStatus(GetByIdStatus::Custom, wasSeenInJIT, variant))), thisNode);
3619
3620 CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
3621 callDOMGetterData->customAccessorGetter = variant.customAccessorGetter();
3622 ASSERT(callDOMGetterData->customAccessorGetter);
3623
3624 if (const auto* domJIT = domAttribute.domJIT) {
3625 callDOMGetterData->domJIT = domJIT;
3626 Ref<DOMJIT::CallDOMGetterSnippet> snippet = domJIT->compiler()();
3627 callDOMGetterData->snippet = snippet.ptr();
3628 m_graph.m_domJITSnippets.append(WTFMove(snippet));
3629 }
3630 DOMJIT::CallDOMGetterSnippet* callDOMGetterSnippet = callDOMGetterData->snippet;
3631 callDOMGetterData->identifierNumber = identifierNumber;
3632
3633 Node* callDOMGetterNode = nullptr;
3634 // GlobalObject of thisNode is always used to create a DOMWrapper.
3635 if (callDOMGetterSnippet && callDOMGetterSnippet->requireGlobalObject) {
3636 Node* globalObject = addToGraph(GetGlobalObject, thisNode);
3637 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode, globalObject);
3638 } else
3639 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode);
3640 blessCallDOMGetter(callDOMGetterNode);
3641 set(result, callDOMGetterNode);
3642 return true;
3643}
3644
3645bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById)
3646{
3647 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
3648 return false;
3649 addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
3650
3651 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getById)), base);
3652
3653 // Ideally we wouldn't have to do this Phantom. But:
3654 //
3655 // For the constant case: we must do it because otherwise we would have no way of knowing
3656 // that the scope is live at OSR here.
3657 //
3658 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
3659 // won't be able to handle an Undefined scope.
3660 addToGraph(Phantom, base);
3661
3662 // Constant folding in the bytecode parser is important for performance. This may not
3663 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
3664 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
3665 // would recompile. But if we can fold it here, we avoid the exit.
3666 m_graph.freeze(getById.moduleEnvironment());
3667 if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) {
3668 set(result, weakJSConstant(value));
3669 return true;
3670 }
3671 set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
3672 return true;
3673}
3674
3675template<typename ChecksFunctor>
3676bool ByteCodeParser::handleTypedArrayConstructor(
3677 VirtualRegister result, InternalFunction* function, int registerOffset,
3678 int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
3679{
3680 if (!isTypedView(type))
3681 return false;
3682
3683 if (function->classInfo() != constructorClassInfoForType(type))
3684 return false;
3685
3686 if (function->globalObject(*m_vm) != m_inlineStackTop->m_codeBlock->globalObject())
3687 return false;
3688
3689 // We only have an intrinsic for the case where you say:
3690 //
3691 // new FooArray(blah);
3692 //
3693 // Of course, 'blah' could be any of the following:
3694 //
3695 // - Integer, indicating that you want to allocate an array of that length.
3696 // This is the thing we're hoping for, and what we can actually do meaningful
3697 // optimizations for.
3698 //
3699 // - Array buffer, indicating that you want to create a view onto that _entire_
3700 // buffer.
3701 //
3702 // - Non-buffer object, indicating that you want to create a copy of that
3703 // object by pretending that it quacks like an array.
3704 //
3705 // - Anything else, indicating that you want to have an exception thrown at
3706 // you.
3707 //
3708 // The intrinsic, NewTypedArray, will behave as if it could do any of these
3709 // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
3710 // predicted Int32, then we lock it in as a normal typed array allocation.
3711 // Otherwise, NewTypedArray turns into a totally opaque function call that
3712 // may clobber the world - by virtue of it accessing properties on what could
3713 // be an object.
3714 //
3715 // Note that although the generic form of NewTypedArray sounds sort of awful,
3716 // it is actually quite likely to be more efficient than a fully generic
3717 // Construct. So, we might want to think about making NewTypedArray variadic,
3718 // or else making Construct not super slow.
3719
3720 if (argumentCountIncludingThis != 2)
3721 return false;
3722
3723 if (!function->globalObject(*m_vm)->typedArrayStructureConcurrently(type))
3724 return false;
3725
3726 insertChecks();
3727 set(result,
3728 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
3729 return true;
3730}
3731
3732template<typename ChecksFunctor>
3733bool ByteCodeParser::handleConstantInternalFunction(
3734 Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset,
3735 int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3736{
3737 VERBOSE_LOG(" Handling constant internal function ", JSValue(function), "\n");
3738
3739 // It so happens that the code below assumes that the result operand is valid. It's extremely
3740 // unlikely that the result operand would be invalid - you'd have to call this via a setter call.
3741 if (!result.isValid())
3742 return false;
3743
3744 if (kind == CodeForConstruct) {
3745 Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset));
3746 // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we
3747 // don't know what the prototype of the constructed object will be.
3748 // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700
3749 if (newTargetNode != callTargetNode)
3750 return false;
3751 }
3752
3753 if (function->classInfo() == ArrayConstructor::info()) {
3754 if (function->globalObject(*m_vm) != m_inlineStackTop->m_codeBlock->globalObject())
3755 return false;
3756
3757 insertChecks();
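// A single argument means a size-based allocation (NewArrayWithSize); zero or several arguments mean an array containing those arguments (NewArray).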
3758 if (argumentCountIncludingThis == 2) {
3759 set(result,
3760 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
3761 return true;
3762 }
3763
3764 for (int i = 1; i < argumentCountIncludingThis; ++i)
3765 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
3766 set(result,
3767 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1)));
3768 return true;
3769 }
3770
3771 if (function->classInfo() == NumberConstructor::info()) {
3772 if (kind == CodeForConstruct)
3773 return false;
3774
3775 insertChecks();
3776 if (argumentCountIncludingThis <= 1)
3777 set(result, jsConstant(jsNumber(0)));
3778 else
3779 set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
3780
3781 return true;
3782 }
3783
3784 if (function->classInfo() == StringConstructor::info()) {
3785 insertChecks();
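// String() / String(x) as a call produces a primitive string (the empty string when there is no argument); for 'new String(...)' the primitive is additionally wrapped in a String object below.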
3786
3787 Node* resultNode;
3788
3789 if (argumentCountIncludingThis <= 1)
3790 resultNode = jsConstant(m_vm->smallStrings.emptyString());
3791 else
3792 resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
3793
3794 if (kind == CodeForConstruct)
3795 resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode);
3796
3797 set(result, resultNode);
3798 return true;
3799 }
3800
3801 if (function->classInfo() == SymbolConstructor::info() && kind == CodeForCall) {
3802 insertChecks();
3803
3804 Node* resultNode;
3805
3806 if (argumentCountIncludingThis <= 1)
3807 resultNode = addToGraph(NewSymbol);
3808 else
3809 resultNode = addToGraph(NewSymbol, addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))));
3810
3811 set(result, resultNode);
3812 return true;
3813 }
3814
3815 // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
3816 if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
3817 insertChecks();
3818
3819 Node* resultNode;
3820 if (argumentCountIncludingThis <= 1)
3821 resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor())));
3822 else
3823 resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
3824 set(result, resultNode);
3825 return true;
3826 }
3827
3828 for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) {
3829 bool handled = handleTypedArrayConstructor(
3830 result, function, registerOffset, argumentCountIncludingThis,
3831 indexToTypedArrayType(typeIndex), insertChecks);
3832 if (handled)
3833 return true;
3834 }
3835
3836 return false;
3837}
3838
3839Node* ByteCodeParser::handleGetByOffset(
3840 SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
3841{
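// Inline property offsets live directly in the object cell, so the base itself serves as the property storage; out-of-line offsets live in the butterfly, which we load with GetButterfly.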
3842 Node* propertyStorage;
3843 if (isInlineOffset(offset))
3844 propertyStorage = base;
3845 else
3846 propertyStorage = addToGraph(GetButterfly, base);
3847
3848 StorageAccessData* data = m_graph.m_storageAccessData.add();
3849 data->offset = offset;
3850 data->identifierNumber = identifierNumber;
3851
3852 Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
3853
3854 return getByOffset;
3855}
3856
3857Node* ByteCodeParser::handlePutByOffset(
3858 Node* base, unsigned identifier, PropertyOffset offset,
3859 Node* value)
3860{
3861 Node* propertyStorage;
3862 if (isInlineOffset(offset))
3863 propertyStorage = base;
3864 else
3865 propertyStorage = addToGraph(GetButterfly, base);
3866
3867 StorageAccessData* data = m_graph.m_storageAccessData.add();
3868 data->offset = offset;
3869 data->identifierNumber = identifier;
3870
3871 Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
3872
3873 return result;
3874}
3875
3876bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
3877{
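// First try to watch the condition (costing no emitted code); if it cannot be watched, fall back to a CheckStructure on the condition's object, which is only sound if the object's current structure still ensures the condition's validity.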
3878 if (!condition)
3879 return false;
3880
3881 if (m_graph.watchCondition(condition))
3882 return true;
3883
3884 Structure* structure = condition.object()->structure(*m_vm);
3885 if (!condition.structureEnsuresValidity(structure))
3886 return false;
3887
3888 addToGraph(
3889 CheckStructure,
3890 OpInfo(m_graph.addStructureSet(structure)),
3891 weakJSConstant(condition.object()));
3892 return true;
3893}
3894
3895GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
3896{
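// If the prototype object's structure is one the DFG will watch anyway, we can try to constant-fold the load to the property's current value; the structure watchpoint jettisons the code if that changes.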
3897 if (method.kind() == GetByOffsetMethod::LoadFromPrototype
3898 && method.prototype()->structure()->dfgShouldWatch()) {
3899 if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
3900 return GetByOffsetMethod::constant(m_graph.freeze(constant));
3901 }
3902
3903 return method;
3904}
3905
3906bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode)
3907{
3908 ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope);
3909
3910 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3911 if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated())
3912 return true;
3913
3914 switch (type) {
3915 case GlobalProperty:
3916 case GlobalVar:
3917 case GlobalLexicalVar:
3918 case ClosureVar:
3919 case LocalClosureVar:
3920 case ModuleVar:
3921 return false;
3922
3923 case UnresolvedProperty:
3924 case UnresolvedPropertyWithVarInjectionChecks: {
3925 // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
3926 // haven't exited from this access before, to let the baseline JIT try to better
3927 // cache the access. If we've already exited from this operation, it's unlikely that
3928 // the baseline will come up with a better ResolveType, and instead we will compile
3929 // this as a dynamic scope access.
3930
3931 // We only track our heuristic through resolve_scope since resolve_scope will
3932 // dominate unresolved gets/puts on that scope.
3933 if (opcode != op_resolve_scope)
3934 return true;
3935
3936 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, InadequateCoverage)) {
3937 // We've already exited so give up on getting better ResolveType information.
3938 return true;
3939 }
3940
3941 // We have not exited yet, so let's have the baseline get better ResolveType information for us.
3942 // This type of code is often seen when we tier up in a loop but haven't executed the part
3943 // of a function that comes after the loop.
3944 return false;
3945 }
3946
3947 case Dynamic:
3948 return true;
3949
3950 case GlobalPropertyWithVarInjectionChecks:
3951 case GlobalVarWithVarInjectionChecks:
3952 case GlobalLexicalVarWithVarInjectionChecks:
3953 case ClosureVarWithVarInjectionChecks:
3954 return false;
3955 }
3956
3957 ASSERT_NOT_REACHED();
3958 return false;
3959}
3960
3961GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition)
3962{
3963 VERBOSE_LOG("Planning a load: ", condition, "\n");
3964
3965 // We might promote this to Equivalence, and a later DFG pass might also do such promotion
3966 // even if we fail, but for simplicity this cannot be asked to load an equivalence condition.
3967 // None of the clients of this method will request a load of an Equivalence condition anyway,
3968 // and supporting it would complicate the heuristics below.
3969 RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence);
3970
3971 // Here's the ranking of how to handle this, from most preferred to least preferred:
3972 //
3973 // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value.
3974 // No other code is emitted, and the structure of the base object is never registered.
3975 // Hence this results in zero code and we won't jettison this compilation if the object
3976 // transitions, even if the structure is watchable right now.
3977 //
3978 // 2) Need to emit a load, and the current structure of the base is going to be watched by the
3979 // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the
3980 // condition, since the act of turning the base into a constant in IR will cause the DFG to
3981 // watch the structure anyway and doing so would subsume watching the condition.
3982 //
3983 // 3) Need to emit a load, and the current structure of the base is watchable but not by the
3984 // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch
3985 // the condition, and emit a load.
3986 //
3987 // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a
3988 // structure check, and emit a load.
3989 //
3990 // 5) The condition does not hold. Give up and return null.
3991
3992 // First, try to promote Presence to Equivalence. We do this before doing anything else
3993 // because it's the most profitable. Also, there are cases where the presence is watchable but
3994 // we don't want to watch it unless it became an equivalence (see the relationship between
3995 // (1), (2), and (3) above).
3996 ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
3997 if (m_graph.watchCondition(equivalenceCondition))
3998 return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue()));
3999
4000 // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once
4001 // we do this, the frozen value will have its own idea of what the structure is. Use that from
4002 // now on just because it's less confusing.
4003 FrozenValue* base = m_graph.freeze(condition.object());
4004 Structure* structure = base->structure();
4005
4006 // Check if the structure that we've registered makes the condition hold. If not, just give
4007 // up. This is case (5) above.
4008 if (!condition.structureEnsuresValidity(structure))
4009 return GetByOffsetMethod();
4010
4011 // If the structure is watched by the DFG already, then just use this fact to emit the load.
4012 // This is case (2) above.
4013 if (structure->dfgShouldWatch())
4014 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4015
4016 // If we can watch the condition right now, then we can emit the load after watching it. This
4017 // is case (3) above.
4018 if (m_graph.watchCondition(condition))
4019 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4020
4021 // We can't watch anything but we know that the current structure satisfies the condition. So,
4022 // check for that structure and then emit the load.
4023 addToGraph(
4024 CheckStructure,
4025 OpInfo(m_graph.addStructureSet(structure)),
4026 addToGraph(JSConstant, OpInfo(base)));
4027 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
4028}
4029
4030Node* ByteCodeParser::load(
4031 SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method,
4032 NodeType op)
4033{
4034 switch (method.kind()) {
4035 case GetByOffsetMethod::Invalid:
4036 return nullptr;
4037 case GetByOffsetMethod::Constant:
4038 return addToGraph(JSConstant, OpInfo(method.constant()));
4039 case GetByOffsetMethod::LoadFromPrototype: {
4040 Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
4041 return handleGetByOffset(
4042 prediction, baseNode, identifierNumber, method.offset(), op);
4043 }
4044 case GetByOffsetMethod::Load:
4045 // Will never see this from planLoad().
4046 RELEASE_ASSERT_NOT_REACHED();
4047 return nullptr;
4048 }
4049
4050 RELEASE_ASSERT_NOT_REACHED();
4051 return nullptr;
4052}
4053
4054Node* ByteCodeParser::load(
4055 SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op)
4056{
4057 GetByOffsetMethod method = planLoad(condition);
4058 return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op);
4059}
4060
4061bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet)
4062{
4063 for (const ObjectPropertyCondition& condition : conditionSet) {
4064 if (!check(condition))
4065 return false;
4066 }
4067 return true;
4068}
4069
4070GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet)
4071{
4072 VERBOSE_LOG("conditionSet = ", conditionSet, "\n");
4073
4074 GetByOffsetMethod result;
4075 for (const ObjectPropertyCondition& condition : conditionSet) {
4076 switch (condition.kind()) {
4077 case PropertyCondition::Presence:
4078 RELEASE_ASSERT(!result); // Should only see exactly one of these.
4079 result = planLoad(condition);
4080 if (!result)
4081 return GetByOffsetMethod();
4082 break;
4083 default:
4084 if (!check(condition))
4085 return GetByOffsetMethod();
4086 break;
4087 }
4088 }
4089 if (!result) {
4090 // We have an unset property.
4091 ASSERT(!conditionSet.numberOfConditionsWithKind(PropertyCondition::Presence));
4092 return GetByOffsetMethod::constant(m_constantUndefined);
4093 }
4094 return result;
4095}
4096
4097Node* ByteCodeParser::load(
4098 SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op)
4099{
4100 GetByOffsetMethod method = planLoad(conditionSet);
4101 return load(
4102 prediction,
4103 m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()),
4104 method, op);
4105}
4106
4107ObjectPropertyCondition ByteCodeParser::presenceLike(
4108 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4109{
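// A presence condition can only be formed if every structure in the set stores the property at the same offset with the same attributes; otherwise we bail by returning an empty condition.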
4110 if (set.isEmpty())
4111 return ObjectPropertyCondition();
4112 unsigned attributes;
4113 PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes);
4114 if (firstOffset != offset)
4115 return ObjectPropertyCondition();
4116 for (unsigned i = 1; i < set.size(); ++i) {
4117 unsigned otherAttributes;
4118 PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes);
4119 if (otherOffset != offset || otherAttributes != attributes)
4120 return ObjectPropertyCondition();
4121 }
4122 return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes);
4123}
4124
4125bool ByteCodeParser::checkPresenceLike(
4126 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4127{
4128 return check(presenceLike(knownBase, uid, offset, set));
4129}
4130
4131void ByteCodeParser::checkPresenceLike(
4132 Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
4133{
4134 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
4135 if (checkPresenceLike(knownBase, uid, offset, set))
4136 return;
4137 }
4138
4139 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base);
4140}
4141
4142template<typename VariantType>
4143Node* ByteCodeParser::load(
4144 SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant)
4145{
4146 // Make sure backwards propagation knows that we've used base.
4147 addToGraph(Phantom, base);
4148
4149 bool needStructureCheck = true;
4150
4151 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
4152
4153 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>(*m_vm)) {
4154 // Try to optimize away the structure check. Note that it's not worth doing anything about this
4155 // if the base's structure is watched.
4156 Structure* structure = base->constant()->structure();
4157 if (!structure->dfgShouldWatch()) {
4158 if (!variant.conditionSet().isEmpty()) {
4159 // This means that we're loading from a prototype or we have a property miss. We expect
4160 // the base not to have the property. We can only use ObjectPropertyCondition if all of
4161 // the structures in the variant.structureSet() agree on the prototype (it would be
4162 // hilariously rare if they didn't). Note that we are relying on structureSet() having
4163 // at least one element. That will always be true here because of how GetByIdStatus/PutByIdStatus work.
4164
4165 // FIXME: right now, if we have an OPCS, we have mono proto. However, this will
4166 // need to be changed in the future once we have a hybrid data structure for
4167 // poly proto:
4168 // https://bugs.webkit.org/show_bug.cgi?id=177339
4169 JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject();
4170 bool allAgree = true;
4171 for (unsigned i = 1; i < variant.structureSet().size(); ++i) {
4172 if (variant.structureSet()[i]->storedPrototypeObject() != prototype) {
4173 allAgree = false;
4174 break;
4175 }
4176 }
4177 if (allAgree) {
4178 ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier(
4179 knownBase, uid, prototype);
4180 if (check(condition))
4181 needStructureCheck = false;
4182 }
4183 } else {
4184 // This means we're loading directly from base. We can avoid all of the code that follows
4185 // if we can prove that the property is a constant. Otherwise, we try to prove that the
4186 // property is watchably present, in which case we get rid of the structure check.
4187
4188 ObjectPropertyCondition presenceCondition =
4189 presenceLike(knownBase, uid, variant.offset(), variant.structureSet());
4190 if (presenceCondition) {
4191 ObjectPropertyCondition equivalenceCondition =
4192 presenceCondition.attemptToMakeEquivalenceWithoutBarrier(*m_vm);
4193 if (m_graph.watchCondition(equivalenceCondition))
4194 return weakJSConstant(equivalenceCondition.requiredValue());
4195
4196 if (check(presenceCondition))
4197 needStructureCheck = false;
4198 }
4199 }
4200 }
4201 }
4202
4203 if (needStructureCheck)
4204 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
4205
4206 if (variant.isPropertyUnset()) {
4207 if (m_graph.watchConditions(variant.conditionSet()))
4208 return jsConstant(jsUndefined());
4209 return nullptr;
4210 }
4211
4212 SpeculatedType loadPrediction;
4213 NodeType loadOp;
4214 if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) {
4215 loadPrediction = SpecCellOther;
4216 loadOp = GetGetterSetterByOffset;
4217 } else {
4218 loadPrediction = prediction;
4219 loadOp = GetByOffset;
4220 }
4221
4222 Node* loadedValue;
4223 if (!variant.conditionSet().isEmpty())
4224 loadedValue = load(loadPrediction, variant.conditionSet(), loadOp);
4225 else {
4226 if (needStructureCheck && base->hasConstant()) {
4227 // We did emit a structure check. That means that we have an opportunity to do constant folding
4228 // here, since we didn't do it above.
4229 JSValue constant = m_graph.tryGetConstantProperty(
4230 base->asJSValue(), *m_graph.addStructureSet(variant.structureSet()), variant.offset());
4231 if (constant)
4232 return weakJSConstant(constant);
4233 }
4234
4235 loadedValue = handleGetByOffset(
4236 loadPrediction, base, identifierNumber, variant.offset(), loadOp);
4237 }
4238
4239 return loadedValue;
4240}
4241
4242Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value)
4243{
4244 RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
4245
4246 checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
4247 return handlePutByOffset(base, identifier, variant.offset(), value);
4248}
4249
4250void ByteCodeParser::handleGetById(
4251 VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber,
4252 GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize)
4253{
4254 // Attempt to reduce the set of things in the GetByIdStatus.
4255 if (base->op() == NewObject) {
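        // Walk backwards from the current position to the NewObject. If nothing in between could
        // have written to the structure ID, the base must still have the structure that NewObject
        // allocated, so it is safe to filter the status down to just that structure.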
4256 bool ok = true;
4257 for (unsigned i = m_currentBlock->size(); i--;) {
4258 Node* node = m_currentBlock->at(i);
4259 if (node == base)
4260 break;
4261 if (writesOverlap(m_graph, node, JSCell_structureID)) {
4262 ok = false;
4263 break;
4264 }
4265 }
4266 if (ok)
4267 getByIdStatus.filter(base->structure().get());
4268 }
4269
4270 NodeType getById;
4271 if (type == AccessType::Get)
4272 getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
4273 else if (type == AccessType::TryGet)
4274 getById = TryGetById;
4275 else
4276 getById = getByIdStatus.makesCalls() ? GetByIdDirectFlush : GetByIdDirect;
4277
4278 if (getById != TryGetById && getByIdStatus.isModuleNamespace()) {
4279 if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) {
4280 if (UNLIKELY(m_graph.compilation()))
4281 m_graph.compilation()->noticeInlinedGetById();
4282 return;
4283 }
4284 }
4285
4286    // Special path for custom accessors, since a custom accessor's offset does not have any meaning.
4287    // So this is completely different from the Simple case. But we still have a chance to optimize it when we use DOMJIT.
4288 if (Options::useDOMJIT() && getByIdStatus.isCustom()) {
4289 ASSERT(getByIdStatus.numVariants() == 1);
4290 ASSERT(!getByIdStatus.makesCalls());
4291 GetByIdVariant variant = getByIdStatus[0];
4292 ASSERT(variant.domAttribute());
4293 if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) {
4294 if (UNLIKELY(m_graph.compilation()))
4295 m_graph.compilation()->noticeInlinedGetById();
4296 return;
4297 }
4298 }
4299
4300 ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !getByIdStatus.makesCalls());
4301 if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) {
4302 set(destination,
4303 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4304 return;
4305 }
4306
4307 // FIXME: If we use the GetByIdStatus for anything then we should record it and insert a node
4308 // after everything else (like the GetByOffset or whatever) that will filter the recorded
4309 // GetByIdStatus. That means that the constant folder also needs to do the same!
4310
4311 if (getByIdStatus.numVariants() > 1) {
4312 if (getByIdStatus.makesCalls() || !m_graph.m_plan.isFTL()
4313 || !Options::usePolymorphicAccessInlining()
4314 || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
4315 set(destination,
4316 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4317 return;
4318 }
4319
4320 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
4321
4322 Vector<MultiGetByOffsetCase, 2> cases;
4323
4324        // 1) Emit prototype structure checks for all chains. This may not be optimal if there is
4325        //    some rarely executed case in the chain that requires a lot of checks and those
4326        //    checks are not watchpointable.
4327 for (const GetByIdVariant& variant : getByIdStatus.variants()) {
4328 if (variant.intrinsic() != NoIntrinsic) {
4329 set(destination,
4330 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4331 return;
4332 }
4333
4334 if (variant.conditionSet().isEmpty()) {
4335 cases.append(
4336 MultiGetByOffsetCase(
4337 *m_graph.addStructureSet(variant.structureSet()),
4338 GetByOffsetMethod::load(variant.offset())));
4339 continue;
4340 }
4341
4342 GetByOffsetMethod method = planLoad(variant.conditionSet());
4343 if (!method) {
4344 set(destination,
4345 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4346 return;
4347 }
4348
4349 cases.append(MultiGetByOffsetCase(*m_graph.addStructureSet(variant.structureSet()), method));
4350 }
4351
4352 if (UNLIKELY(m_graph.compilation()))
4353 m_graph.compilation()->noticeInlinedGetById();
4354
4355 // 2) Emit a MultiGetByOffset
4356 MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
4357 data->cases = cases;
4358 data->identifierNumber = identifierNumber;
4359 set(destination,
4360 addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
4361 return;
4362 }
4363
4364 addToGraph(FilterGetByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addGetByIdStatus(currentCodeOrigin(), getByIdStatus)), base);
4365
4366 ASSERT(getByIdStatus.numVariants() == 1);
4367 GetByIdVariant variant = getByIdStatus[0];
4368
4369 Node* loadedValue = load(prediction, base, identifierNumber, variant);
4370 if (!loadedValue) {
4371 set(destination,
4372 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
4373 return;
4374 }
4375
4376 if (UNLIKELY(m_graph.compilation()))
4377 m_graph.compilation()->noticeInlinedGetById();
4378
4379 ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus());
4380 if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) {
4381 set(destination, loadedValue);
4382 return;
4383 }
4384
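    // The value loaded from the property slot is the GetterSetter object; GetGetter extracts the
    // getter function from it.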
4385 Node* getter = addToGraph(GetGetter, loadedValue);
4386
4387 if (handleIntrinsicGetter(destination, prediction, variant, base,
4388 [&] () {
4389 addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter);
4390 })) {
4391 addToGraph(Phantom, base);
4392 return;
4393 }
4394
4395 ASSERT(variant.intrinsic() == NoIntrinsic);
4396
4397 // Make a call. We don't try to get fancy with using the smallest operand number because
4398 // the stack layout phase should compress the stack anyway.
4399
4400 unsigned numberOfParameters = 0;
4401 numberOfParameters++; // The 'this' argument.
4402 numberOfParameters++; // True return PC.
4403
4404 // Start with a register offset that corresponds to the last in-use register.
4405 int registerOffset = virtualRegisterForLocal(
4406 m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset();
4407 registerOffset -= numberOfParameters;
4408 registerOffset -= CallFrame::headerSizeInRegisters;
4409
4410 // Get the alignment right.
4411 registerOffset = -WTF::roundUpToMultipleOf(
4412 stackAlignmentRegisters(),
4413 -registerOffset);
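    // registerOffset is now the negative, stack-aligned offset at which the getter call's frame
    // will be materialized relative to the current frame.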
4414
4415 ensureLocals(
4416 m_inlineStackTop->remapOperand(
4417 VirtualRegister(registerOffset)).toLocal());
4418
4419 // Issue SetLocals. This has two effects:
4420 // 1) That's how handleCall() sees the arguments.
4421 // 2) If we inline then this ensures that the arguments are flushed so that if you use
4422 // the dreaded arguments object on the getter, the right things happen. Well, sort of -
4423 // since we only really care about 'this' in this case. But we're not going to take that
4424 // shortcut.
4425 set(virtualRegisterForArgument(0, registerOffset), base, ImmediateNakedSet);
4426
4427 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
4428 m_exitOK = true;
4429 addToGraph(ExitOK);
4430
4431 handleCall(
4432 destination, Call, InlineCallFrame::GetterCall, instructionSize,
4433 getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
4434}
4435
4436void ByteCodeParser::emitPutById(
4437 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
4438{
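    // Generic fallback: PutByIdFlush is used when the status says the access may make calls (for
    // example, it could hit a setter), since such a put can clobber the world and must flush live
    // values first. Direct puts define the property on the base itself and never invoke setters.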
4439 if (isDirect)
4440 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
4441 else
4442 addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
4443}
4444
4445void ByteCodeParser::handlePutById(
4446 Node* base, unsigned identifierNumber, Node* value,
4447 const PutByIdStatus& putByIdStatus, bool isDirect, unsigned instructionSize)
4448{
4449 if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) {
4450 if (!putByIdStatus.isSet())
4451 addToGraph(ForceOSRExit);
4452 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4453 return;
4454 }
4455
4456 if (putByIdStatus.numVariants() > 1) {
4457 if (!m_graph.m_plan.isFTL() || putByIdStatus.makesCalls()
4458 || !Options::usePolymorphicAccessInlining()
4459 || putByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) {
4460 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4461 return;
4462 }
4463
4464 if (!isDirect) {
4465 for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
4466 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
4467 continue;
4468 if (!check(putByIdStatus[variantIndex].conditionSet())) {
4469 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4470 return;
4471 }
4472 }
4473 }
4474
4475 if (UNLIKELY(m_graph.compilation()))
4476 m_graph.compilation()->noticeInlinedPutById();
4477
4478 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4479
4480 for (const PutByIdVariant& variant : putByIdStatus.variants()) {
4481 for (Structure* structure : variant.oldStructure())
4482 m_graph.registerStructure(structure);
4483 if (variant.kind() == PutByIdVariant::Transition)
4484 m_graph.registerStructure(variant.newStructure());
4485 }
4486
4487 MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
4488 data->variants = putByIdStatus.variants();
4489 data->identifierNumber = identifierNumber;
4490 addToGraph(MultiPutByOffset, OpInfo(data), base, value);
4491 return;
4492 }
4493
4494 ASSERT(putByIdStatus.numVariants() == 1);
4495 const PutByIdVariant& variant = putByIdStatus[0];
4496
4497 switch (variant.kind()) {
4498 case PutByIdVariant::Replace: {
4499 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4500
4501 store(base, identifierNumber, variant, value);
4502 if (UNLIKELY(m_graph.compilation()))
4503 m_graph.compilation()->noticeInlinedPutById();
4504 return;
4505 }
4506
4507 case PutByIdVariant::Transition: {
4508 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4509
4510 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
4511 if (!check(variant.conditionSet())) {
4512 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4513 return;
4514 }
4515
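        // The status only reports a Transition variant after the transition has been observed at
        // least once, and performing a transition fires (invalidates) the old structure's
        // transition watchpoint, so this must already hold.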
4516 ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
4517
4518 Node* propertyStorage;
4519 Transition* transition = m_graph.m_transitions.add(
4520 m_graph.registerStructure(variant.oldStructureForTransition()), m_graph.registerStructure(variant.newStructure()));
4521
4522 if (variant.reallocatesStorage()) {
4523
4524 // If we're growing the property storage then it must be because we're
4525 // storing into the out-of-line storage.
4526 ASSERT(!isInlineOffset(variant.offset()));
4527
4528 if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
4529 propertyStorage = addToGraph(
4530 AllocatePropertyStorage, OpInfo(transition), base);
4531 } else {
4532 propertyStorage = addToGraph(
4533 ReallocatePropertyStorage, OpInfo(transition),
4534 base, addToGraph(GetButterfly, base));
4535 }
4536 } else {
4537 if (isInlineOffset(variant.offset()))
4538 propertyStorage = base;
4539 else
4540 propertyStorage = addToGraph(GetButterfly, base);
4541 }
4542
4543 StorageAccessData* data = m_graph.m_storageAccessData.add();
4544 data->offset = variant.offset();
4545 data->identifierNumber = identifierNumber;
4546
4547 // NOTE: We could GC at this point because someone could insert an operation that GCs.
4548 // That's fine because:
4549 // - Things already in the structure will get scanned because we haven't messed with
4550 // the object yet.
4551        //  - The value we are about to put is going to be kept live by OSR exit handling. So
4552        //    if the GC does a conservative scan here it will see the new value.
4553
4554 addToGraph(
4555 PutByOffset,
4556 OpInfo(data),
4557 propertyStorage,
4558 base,
4559 value);
4560
4561 if (variant.reallocatesStorage())
4562 addToGraph(NukeStructureAndSetButterfly, base, propertyStorage);
4563
4564 // FIXME: PutStructure goes last until we fix either
4565 // https://bugs.webkit.org/show_bug.cgi?id=142921 or
4566 // https://bugs.webkit.org/show_bug.cgi?id=142924.
4567 addToGraph(PutStructure, OpInfo(transition), base);
4568
4569 if (UNLIKELY(m_graph.compilation()))
4570 m_graph.compilation()->noticeInlinedPutById();
4571 return;
4572 }
4573
4574 case PutByIdVariant::Setter: {
4575 addToGraph(FilterPutByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addPutByIdStatus(currentCodeOrigin(), putByIdStatus)), base);
4576
4577 Node* loadedValue = load(SpecCellOther, base, identifierNumber, variant);
4578 if (!loadedValue) {
4579 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4580 return;
4581 }
4582
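        // The value loaded from the property slot is the GetterSetter object; GetSetter extracts
        // the setter function from it.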
4583 Node* setter = addToGraph(GetSetter, loadedValue);
4584
4585 // Make a call. We don't try to get fancy with using the smallest operand number because
4586 // the stack layout phase should compress the stack anyway.
4587
4588 unsigned numberOfParameters = 0;
4589 numberOfParameters++; // The 'this' argument.
4590 numberOfParameters++; // The new value.
4591 numberOfParameters++; // True return PC.
4592
4593 // Start with a register offset that corresponds to the last in-use register.
4594 int registerOffset = virtualRegisterForLocal(
4595 m_inlineStackTop->m_profiledBlock->numCalleeLocals() - 1).offset();
4596 registerOffset -= numberOfParameters;
4597 registerOffset -= CallFrame::headerSizeInRegisters;
4598
4599 // Get the alignment right.
4600 registerOffset = -WTF::roundUpToMultipleOf(
4601 stackAlignmentRegisters(),
4602 -registerOffset);
4603
4604 ensureLocals(
4605 m_inlineStackTop->remapOperand(
4606 VirtualRegister(registerOffset)).toLocal());
4607
4608 set(virtualRegisterForArgument(0, registerOffset), base, ImmediateNakedSet);
4609 set(virtualRegisterForArgument(1, registerOffset), value, ImmediateNakedSet);
4610
4611 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
4612 m_exitOK = true;
4613 addToGraph(ExitOK);
4614
4615 handleCall(
4616 VirtualRegister(), Call, InlineCallFrame::SetterCall,
4617 instructionSize, setter, numberOfParameters - 1, registerOffset,
4618 *variant.callLinkStatus(), SpecOther);
4619 return;
4620 }
4621
4622 default: {
4623 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
4624 return;
4625 } }
4626}
4627
4628void ByteCodeParser::prepareToParseBlock()
4629{
4630 clearCaches();
4631 ASSERT(m_setLocalQueue.isEmpty());
4632}
4633
4634void ByteCodeParser::clearCaches()
4635{
4636 m_constants.shrink(0);
4637}
4638
4639template<typename Op>
4640void ByteCodeParser::parseGetById(const Instruction* currentInstruction)
4641{
4642 auto bytecode = currentInstruction->as<Op>();
4643 SpeculatedType prediction = getPrediction();
4644
4645 Node* base = get(bytecode.m_base);
4646 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
4647
4648 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
4649 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
4650 m_inlineStackTop->m_profiledBlock,
4651 m_inlineStackTop->m_baselineMap, m_icContextStack,
4652 currentCodeOrigin(), uid);
4653
4654 AccessType type = AccessType::Get;
4655 unsigned opcodeLength = currentInstruction->size();
4656 if (Op::opcodeID == op_try_get_by_id)
4657 type = AccessType::TryGet;
4658 else if (Op::opcodeID == op_get_by_id_direct)
4659 type = AccessType::GetDirect;
4660
4661 handleGetById(
4662 bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength);
4664}
4665
4666static uint64_t makeDynamicVarOpInfo(unsigned identifierNumber, unsigned getPutInfo)
4667{
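    // Pack identifierNumber into the low 32 bits and getPutInfo into the high 32 bits of the
    // node's OpInfo.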
4668 static_assert(sizeof(identifierNumber) == 4,
4669 "We cannot fit identifierNumber into the high bits of m_opInfo");
4670 return static_cast<uint64_t>(identifierNumber) | (static_cast<uint64_t>(getPutInfo) << 32);
4671}
4672
4673// The idiom:
4674// if (true) { ...; goto label; } else label: continue
4675// Allows using NEXT_OPCODE as a statement, even in unbraced if+else, while containing a `continue`.
4676// The more common idiom:
4677// do { ...; } while (false)
4678// Doesn't allow using `continue`.
4679#define NEXT_OPCODE(name) \
4680 if (true) { \
4681 m_currentIndex += currentInstruction->size(); \
4682 goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
4683 } else \
4684 WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
4685 continue
4686
4687#define LAST_OPCODE_LINKED(name) do { \
4688 m_currentIndex += currentInstruction->size(); \
4689 m_exitOK = false; \
4690 return; \
4691 } while (false)
4692
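// LAST_OPCODE is used for block terminators. Blocks ending in Jump/Branch/Switch name their
// successors by bytecode offset, so they are queued as unlinked here and wired up to their target
// blocks once parsing has created all of the blocks.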
4693#define LAST_OPCODE(name) \
4694 do { \
4695 if (m_currentBlock->terminal()) { \
4696 switch (m_currentBlock->terminal()->op()) { \
4697 case Jump: \
4698 case Branch: \
4699 case Switch: \
4700 ASSERT(!m_currentBlock->isLinked); \
4701 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock); \
4702 break;\
4703 default: break; \
4704 } \
4705 } \
4706 LAST_OPCODE_LINKED(name); \
4707 } while (false)
4708
4709void ByteCodeParser::parseBlock(unsigned limit)
4710{
4711 auto& instructions = m_inlineStackTop->m_codeBlock->instructions();
4712 unsigned blockBegin = m_currentIndex;
4713
4714 // If we are the first basic block, introduce markers for arguments. This allows
4715 // us to track if a use of an argument may use the actual argument passed, as
4716 // opposed to using a value we set explicitly.
4717 if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
4718 auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector());
4719 RELEASE_ASSERT(addResult.isNewEntry);
4720 ArgumentsVector& entrypointArguments = addResult.iterator->value;
4721 entrypointArguments.resize(m_numArguments);
4722
4723 // We will emit SetArgumentDefinitely nodes. They don't exit, but we're at the top of an op_enter so
4724 // exitOK = true.
4725 m_exitOK = true;
4726 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
4727 VariableAccessData* variable = newVariableAccessData(
4728 virtualRegisterForArgument(argument));
4729 variable->mergeStructureCheckHoistingFailed(
4730 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
4731 variable->mergeCheckArrayHoistingFailed(
4732 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
4733
4734 Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
4735 entrypointArguments[argument] = setArgument;
4736 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
4737 }
4738 }
4739
4740 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
4741
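    // A target of 0 means the jump offset did not fit into the instruction stream and is stored
    // out of line; fetch it from the code block's out-of-line jump table.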
4742 auto jumpTarget = [&](int target) {
4743 if (target)
4744 return target;
4745 return codeBlock->outOfLineJumpOffset(m_currentInstruction);
4746 };
4747
4748 while (true) {
4749 // We're staring at a new bytecode instruction. So we once again have a place that we can exit
4750 // to.
4751 m_exitOK = true;
4752
4753 processSetLocalQueue();
4754
4755 // Don't extend over jump destinations.
4756 if (m_currentIndex == limit) {
4757 // Ordinarily we want to plant a jump. But refuse to do this if the block is
4758 // empty. This is a special case for inlining, which might otherwise create
4759 // some empty blocks in some cases. When parseBlock() returns with an empty
4760 // block, it will get repurposed instead of creating a new one. Note that this
4761 // logic relies on every bytecode resulting in one or more nodes, which would
4762 // be true anyway except for op_loop_hint, which emits a Phantom to force this
4763 // to be true.
4764
4765 if (!m_currentBlock->isEmpty())
4766 addJumpTo(m_currentIndex);
4767 return;
4768 }
4769
4770 // Switch on the current bytecode opcode.
4771 const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr();
4772 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
4773 OpcodeID opcodeID = currentInstruction->opcodeID();
4774
4775 VERBOSE_LOG(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n");
4776
4777 if (UNLIKELY(m_graph.compilation())) {
4778 addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
4779 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
4780 }
4781
4782 switch (opcodeID) {
4783
4784 // === Function entry opcodes ===
4785
4786 case op_enter: {
4787 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
4788 // Initialize all locals to undefined.
4789 for (int i = 0; i < m_inlineStackTop->m_codeBlock->numVars(); ++i)
4790 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
4791
4792 NEXT_OPCODE(op_enter);
4793 }
4794
4795 case op_to_this: {
4796 Node* op1 = getThis();
4797 auto& metadata = currentInstruction->as<OpToThis>().metadata(codeBlock);
4798 Structure* cachedStructure = metadata.m_cachedStructure.get();
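            // If profiling saw a single structure, the default JSObject toThis behavior applies, and
            // we have not seen slow cases or relevant exits, then a CheckStructure suffices and |this|
            // can be used unchanged; otherwise emit a full ToThis.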
4799 if (metadata.m_toThisStatus != ToThisOK
4800 || !cachedStructure
4801 || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
4802 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
4803 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
4804 || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
4805 setThis(addToGraph(ToThis, OpInfo(), OpInfo(getPrediction()), op1));
4806 } else {
4807 addToGraph(
4808 CheckStructure,
4809 OpInfo(m_graph.addStructureSet(cachedStructure)),
4810 op1);
4811 }
4812 NEXT_OPCODE(op_to_this);
4813 }
4814
4815 case op_create_this: {
4816 auto bytecode = currentInstruction->as<OpCreateThis>();
4817 Node* callee = get(VirtualRegister(bytecode.m_callee));
4818
4819 JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm);
4820 if (!function) {
4821 JSCell* cachedFunction = bytecode.metadata(codeBlock).m_cachedCallee.unvalidatedGet();
4822 if (cachedFunction
4823 && cachedFunction != JSCell::seenMultipleCalleeObjects()
4824 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
4825 ASSERT(cachedFunction->inherits<JSFunction>(*m_vm));
4826
4827 FrozenValue* frozen = m_graph.freeze(cachedFunction);
4828 addToGraph(CheckCell, OpInfo(frozen), callee);
4829
4830 function = static_cast<JSFunction*>(cachedFunction);
4831 }
4832 }
4833
4834 bool alreadyEmitted = false;
4835 if (function) {
4836 if (FunctionRareData* rareData = function->rareData()) {
4837 if (rareData->allocationProfileWatchpointSet().isStillValid()) {
4838 Structure* structure = rareData->objectAllocationStructure();
4839 JSObject* prototype = rareData->objectAllocationPrototype();
4840 if (structure
4841 && (structure->hasMonoProto() || prototype)
4842 && rareData->allocationProfileWatchpointSet().isStillValid()) {
4843
4844 m_graph.freeze(rareData);
4845 m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
4846
4847 // The callee is still live up to this point.
4848 addToGraph(Phantom, callee);
4849 Node* object = addToGraph(NewObject, OpInfo(m_graph.registerStructure(structure)));
4850 if (structure->hasPolyProto()) {
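                            // With poly proto, each instance stores its prototype in an inline
                            // property slot at knownPolyProtoOffset; NewObject does not initialize
                            // it, so store the prototype explicitly.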
4851 StorageAccessData* data = m_graph.m_storageAccessData.add();
4852 data->offset = knownPolyProtoOffset;
4853 data->identifierNumber = m_graph.identifiers().ensure(m_graph.m_vm.propertyNames->builtinNames().polyProtoName().impl());
4854 ASSERT(isInlineOffset(knownPolyProtoOffset));
4855 addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype));
4856 }
4857 set(VirtualRegister(bytecode.m_dst), object);
4858 alreadyEmitted = true;
4859 }
4860 }
4861 }
4862 }
4863 if (!alreadyEmitted) {
4864 set(VirtualRegister(bytecode.m_dst),
4865 addToGraph(CreateThis, OpInfo(bytecode.m_inlineCapacity), callee));
4866 }
4867 NEXT_OPCODE(op_create_this);
4868 }
4869
4870 case op_new_object: {
4871 auto bytecode = currentInstruction->as<OpNewObject>();
4872 set(bytecode.m_dst,
4873 addToGraph(NewObject,
4874 OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).m_objectAllocationProfile.structure()))));
4875 NEXT_OPCODE(op_new_object);
4876 }
4877
4878 case op_new_array: {
4879 auto bytecode = currentInstruction->as<OpNewArray>();
4880 int startOperand = bytecode.m_argv.offset();
4881 int numOperands = bytecode.m_argc;
4882 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
4883 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
4884 addVarArgChild(get(VirtualRegister(operandIdx)));
4885 unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands);
4886 set(bytecode.m_dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint)));
4887 NEXT_OPCODE(op_new_array);
4888 }
4889
4890 case op_new_array_with_spread: {
4891 auto bytecode = currentInstruction->as<OpNewArrayWithSpread>();
4892 int startOperand = bytecode.m_argv.offset();
4893 int numOperands = bytecode.m_argc;
4894 const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.m_bitVector);
4895 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
4896 addVarArgChild(get(VirtualRegister(operandIdx)));
4897
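            // The bit vector records which of the operands are spread expressions; copy it into the
            // graph so the compilation owns its own copy.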
4898 BitVector* copy = m_graph.m_bitVectors.add(bitVector);
4899 ASSERT(*copy == bitVector);
4900
4901 set(bytecode.m_dst,
4902 addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy)));
4903 NEXT_OPCODE(op_new_array_with_spread);
4904 }
4905
4906 case op_spread: {
4907 auto bytecode = currentInstruction->as<OpSpread>();
4908 set(bytecode.m_dst,
4909 addToGraph(Spread, get(bytecode.m_argument)));
4910 NEXT_OPCODE(op_spread);
4911 }
4912
4913 case op_new_array_with_size: {
4914 auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
4915 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
4916 set(bytecode.m_dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.m_length)));
4917 NEXT_OPCODE(op_new_array_with_size);
4918 }
4919
4920 case op_new_array_buffer: {
4921 auto bytecode = currentInstruction->as<OpNewArrayBuffer>();
4922 // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new information because we
4923 // cannot allocate from compilation threads.
4924 WTF::loadLoadFence();
4925 FrozenValue* frozen = get(VirtualRegister(bytecode.m_immutableButterfly))->constant();
4926 WTF::loadLoadFence();
4927 JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>();
4928 NewArrayBufferData data { };
4929 data.indexingMode = immutableButterfly->indexingMode();
4930 data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength();
4931
4932 set(VirtualRegister(bytecode.m_dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));
4933 NEXT_OPCODE(op_new_array_buffer);
4934 }
4935
4936 case op_new_regexp: {
4937 auto bytecode = currentInstruction->as<OpNewRegexp>();
4938 ASSERT(bytecode.m_regexp.isConstant());
4939 FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_regexp.offset()));
4940 set(bytecode.m_dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));
4941 NEXT_OPCODE(op_new_regexp);
4942 }
4943
4944 case op_get_rest_length: {
4945 auto bytecode = currentInstruction->as<OpGetRestLength>();
4946 InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
4947 Node* length;
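            // If we are inlined with a known, non-varargs argument count, the rest length is a
            // compile-time constant; otherwise emit GetRestLength to compute it at runtime.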
4948 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
4949 unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1;
4950 JSValue restLength;
4951 if (argumentsLength <= bytecode.m_numParametersToSkip)
4952 restLength = jsNumber(0);
4953 else
4954 restLength = jsNumber(argumentsLength - bytecode.m_numParametersToSkip);
4955
4956 length = jsConstant(restLength);
4957 } else
4958 length = addToGraph(GetRestLength, OpInfo(bytecode.m_numParametersToSkip));
4959 set(bytecode.m_dst, length);
4960 NEXT_OPCODE(op_get_rest_length);
4961 }
4962
4963 case op_create_rest: {
4964 auto bytecode = currentInstruction->as<OpCreateRest>();
4965 noticeArgumentsUse();
4966 Node* arrayLength = get(bytecode.m_arraySize);
4967 set(bytecode.m_dst,
4968 addToGraph(CreateRest, OpInfo(bytecode.m_numParametersToSkip), arrayLength));
4969 NEXT_OPCODE(op_create_rest);
4970 }
4971
4972 // === Bitwise operations ===
4973
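        // For each bitwise op, the Arith* node is used when the operands are statically known to be
        // numeric; the Value* node is the generic form that also handles BigInts and operands that
        // require ToNumeric conversion.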
4974 case op_bitnot: {
4975 auto bytecode = currentInstruction->as<OpBitnot>();
4976 SpeculatedType prediction = getPrediction();
4977 Node* op1 = get(bytecode.m_operand);
4978 if (op1->hasNumberOrAnyIntResult())
4979 set(bytecode.m_dst, addToGraph(ArithBitNot, op1));
4980 else
4981 set(bytecode.m_dst, addToGraph(ValueBitNot, OpInfo(), OpInfo(prediction), op1));
4982 NEXT_OPCODE(op_bitnot);
4983 }
4984
4985 case op_bitand: {
4986 auto bytecode = currentInstruction->as<OpBitand>();
4987 SpeculatedType prediction = getPrediction();
4988 Node* op1 = get(bytecode.m_lhs);
4989 Node* op2 = get(bytecode.m_rhs);
4990 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
4991 set(bytecode.m_dst, addToGraph(ArithBitAnd, op1, op2));
4992 else
4993 set(bytecode.m_dst, addToGraph(ValueBitAnd, OpInfo(), OpInfo(prediction), op1, op2));
4994 NEXT_OPCODE(op_bitand);
4995 }
4996
4997 case op_bitor: {
4998 auto bytecode = currentInstruction->as<OpBitor>();
4999 SpeculatedType prediction = getPrediction();
5000 Node* op1 = get(bytecode.m_lhs);
5001 Node* op2 = get(bytecode.m_rhs);
5002 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5003 set(bytecode.m_dst, addToGraph(ArithBitOr, op1, op2));
5004 else
5005 set(bytecode.m_dst, addToGraph(ValueBitOr, OpInfo(), OpInfo(prediction), op1, op2));
5006 NEXT_OPCODE(op_bitor);
5007 }
5008
5009 case op_bitxor: {
5010 auto bytecode = currentInstruction->as<OpBitxor>();
5011 SpeculatedType prediction = getPrediction();
5012 Node* op1 = get(bytecode.m_lhs);
5013 Node* op2 = get(bytecode.m_rhs);
5014 if (op1->hasNumberOrAnyIntResult() && op2->hasNumberOrAnyIntResult())
5015 set(bytecode.m_dst, addToGraph(ArithBitXor, op1, op2));
5016 else
5017 set(bytecode.m_dst, addToGraph(ValueBitXor, OpInfo(), OpInfo(prediction), op1, op2));
5018 NEXT_OPCODE(op_bitxor);
5019 }
5020
5021 case op_rshift: {
5022 auto bytecode = currentInstruction->as<OpRshift>();
5023 Node* op1 = get(bytecode.m_lhs);
5024 Node* op2 = get(bytecode.m_rhs);
5025 set(bytecode.m_dst, addToGraph(BitRShift, op1, op2));
5026 NEXT_OPCODE(op_rshift);
5027 }
5028
5029 case op_lshift: {
5030 auto bytecode = currentInstruction->as<OpLshift>();
5031 Node* op1 = get(bytecode.m_lhs);
5032 Node* op2 = get(bytecode.m_rhs);
5033 set(bytecode.m_dst, addToGraph(BitLShift, op1, op2));
5034 NEXT_OPCODE(op_lshift);
5035 }
5036
5037 case op_urshift: {
5038 auto bytecode = currentInstruction->as<OpUrshift>();
5039 Node* op1 = get(bytecode.m_lhs);
5040 Node* op2 = get(bytecode.m_rhs);
5041 set(bytecode.m_dst, addToGraph(BitURShift, op1, op2));
5042 NEXT_OPCODE(op_urshift);
5043 }
5044
5045 case op_unsigned: {
5046 auto bytecode = currentInstruction->as<OpUnsigned>();
5047 set(bytecode.m_dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.m_operand))));
5048 NEXT_OPCODE(op_unsigned);
5049 }
5050
5051 // === Increment/Decrement opcodes ===
5052
5053 case op_inc: {
5054 auto bytecode = currentInstruction->as<OpInc>();
5055 Node* op = get(bytecode.m_srcDst);
5056 set(bytecode.m_srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
5057 NEXT_OPCODE(op_inc);
5058 }
5059
5060 case op_dec: {
5061 auto bytecode = currentInstruction->as<OpDec>();
5062 Node* op = get(bytecode.m_srcDst);
5063 set(bytecode.m_srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
5064 NEXT_OPCODE(op_dec);
5065 }
5066
5067 // === Arithmetic operations ===
5068
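        // As with the bitwise ops, the Arith* nodes are used when both operands are known to produce
        // numbers, and the Value* nodes are the generic forms. makeSafe() consults the profiling data
        // to decide whether the node needs overflow and negative-zero checks.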
5069 case op_add: {
5070 auto bytecode = currentInstruction->as<OpAdd>();
5071 Node* op1 = get(bytecode.m_lhs);
5072 Node* op2 = get(bytecode.m_rhs);
5073 if (op1->hasNumberResult() && op2->hasNumberResult())
5074 set(bytecode.m_dst, makeSafe(addToGraph(ArithAdd, op1, op2)));
5075 else
5076 set(bytecode.m_dst, makeSafe(addToGraph(ValueAdd, op1, op2)));
5077 NEXT_OPCODE(op_add);
5078 }
5079
5080 case op_sub: {
5081 auto bytecode = currentInstruction->as<OpSub>();
5082 Node* op1 = get(bytecode.m_lhs);
5083 Node* op2 = get(bytecode.m_rhs);
5084 if (op1->hasNumberResult() && op2->hasNumberResult())
5085 set(bytecode.m_dst, makeSafe(addToGraph(ArithSub, op1, op2)));
5086 else
5087 set(bytecode.m_dst, makeSafe(addToGraph(ValueSub, op1, op2)));
5088 NEXT_OPCODE(op_sub);
5089 }
5090
5091 case op_negate: {
5092 auto bytecode = currentInstruction->as<OpNegate>();
5093 Node* op1 = get(bytecode.m_operand);
5094 if (op1->hasNumberResult())
5095 set(bytecode.m_dst, makeSafe(addToGraph(ArithNegate, op1)));
5096 else
5097 set(bytecode.m_dst, makeSafe(addToGraph(ValueNegate, op1)));
5098 NEXT_OPCODE(op_negate);
5099 }
5100
5101 case op_mul: {
5102 // Multiply requires that the inputs are not truncated, unfortunately.
5103 auto bytecode = currentInstruction->as<OpMul>();
5104 Node* op1 = get(bytecode.m_lhs);
5105 Node* op2 = get(bytecode.m_rhs);
5106 if (op1->hasNumberResult() && op2->hasNumberResult())
5107 set(bytecode.m_dst, makeSafe(addToGraph(ArithMul, op1, op2)));
5108 else
5109 set(bytecode.m_dst, makeSafe(addToGraph(ValueMul, op1, op2)));
5110 NEXT_OPCODE(op_mul);
5111 }
5112
5113 case op_mod: {
5114 auto bytecode = currentInstruction->as<OpMod>();
5115 Node* op1 = get(bytecode.m_lhs);
5116 Node* op2 = get(bytecode.m_rhs);
5117 if (op1->hasNumberResult() && op2->hasNumberResult())
5118 set(bytecode.m_dst, makeSafe(addToGraph(ArithMod, op1, op2)));
5119 else
5120 set(bytecode.m_dst, makeSafe(addToGraph(ValueMod, op1, op2)));
5121 NEXT_OPCODE(op_mod);
5122 }
5123
5124 case op_pow: {
5125 // FIXME: ArithPow(Untyped, Untyped) should be supported as the same to ArithMul, ArithSub etc.
5126 // https://bugs.webkit.org/show_bug.cgi?id=160012
5127 auto bytecode = currentInstruction->as<OpPow>();
5128 Node* op1 = get(bytecode.m_lhs);
5129 Node* op2 = get(bytecode.m_rhs);
5130 set(bytecode.m_dst, addToGraph(ArithPow, op1, op2));
5131 NEXT_OPCODE(op_pow);
5132 }
5133
5134 case op_div: {
5135 auto bytecode = currentInstruction->as<OpDiv>();
5136 Node* op1 = get(bytecode.m_lhs);
5137 Node* op2 = get(bytecode.m_rhs);
5138 if (op1->hasNumberResult() && op2->hasNumberResult())
5139 set(bytecode.m_dst, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
5140 else
5141 set(bytecode.m_dst, makeDivSafe(addToGraph(ValueDiv, op1, op2)));
5142 NEXT_OPCODE(op_div);
5143 }
5144
5145 // === Misc operations ===
5146
5147 case op_debug: {
5148            // This is a nop in the DFG/FTL because when we set a breakpoint in the debugger,
5149            // we will jettison all optimized CodeBlocks that contain the breakpoint.
5150 addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
5151 NEXT_OPCODE(op_debug);
5152 }
5153
5154 case op_mov: {
5155 auto bytecode = currentInstruction->as<OpMov>();
5156 Node* op = get(bytecode.m_src);
5157 set(bytecode.m_dst, op);
5158 NEXT_OPCODE(op_mov);
5159 }
5160
5161 case op_check_tdz: {
5162 auto bytecode = currentInstruction->as<OpCheckTdz>();
5163 addToGraph(CheckNotEmpty, get(bytecode.m_targetVirtualRegister));
5164 NEXT_OPCODE(op_check_tdz);
5165 }
5166
5167 case op_overrides_has_instance: {
5168 auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
5169 JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction();
5170
5171 Node* constructor = get(VirtualRegister(bytecode.m_constructor));
5172 Node* hasInstanceValue = get(VirtualRegister(bytecode.m_hasInstanceValue));
5173
5174 set(VirtualRegister(bytecode.m_dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));
5175 NEXT_OPCODE(op_overrides_has_instance);
5176 }
5177
5178 case op_identity_with_profile: {
5179 auto bytecode = currentInstruction->as<OpIdentityWithProfile>();
5180 Node* srcDst = get(bytecode.m_srcDst);
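            // The 64-bit SpeculatedType was split across two 32-bit bytecode operands; reassemble it.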
5181 SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.m_topProfile) << 32 | static_cast<SpeculatedType>(bytecode.m_bottomProfile);
5182 set(bytecode.m_srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst));
5183 NEXT_OPCODE(op_identity_with_profile);
5184 }
5185
5186 case op_instanceof: {
5187 auto bytecode = currentInstruction->as<OpInstanceof>();
5188
5189 InstanceOfStatus status = InstanceOfStatus::computeFor(
5190 m_inlineStackTop->m_profiledBlock, m_inlineStackTop->m_baselineMap,
5191 m_currentIndex);
5192
5193 Node* value = get(bytecode.m_value);
5194 Node* prototype = get(bytecode.m_prototype);
5195
5196 // Only inline it if it's Simple with a commonPrototype; bottom/top or variable
5197 // prototypes both get handled by the IC. This makes sense for bottom (unprofiled)
5198 // instanceof ICs because the profit of this optimization is fairly low. So, in the
5199 // absence of any information, it's better to avoid making this be the cause of a
5200 // recompilation.
5201 if (JSObject* commonPrototype = status.commonPrototype()) {
5202 addToGraph(CheckCell, OpInfo(m_graph.freeze(commonPrototype)), prototype);
5203
5204 bool allOK = true;
5205 MatchStructureData* data = m_graph.m_matchStructureData.add();
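                // MatchStructure checks the value against each registered structure and produces the
                // recorded hit/miss result as a boolean, so we can avoid a generic InstanceOf as long
                // as every variant's conditions can be checked or watched.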
5206 for (const InstanceOfVariant& variant : status.variants()) {
5207 if (!check(variant.conditionSet())) {
5208 allOK = false;
5209 break;
5210 }
5211 for (Structure* structure : variant.structureSet()) {
5212 MatchStructureVariant matchVariant;
5213 matchVariant.structure = m_graph.registerStructure(structure);
5214 matchVariant.result = variant.isHit();
5215
5216 data->variants.append(WTFMove(matchVariant));
5217 }
5218 }
5219
5220 if (allOK) {
5221 Node* match = addToGraph(MatchStructure, OpInfo(data), value);
5222 set(bytecode.m_dst, match);
5223 NEXT_OPCODE(op_instanceof);
5224 }
5225 }
5226
5227 set(bytecode.m_dst, addToGraph(InstanceOf, value, prototype));
5228 NEXT_OPCODE(op_instanceof);
5229 }
5230
5231 case op_instanceof_custom: {
5232 auto bytecode = currentInstruction->as<OpInstanceofCustom>();
5233 Node* value = get(bytecode.m_value);
5234 Node* constructor = get(bytecode.m_constructor);
5235 Node* hasInstanceValue = get(bytecode.m_hasInstanceValue);
5236 set(bytecode.m_dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));
5237 NEXT_OPCODE(op_instanceof_custom);
5238 }
5239 case op_is_empty: {
5240 auto bytecode = currentInstruction->as<OpIsEmpty>();
5241 Node* value = get(bytecode.m_operand);
5242 set(bytecode.m_dst, addToGraph(IsEmpty, value));
5243 NEXT_OPCODE(op_is_empty);
5244 }
5245 case op_is_undefined: {
5246 auto bytecode = currentInstruction->as<OpIsUndefined>();
5247 Node* value = get(bytecode.m_operand);
5248 set(bytecode.m_dst, addToGraph(IsUndefined, value));
5249 NEXT_OPCODE(op_is_undefined);
5250 }
5251 case op_is_undefined_or_null: {
5252 auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
5253 Node* value = get(bytecode.m_operand);
5254 set(bytecode.m_dst, addToGraph(IsUndefinedOrNull, value));
5255 NEXT_OPCODE(op_is_undefined_or_null);
5256 }
5257
5258 case op_is_boolean: {
5259 auto bytecode = currentInstruction->as<OpIsBoolean>();
5260 Node* value = get(bytecode.m_operand);
5261 set(bytecode.m_dst, addToGraph(IsBoolean, value));
5262 NEXT_OPCODE(op_is_boolean);
5263 }
5264
5265 case op_is_number: {
5266 auto bytecode = currentInstruction->as<OpIsNumber>();
5267 Node* value = get(bytecode.m_operand);
5268 set(bytecode.m_dst, addToGraph(IsNumber, value));
5269 NEXT_OPCODE(op_is_number);
5270 }
5271
5272 case op_is_cell_with_type: {
5273 auto bytecode = currentInstruction->as<OpIsCellWithType>();
5274 Node* value = get(bytecode.m_operand);
5275 set(bytecode.m_dst, addToGraph(IsCellWithType, OpInfo(bytecode.m_type), value));
5276 NEXT_OPCODE(op_is_cell_with_type);
5277 }
5278
5279 case op_is_object: {
5280 auto bytecode = currentInstruction->as<OpIsObject>();
5281 Node* value = get(bytecode.m_operand);
5282 set(bytecode.m_dst, addToGraph(IsObject, value));
5283 NEXT_OPCODE(op_is_object);
5284 }
5285
5286 case op_is_object_or_null: {
5287 auto bytecode = currentInstruction->as<OpIsObjectOrNull>();
5288 Node* value = get(bytecode.m_operand);
5289 set(bytecode.m_dst, addToGraph(IsObjectOrNull, value));
5290 NEXT_OPCODE(op_is_object_or_null);
5291 }
5292
5293 case op_is_function: {
5294 auto bytecode = currentInstruction->as<OpIsFunction>();
5295 Node* value = get(bytecode.m_operand);
5296 set(bytecode.m_dst, addToGraph(IsFunction, value));
5297 NEXT_OPCODE(op_is_function);
5298 }
5299
5300 case op_not: {
5301 auto bytecode = currentInstruction->as<OpNot>();
5302 Node* value = get(bytecode.m_operand);
5303 set(bytecode.m_dst, addToGraph(LogicalNot, value));
5304 NEXT_OPCODE(op_not);
5305 }
5306
5307 case op_to_primitive: {
5308 auto bytecode = currentInstruction->as<OpToPrimitive>();
5309 Node* value = get(bytecode.m_src);
5310 set(bytecode.m_dst, addToGraph(ToPrimitive, value));
5311 NEXT_OPCODE(op_to_primitive);
5312 }
5313
5314 case op_strcat: {
5315 auto bytecode = currentInstruction->as<OpStrcat>();
5316 int startOperand = bytecode.m_src.offset();
5317 int numOperands = bytecode.m_count;
5318#if CPU(X86)
5319 // X86 doesn't have enough registers to compile MakeRope with three arguments. The
5320 // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever,
5321 // we just make StrCat dumber on this processor.
5322 const unsigned maxArguments = 2;
5323#else
5324 const unsigned maxArguments = 3;
5325#endif
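            // Fold the operands left to right, at most maxArguments at a time: whenever the buffer
            // fills up, collapse the accumulated operands into a single StrCat node and continue
            // accumulating with that node as the first operand.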
5326 Node* operands[AdjacencyList::Size];
5327 unsigned indexInOperands = 0;
5328 for (unsigned i = 0; i < AdjacencyList::Size; ++i)
5329 operands[i] = 0;
5330 for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
5331 if (indexInOperands == maxArguments) {
5332 operands[0] = addToGraph(StrCat, operands[0], operands[1], operands[2]);
5333 for (unsigned i = 1; i < AdjacencyList::Size; ++i)
5334 operands[i] = 0;
5335 indexInOperands = 1;
5336 }
5337
5338 ASSERT(indexInOperands < AdjacencyList::Size);
5339 ASSERT(indexInOperands < maxArguments);
5340 operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx));
5341 }
5342 set(bytecode.m_dst, addToGraph(StrCat, operands[0], operands[1], operands[2]));
5343 NEXT_OPCODE(op_strcat);
5344 }
5345
5346 case op_less: {
5347 auto bytecode = currentInstruction->as<OpLess>();
5348 Node* op1 = get(bytecode.m_lhs);
5349 Node* op2 = get(bytecode.m_rhs);
5350 set(bytecode.m_dst, addToGraph(CompareLess, op1, op2));
5351 NEXT_OPCODE(op_less);
5352 }
5353
5354 case op_lesseq: {
5355 auto bytecode = currentInstruction->as<OpLesseq>();
5356 Node* op1 = get(bytecode.m_lhs);
5357 Node* op2 = get(bytecode.m_rhs);
5358 set(bytecode.m_dst, addToGraph(CompareLessEq, op1, op2));
5359 NEXT_OPCODE(op_lesseq);
5360 }
5361
5362 case op_greater: {
5363 auto bytecode = currentInstruction->as<OpGreater>();
5364 Node* op1 = get(bytecode.m_lhs);
5365 Node* op2 = get(bytecode.m_rhs);
5366 set(bytecode.m_dst, addToGraph(CompareGreater, op1, op2));
5367 NEXT_OPCODE(op_greater);
5368 }
5369
5370 case op_greatereq: {
5371 auto bytecode = currentInstruction->as<OpGreatereq>();
5372 Node* op1 = get(bytecode.m_lhs);
5373 Node* op2 = get(bytecode.m_rhs);
5374 set(bytecode.m_dst, addToGraph(CompareGreaterEq, op1, op2));
5375 NEXT_OPCODE(op_greatereq);
5376 }
5377
5378 case op_below: {
5379 auto bytecode = currentInstruction->as<OpBelow>();
5380 Node* op1 = get(bytecode.m_lhs);
5381 Node* op2 = get(bytecode.m_rhs);
5382 set(bytecode.m_dst, addToGraph(CompareBelow, op1, op2));
5383 NEXT_OPCODE(op_below);
5384 }
5385
5386 case op_beloweq: {
5387 auto bytecode = currentInstruction->as<OpBeloweq>();
5388 Node* op1 = get(bytecode.m_lhs);
5389 Node* op2 = get(bytecode.m_rhs);
5390 set(bytecode.m_dst, addToGraph(CompareBelowEq, op1, op2));
5391 NEXT_OPCODE(op_beloweq);
5392 }
5393
5394 case op_eq: {
5395 auto bytecode = currentInstruction->as<OpEq>();
5396 Node* op1 = get(bytecode.m_lhs);
5397 Node* op2 = get(bytecode.m_rhs);
5398 set(bytecode.m_dst, addToGraph(CompareEq, op1, op2));
5399 NEXT_OPCODE(op_eq);
5400 }
5401
5402 case op_eq_null: {
5403 auto bytecode = currentInstruction->as<OpEqNull>();
5404 Node* value = get(bytecode.m_operand);
5405 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5406 set(bytecode.m_dst, addToGraph(CompareEq, value, nullConstant));
5407 NEXT_OPCODE(op_eq_null);
5408 }
5409
5410 case op_stricteq: {
5411 auto bytecode = currentInstruction->as<OpStricteq>();
5412 Node* op1 = get(bytecode.m_lhs);
5413 Node* op2 = get(bytecode.m_rhs);
5414 set(bytecode.m_dst, addToGraph(CompareStrictEq, op1, op2));
5415 NEXT_OPCODE(op_stricteq);
5416 }
5417
5418 case op_neq: {
5419 auto bytecode = currentInstruction->as<OpNeq>();
5420 Node* op1 = get(bytecode.m_lhs);
5421 Node* op2 = get(bytecode.m_rhs);
5422 set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
5423 NEXT_OPCODE(op_neq);
5424 }
5425
5426 case op_neq_null: {
5427 auto bytecode = currentInstruction->as<OpNeqNull>();
5428 Node* value = get(bytecode.m_operand);
5429 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5430 set(bytecode.m_dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));
5431 NEXT_OPCODE(op_neq_null);
5432 }
5433
5434 case op_nstricteq: {
5435 auto bytecode = currentInstruction->as<OpNstricteq>();
5436 Node* op1 = get(bytecode.m_lhs);
5437 Node* op2 = get(bytecode.m_rhs);
5438 Node* invertedResult;
5439 invertedResult = addToGraph(CompareStrictEq, op1, op2);
5440 set(bytecode.m_dst, addToGraph(LogicalNot, invertedResult));
5441 NEXT_OPCODE(op_nstricteq);
5442 }
5443
5444 // === Property access operations ===
5445
5446 case op_get_by_val: {
5447 auto bytecode = currentInstruction->as<OpGetByVal>();
5448 SpeculatedType prediction = getPredictionWithoutOSRExit();
5449
5450 Node* base = get(bytecode.m_base);
5451 Node* property = get(bytecode.m_property);
5452 bool compiledAsGetById = false;
5453 GetByIdStatus getByIdStatus;
5454 unsigned identifierNumber = 0;
5455 {
5456 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
5457 ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
5458                // FIXME: When the bytecode has not been compiled in the baseline JIT, byValInfo is null,
5459                // so there is no profiling information we can use here.
5460 if (byValInfo
5461 && byValInfo->stubInfo
5462 && !byValInfo->tookSlowPath
5463 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
5464 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
5465 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
5466 compiledAsGetById = true;
5467 identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
5468 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
5469
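                    // Guard that the property operand is exactly the identifier the IC saw:
                    // CheckCell for a cached symbol, CheckStringIdent for a cached string. This is
                    // what makes it sound to compile this get_by_val as a get_by_id.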
5470 if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
5471 FrozenValue* frozen = m_graph.freezeStrong(symbol);
5472 addToGraph(CheckCell, OpInfo(frozen), property);
5473 } else {
5474 ASSERT(!uid->isSymbol());
5475 addToGraph(CheckStringIdent, OpInfo(uid), property);
5476 }
5477
5478 getByIdStatus = GetByIdStatus::computeForStubInfo(
5479 locker, m_inlineStackTop->m_profiledBlock,
5480 byValInfo->stubInfo, currentCodeOrigin(), uid);
5481 }
5482 }
5483
5484 if (compiledAsGetById)
5485 handleGetById(bytecode.m_dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size());
5486 else {
5487 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
5488 // FIXME: We could consider making this not vararg, since it only uses three child
5489 // slots.
5490 // https://bugs.webkit.org/show_bug.cgi?id=184192
5491 addVarArgChild(base);
5492 addVarArgChild(property);
5493 addVarArgChild(0); // Leave room for property storage.
5494 Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction));
5495 m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
5496 set(bytecode.m_dst, getByVal);
5497 }
5498
5499 NEXT_OPCODE(op_get_by_val);
5500 }
5501
5502 case op_get_by_val_with_this: {
5503 auto bytecode = currentInstruction->as<OpGetByValWithThis>();
5504 SpeculatedType prediction = getPrediction();
5505
5506 Node* base = get(bytecode.m_base);
5507 Node* thisValue = get(bytecode.m_thisValue);
5508 Node* property = get(bytecode.m_property);
5509 Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property);
5510 set(bytecode.m_dst, getByValWithThis);
5511
5512 NEXT_OPCODE(op_get_by_val_with_this);
5513 }
5514
5515 case op_put_by_val_direct:
5516 handlePutByVal(currentInstruction->as<OpPutByValDirect>(), currentInstruction->size());
5517 NEXT_OPCODE(op_put_by_val_direct);
5518
5519 case op_put_by_val: {
5520 handlePutByVal(currentInstruction->as<OpPutByVal>(), currentInstruction->size());
5521 NEXT_OPCODE(op_put_by_val);
5522 }
5523
5524 case op_put_by_val_with_this: {
5525 auto bytecode = currentInstruction->as<OpPutByValWithThis>();
5526 Node* base = get(bytecode.m_base);
5527 Node* thisValue = get(bytecode.m_thisValue);
5528 Node* property = get(bytecode.m_property);
5529 Node* value = get(bytecode.m_value);
5530
5531 addVarArgChild(base);
5532 addVarArgChild(thisValue);
5533 addVarArgChild(property);
5534 addVarArgChild(value);
5535 addToGraph(Node::VarArg, PutByValWithThis, OpInfo(0), OpInfo(0));
5536
5537 NEXT_OPCODE(op_put_by_val_with_this);
5538 }
5539
5540 case op_define_data_property: {
5541 auto bytecode = currentInstruction->as<OpDefineDataProperty>();
5542 Node* base = get(bytecode.m_base);
5543 Node* property = get(bytecode.m_property);
5544 Node* value = get(bytecode.m_value);
5545 Node* attributes = get(bytecode.m_attributes);
5546
5547 addVarArgChild(base);
5548 addVarArgChild(property);
5549 addVarArgChild(value);
5550 addVarArgChild(attributes);
5551 addToGraph(Node::VarArg, DefineDataProperty, OpInfo(0), OpInfo(0));
5552
5553 NEXT_OPCODE(op_define_data_property);
5554 }
5555
5556 case op_define_accessor_property: {
5557 auto bytecode = currentInstruction->as<OpDefineAccessorProperty>();
5558 Node* base = get(bytecode.m_base);
5559 Node* property = get(bytecode.m_property);
5560 Node* getter = get(bytecode.m_getter);
5561 Node* setter = get(bytecode.m_setter);
5562 Node* attributes = get(bytecode.m_attributes);
5563
5564 addVarArgChild(base);
5565 addVarArgChild(property);
5566 addVarArgChild(getter);
5567 addVarArgChild(setter);
5568 addVarArgChild(attributes);
5569 addToGraph(Node::VarArg, DefineAccessorProperty, OpInfo(0), OpInfo(0));
5570
5571 NEXT_OPCODE(op_define_accessor_property);
5572 }
5573
5574 case op_get_by_id_direct: {
5575 parseGetById<OpGetByIdDirect>(currentInstruction);
5576 NEXT_OPCODE(op_get_by_id_direct);
5577 }
5578 case op_try_get_by_id: {
5579 parseGetById<OpTryGetById>(currentInstruction);
5580 NEXT_OPCODE(op_try_get_by_id);
5581 }
5582 case op_get_by_id: {
5583 parseGetById<OpGetById>(currentInstruction);
5584 NEXT_OPCODE(op_get_by_id);
5585 }
5586 case op_get_by_id_with_this: {
5587 SpeculatedType prediction = getPrediction();
5588
5589 auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
5590 Node* base = get(bytecode.m_base);
5591 Node* thisValue = get(bytecode.m_thisValue);
5592 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5593
5594 set(bytecode.m_dst,
5595 addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue));
5596
5597 NEXT_OPCODE(op_get_by_id_with_this);
5598 }
5599 case op_put_by_id: {
5600 auto bytecode = currentInstruction->as<OpPutById>();
5601 Node* value = get(bytecode.m_value);
5602 Node* base = get(bytecode.m_base);
5603 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5604 bool direct = !!(bytecode.m_flags & PutByIdIsDirect);
5605
5606 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
5607 m_inlineStackTop->m_profiledBlock,
5608 m_inlineStackTop->m_baselineMap, m_icContextStack,
5609 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
5610
5611 handlePutById(base, identifierNumber, value, putByIdStatus, direct, currentInstruction->size());
5612 NEXT_OPCODE(op_put_by_id);
5613 }
5614
5615 case op_put_by_id_with_this: {
5616 auto bytecode = currentInstruction->as<OpPutByIdWithThis>();
5617 Node* base = get(bytecode.m_base);
5618 Node* thisValue = get(bytecode.m_thisValue);
5619 Node* value = get(bytecode.m_value);
5620 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5621
5622 addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value);
5623 NEXT_OPCODE(op_put_by_id_with_this);
5624 }
5625
5626 case op_put_getter_by_id:
5627 handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>());
5628 NEXT_OPCODE(op_put_getter_by_id);
5629 case op_put_setter_by_id: {
5630 handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>());
5631 NEXT_OPCODE(op_put_setter_by_id);
5632 }
5633
5634 case op_put_getter_setter_by_id: {
5635 auto bytecode = currentInstruction->as<OpPutGetterSetterById>();
5636 Node* base = get(bytecode.m_base);
5637 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5638 Node* getter = get(bytecode.m_getter);
5639 Node* setter = get(bytecode.m_setter);
5640 addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, getter, setter);
5641 NEXT_OPCODE(op_put_getter_setter_by_id);
5642 }
5643
5644 case op_put_getter_by_val:
5645 handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>());
5646 NEXT_OPCODE(op_put_getter_by_val);
5647 case op_put_setter_by_val: {
5648 handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>());
5649 NEXT_OPCODE(op_put_setter_by_val);
5650 }
5651
5652 case op_del_by_id: {
5653 auto bytecode = currentInstruction->as<OpDelById>();
5654 Node* base = get(bytecode.m_base);
5655 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
5656 set(bytecode.m_dst, addToGraph(DeleteById, OpInfo(identifierNumber), base));
5657 NEXT_OPCODE(op_del_by_id);
5658 }
5659
5660 case op_del_by_val: {
5661 auto bytecode = currentInstruction->as<OpDelByVal>();
5662 Node* base = get(bytecode.m_base);
5663 Node* key = get(bytecode.m_property);
5664 set(bytecode.m_dst, addToGraph(DeleteByVal, base, key));
5665 NEXT_OPCODE(op_del_by_val);
5666 }
5667
5668 case op_profile_type: {
5669 auto bytecode = currentInstruction->as<OpProfileType>();
5670 auto& metadata = bytecode.metadata(codeBlock);
5671 Node* valueToProfile = get(bytecode.m_targetVirtualRegister);
5672 addToGraph(ProfileType, OpInfo(metadata.m_typeLocation), valueToProfile);
5673 NEXT_OPCODE(op_profile_type);
5674 }
5675
5676 case op_profile_control_flow: {
5677 auto bytecode = currentInstruction->as<OpProfileControlFlow>();
5678 BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).m_basicBlockLocation;
5679 addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
5680 NEXT_OPCODE(op_profile_control_flow);
5681 }
5682
5683 // === Block terminators. ===
5684
5685 case op_jmp: {
5686 ASSERT(!m_currentBlock->terminal());
5687 auto bytecode = currentInstruction->as<OpJmp>();
5688 int relativeOffset = jumpTarget(bytecode.m_targetLabel);
5689 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
5690 if (relativeOffset <= 0)
5691 flushForTerminal();
5692 LAST_OPCODE(op_jmp);
5693 }
5694
5695 case op_jtrue: {
5696 auto bytecode = currentInstruction->as<OpJtrue>();
5697 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5698 Node* condition = get(bytecode.m_condition);
5699 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5700 LAST_OPCODE(op_jtrue);
5701 }
5702
5703 case op_jfalse: {
5704 auto bytecode = currentInstruction->as<OpJfalse>();
5705 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5706 Node* condition = get(bytecode.m_condition);
5707 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5708 LAST_OPCODE(op_jfalse);
5709 }
5710
5711 case op_jeq_null: {
5712 auto bytecode = currentInstruction->as<OpJeqNull>();
5713 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5714 Node* value = get(bytecode.m_value);
5715 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5716 Node* condition = addToGraph(CompareEq, value, nullConstant);
5717 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5718 LAST_OPCODE(op_jeq_null);
5719 }
5720
5721 case op_jneq_null: {
5722 auto bytecode = currentInstruction->as<OpJneqNull>();
5723 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5724 Node* value = get(bytecode.m_value);
5725 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
5726 Node* condition = addToGraph(CompareEq, value, nullConstant);
5727 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5728 LAST_OPCODE(op_jneq_null);
5729 }
5730
5731 case op_jless: {
5732 auto bytecode = currentInstruction->as<OpJless>();
5733 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5734 Node* op1 = get(bytecode.m_lhs);
5735 Node* op2 = get(bytecode.m_rhs);
5736 Node* condition = addToGraph(CompareLess, op1, op2);
5737 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5738 LAST_OPCODE(op_jless);
5739 }
5740
5741 case op_jlesseq: {
5742 auto bytecode = currentInstruction->as<OpJlesseq>();
5743 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5744 Node* op1 = get(bytecode.m_lhs);
5745 Node* op2 = get(bytecode.m_rhs);
5746 Node* condition = addToGraph(CompareLessEq, op1, op2);
5747 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5748 LAST_OPCODE(op_jlesseq);
5749 }
5750
5751 case op_jgreater: {
5752 auto bytecode = currentInstruction->as<OpJgreater>();
5753 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5754 Node* op1 = get(bytecode.m_lhs);
5755 Node* op2 = get(bytecode.m_rhs);
5756 Node* condition = addToGraph(CompareGreater, op1, op2);
5757 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5758 LAST_OPCODE(op_jgreater);
5759 }
5760
5761 case op_jgreatereq: {
5762 auto bytecode = currentInstruction->as<OpJgreatereq>();
5763 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5764 Node* op1 = get(bytecode.m_lhs);
5765 Node* op2 = get(bytecode.m_rhs);
5766 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
5767 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5768 LAST_OPCODE(op_jgreatereq);
5769 }
5770
5771 case op_jeq: {
5772 auto bytecode = currentInstruction->as<OpJeq>();
5773 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5774 Node* op1 = get(bytecode.m_lhs);
5775 Node* op2 = get(bytecode.m_rhs);
5776 Node* condition = addToGraph(CompareEq, op1, op2);
5777 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5778 LAST_OPCODE(op_jeq);
5779 }
5780
5781 case op_jstricteq: {
5782 auto bytecode = currentInstruction->as<OpJstricteq>();
5783 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5784 Node* op1 = get(bytecode.m_lhs);
5785 Node* op2 = get(bytecode.m_rhs);
5786 Node* condition = addToGraph(CompareStrictEq, op1, op2);
5787 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5788 LAST_OPCODE(op_jstricteq);
5789 }
5790
5791 case op_jnless: {
5792 auto bytecode = currentInstruction->as<OpJnless>();
5793 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5794 Node* op1 = get(bytecode.m_lhs);
5795 Node* op2 = get(bytecode.m_rhs);
5796 Node* condition = addToGraph(CompareLess, op1, op2);
5797 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5798 LAST_OPCODE(op_jnless);
5799 }
5800
5801 case op_jnlesseq: {
5802 auto bytecode = currentInstruction->as<OpJnlesseq>();
5803 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5804 Node* op1 = get(bytecode.m_lhs);
5805 Node* op2 = get(bytecode.m_rhs);
5806 Node* condition = addToGraph(CompareLessEq, op1, op2);
5807 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5808 LAST_OPCODE(op_jnlesseq);
5809 }
5810
5811 case op_jngreater: {
5812 auto bytecode = currentInstruction->as<OpJngreater>();
5813 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5814 Node* op1 = get(bytecode.m_lhs);
5815 Node* op2 = get(bytecode.m_rhs);
5816 Node* condition = addToGraph(CompareGreater, op1, op2);
5817 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5818 LAST_OPCODE(op_jngreater);
5819 }
5820
5821 case op_jngreatereq: {
5822 auto bytecode = currentInstruction->as<OpJngreatereq>();
5823 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5824 Node* op1 = get(bytecode.m_lhs);
5825 Node* op2 = get(bytecode.m_rhs);
5826 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
5827 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5828 LAST_OPCODE(op_jngreatereq);
5829 }
5830
5831 case op_jneq: {
5832 auto bytecode = currentInstruction->as<OpJneq>();
5833 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5834 Node* op1 = get(bytecode.m_lhs);
5835 Node* op2 = get(bytecode.m_rhs);
5836 Node* condition = addToGraph(CompareEq, op1, op2);
5837 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5838 LAST_OPCODE(op_jneq);
5839 }
5840
5841 case op_jnstricteq: {
5842 auto bytecode = currentInstruction->as<OpJnstricteq>();
5843 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5844 Node* op1 = get(bytecode.m_lhs);
5845 Node* op2 = get(bytecode.m_rhs);
5846 Node* condition = addToGraph(CompareStrictEq, op1, op2);
5847 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
5848 LAST_OPCODE(op_jnstricteq);
5849 }
5850
5851 case op_jbelow: {
5852 auto bytecode = currentInstruction->as<OpJbelow>();
5853 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5854 Node* op1 = get(bytecode.m_lhs);
5855 Node* op2 = get(bytecode.m_rhs);
5856 Node* condition = addToGraph(CompareBelow, op1, op2);
5857 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5858 LAST_OPCODE(op_jbelow);
5859 }
5860
5861 case op_jbeloweq: {
5862 auto bytecode = currentInstruction->as<OpJbeloweq>();
5863 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
5864 Node* op1 = get(bytecode.m_lhs);
5865 Node* op2 = get(bytecode.m_rhs);
5866 Node* condition = addToGraph(CompareBelowEq, op1, op2);
5867 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
5868 LAST_OPCODE(op_jbeloweq);
5869 }
5870
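        // The switch opcodes below each lower to a single Switch node. Every populated
        // branch offset in the baseline jump table becomes a SwitchCase; offsets that
        // resolve to the default target are folded into the fallThrough edge rather than
        // getting a case of their own.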
5871 case op_switch_imm: {
5872 auto bytecode = currentInstruction->as<OpSwitchImm>();
5873 SwitchData& data = *m_graph.m_switchData.add();
5874 data.kind = SwitchImm;
5875 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
5876 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
5877 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
5878 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
5879 if (!table.branchOffsets[i])
5880 continue;
5881 unsigned target = m_currentIndex + table.branchOffsets[i];
5882 if (target == data.fallThrough.bytecodeIndex())
5883 continue;
5884 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
5885 }
5886 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
5887 flushIfTerminal(data);
5888 LAST_OPCODE(op_switch_imm);
5889 }
5890
5891 case op_switch_char: {
5892 auto bytecode = currentInstruction->as<OpSwitchChar>();
5893 SwitchData& data = *m_graph.m_switchData.add();
5894 data.kind = SwitchChar;
5895 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
5896 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
5897 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
5898 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
5899 if (!table.branchOffsets[i])
5900 continue;
5901 unsigned target = m_currentIndex + table.branchOffsets[i];
5902 if (target == data.fallThrough.bytecodeIndex())
5903 continue;
5904 data.cases.append(
5905 SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
5906 }
5907 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
5908 flushIfTerminal(data);
5909 LAST_OPCODE(op_switch_char);
5910 }
5911
5912 case op_switch_string: {
5913 auto bytecode = currentInstruction->as<OpSwitchString>();
5914 SwitchData& data = *m_graph.m_switchData.add();
5915 data.kind = SwitchString;
5916 data.switchTableIndex = bytecode.m_tableIndex;
5917 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
5918 StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
5919 StringJumpTable::StringOffsetTable::iterator iter;
5920 StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
5921 for (iter = table.offsetTable.begin(); iter != end; ++iter) {
5922 unsigned target = m_currentIndex + iter->value.branchOffset;
5923 if (target == data.fallThrough.bytecodeIndex())
5924 continue;
5925 data.cases.append(
5926 SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
5927 }
5928 addToGraph(Switch, OpInfo(&data), get(bytecode.m_scrutinee));
5929 flushIfTerminal(data);
5930 LAST_OPCODE(op_switch_string);
5931 }
5932
5933 case op_ret: {
5934 auto bytecode = currentInstruction->as<OpRet>();
5935 ASSERT(!m_currentBlock->terminal());
5936 if (!inlineCallFrame()) {
5937 // Simple case: we are just producing a return
5938 addToGraph(Return, get(bytecode.m_value));
5939 flushForReturn();
5940 LAST_OPCODE(op_ret);
5941 }
5942
5943 flushForReturn();
5944 if (m_inlineStackTop->m_returnValue.isValid())
5945 setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);
5946
5947 if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
5948 // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
5949                // It is untargetable because we do not know the appropriate index.
5950                // If this block turns out to be a jump target, parseCodeBlock will fix its bytecodeIndex before putting it in m_blockLinkingTargets.
5951 m_inlineStackTop->m_continuationBlock = allocateUntargetableBlock();
5952 }
5953
5954 if (m_inlineStackTop->m_continuationBlock)
5955 addJumpTo(m_inlineStackTop->m_continuationBlock);
5956 else {
5957                // We are returning from an inlined function and do not need to jump anywhere, so we just keep the current block.
5958 m_inlineStackTop->m_continuationBlock = m_currentBlock;
5959 }
5960 LAST_OPCODE_LINKED(op_ret);
5961 }
5962 case op_end:
5963 ASSERT(!inlineCallFrame());
5964 addToGraph(Return, get(currentInstruction->as<OpEnd>().m_value));
5965 flushForReturn();
5966 LAST_OPCODE(op_end);
5967
5968 case op_throw:
5969 addToGraph(Throw, get(currentInstruction->as<OpThrow>().m_value));
5970 flushForTerminal();
5971 LAST_OPCODE(op_throw);
5972
5973 case op_throw_static_error: {
5974 auto bytecode = currentInstruction->as<OpThrowStaticError>();
5975 addToGraph(ThrowStaticError, OpInfo(bytecode.m_errorType), get(bytecode.m_message));
5976 flushForTerminal();
5977 LAST_OPCODE(op_throw_static_error);
5978 }
5979
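        // op_catch may turn the current block into a catch OSR entrypoint. We skip that
        // machinery (and just keep parsing) when we are in an inlined frame, when compiling
        // in FTLForOSREntryMode, or when the baseline value profile buffer has not been
        // populated yet. Otherwise we record the block as a root, seed argument and local
        // predictions from the buffer, and emit ExtractCatchLocal / SetArgumentDefinitely
        // nodes so OSR exit can recover the live values.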
5980 case op_catch: {
5981 auto bytecode = currentInstruction->as<OpCatch>();
5982 m_graph.m_hasExceptionHandlers = true;
5983
5984 if (inlineCallFrame()) {
5985 // We can't do OSR entry into an inlined frame.
5986 NEXT_OPCODE(op_catch);
5987 }
5988
5989 if (m_graph.m_plan.mode() == FTLForOSREntryMode) {
5990 NEXT_OPCODE(op_catch);
5991 }
5992
5993 RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution));
5994
5995 ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).m_buffer;
5996
5997 if (!buffer) {
5998 NEXT_OPCODE(op_catch); // This catch has yet to execute. Note: this load can be racy with the main thread.
5999 }
6000
6001 // We're now committed to compiling this as an entrypoint.
6002 m_currentBlock->isCatchEntrypoint = true;
6003 m_graph.m_roots.append(m_currentBlock);
6004
6005 Vector<SpeculatedType> argumentPredictions(m_numArguments);
6006 Vector<SpeculatedType> localPredictions;
6007 HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> seenArguments;
6008
6009 {
6010 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6011
6012 buffer->forEach([&] (ValueProfileAndOperand& profile) {
6013 VirtualRegister operand(profile.m_operand);
6014 SpeculatedType prediction = profile.m_profile.computeUpdatedPrediction(locker);
6015 if (operand.isLocal())
6016 localPredictions.append(prediction);
6017 else {
6018 RELEASE_ASSERT(operand.isArgument());
6019 RELEASE_ASSERT(static_cast<uint32_t>(operand.toArgument()) < argumentPredictions.size());
6020 if (validationEnabled())
6021 seenArguments.add(operand.toArgument());
6022 argumentPredictions[operand.toArgument()] = prediction;
6023 }
6024 });
6025
6026 if (validationEnabled()) {
6027 for (unsigned argument = 0; argument < m_numArguments; ++argument)
6028 RELEASE_ASSERT(seenArguments.contains(argument));
6029 }
6030 }
6031
6032 Vector<std::pair<VirtualRegister, Node*>> localsToSet;
6033 localsToSet.reserveInitialCapacity(buffer->m_size); // Note: This will reserve more than the number of locals we see below because the buffer includes arguments.
6034
6035 // We're not allowed to exit here since we would not properly recover values.
6036 // We first need to bootstrap the catch entrypoint state.
6037 m_exitOK = false;
6038
6039 unsigned numberOfLocals = 0;
6040 buffer->forEach([&] (ValueProfileAndOperand& profile) {
6041 VirtualRegister operand(profile.m_operand);
6042 if (operand.isArgument())
6043 return;
6044 ASSERT(operand.isLocal());
6045 Node* value = addToGraph(ExtractCatchLocal, OpInfo(numberOfLocals), OpInfo(localPredictions[numberOfLocals]));
6046 ++numberOfLocals;
6047 addToGraph(MovHint, OpInfo(profile.m_operand), value);
6048 localsToSet.uncheckedAppend(std::make_pair(operand, value));
6049 });
6050 if (numberOfLocals)
6051 addToGraph(ClearCatchLocals);
6052
6053 if (!m_graph.m_maxLocalsForCatchOSREntry)
6054 m_graph.m_maxLocalsForCatchOSREntry = 0;
6055 m_graph.m_maxLocalsForCatchOSREntry = std::max(numberOfLocals, *m_graph.m_maxLocalsForCatchOSREntry);
6056
6057 // We could not exit before this point in the program because we would not know how to do value
6058 // recovery for live locals. The above IR sets up the necessary state so we can recover values
6059 // during OSR exit.
6060 //
6061 // The nodes that follow here all exit to the following bytecode instruction, not
6062 // the op_catch. Exiting to op_catch is reserved for when an exception is thrown.
6063 // The SetArgumentDefinitely nodes that follow below may exit because we may hoist type checks
6064 // to them. The SetLocal nodes that follow below may exit because we may choose
6065 // a flush format that speculates on the type of the local.
6066 m_exitOK = true;
6067 addToGraph(ExitOK);
6068
6069 {
6070 auto addResult = m_graph.m_rootToArguments.add(m_currentBlock, ArgumentsVector());
6071 RELEASE_ASSERT(addResult.isNewEntry);
6072 ArgumentsVector& entrypointArguments = addResult.iterator->value;
6073 entrypointArguments.resize(m_numArguments);
6074
6075 unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size();
6076
6077 for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) {
6078 VariableAccessData* variable = newVariableAccessData(virtualRegisterForArgument(argument));
6079 variable->predict(argumentPredictions[argument]);
6080
6081 variable->mergeStructureCheckHoistingFailed(
6082 m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadCache));
6083 variable->mergeCheckArrayHoistingFailed(
6084 m_inlineStackTop->m_exitProfile.hasExitSite(exitBytecodeIndex, BadIndexingType));
6085
6086 Node* setArgument = addToGraph(SetArgumentDefinitely, OpInfo(variable));
6087 setArgument->origin.forExit = CodeOrigin(exitBytecodeIndex, setArgument->origin.forExit.inlineCallFrame());
6088 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
6089 entrypointArguments[argument] = setArgument;
6090 }
6091 }
6092
6093 for (const std::pair<VirtualRegister, Node*>& pair : localsToSet) {
6094 DelayedSetLocal delayed { currentCodeOrigin(), pair.first, pair.second, ImmediateNakedSet };
6095 m_setLocalQueue.append(delayed);
6096 }
6097
6098 NEXT_OPCODE(op_catch);
6099 }
6100
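        // The call opcodes below all funnel through handleCall / handleVarargsCall, which
        // may inline the callee. Tail calls flush for return first, and we only stop
        // parsing the block when the handler reports that the call was terminal.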
6101 case op_call:
6102 handleCall<OpCall>(currentInstruction, Call, CallMode::Regular);
6103 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6104 NEXT_OPCODE(op_call);
6105
6106 case op_tail_call: {
6107 flushForReturn();
6108 Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail);
6109 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6110 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6111 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6112 // things up.
6113 if (terminality == NonTerminal)
6114 NEXT_OPCODE(op_tail_call);
6115 else
6116 LAST_OPCODE_LINKED(op_tail_call);
6117 // We use LAST_OPCODE_LINKED instead of LAST_OPCODE because if the tail call was optimized, it may now be a jump to a bytecode index in a different InlineStackEntry.
6118 }
6119
6120 case op_construct:
6121 handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct);
6122 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction");
6123 NEXT_OPCODE(op_construct);
6124
6125 case op_call_varargs: {
6126 handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular);
6127 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6128 NEXT_OPCODE(op_call_varargs);
6129 }
6130
6131 case op_tail_call_varargs: {
6132 flushForReturn();
6133 Terminality terminality = handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail);
6134 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6135 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6136 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6137 // things up.
6138 if (terminality == NonTerminal)
6139 NEXT_OPCODE(op_tail_call_varargs);
6140 else
6141 LAST_OPCODE(op_tail_call_varargs);
6142 }
6143
6144 case op_tail_call_forward_arguments: {
6145 // We need to make sure that we don't unbox our arguments here since that won't be
6146 // done by the arguments object creation node as that node may not exist.
6147 noticeArgumentsUse();
6148 flushForReturn();
6149 Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail);
6150 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6151 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function.
6152 // If the call is not terminal, however, then we want the subsequent op_ret/op_jmp to update metadata and clean
6153 // things up.
6154 if (terminality == NonTerminal)
6155 NEXT_OPCODE(op_tail_call_forward_arguments);
6156 else
6157 LAST_OPCODE(op_tail_call_forward_arguments);
6158 }
6159
6160 case op_construct_varargs: {
6161 handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct);
6162 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction");
6163 NEXT_OPCODE(op_construct_varargs);
6164 }
6165
6166 case op_call_eval: {
6167 auto bytecode = currentInstruction->as<OpCallEval>();
6168 int registerOffset = -bytecode.m_argv;
6169 addCall(bytecode.m_dst, CallEval, nullptr, get(bytecode.m_callee), bytecode.m_argc, registerOffset, getPrediction());
6170 NEXT_OPCODE(op_call_eval);
6171 }
6172
6173 case op_jneq_ptr: {
6174 auto bytecode = currentInstruction->as<OpJneqPtr>();
6175 Special::Pointer specialPointer = bytecode.m_specialPointer;
6176 ASSERT(pointerIsCell(specialPointer));
6177 JSCell* actualPointer = static_cast<JSCell*>(
6178 actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer));
6179 FrozenValue* frozenPointer = m_graph.freeze(actualPointer);
6180 unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
6181 Node* child = get(bytecode.m_value);
6182 if (bytecode.metadata(codeBlock).m_hasJumped) {
6183 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
6184 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
6185 LAST_OPCODE(op_jneq_ptr);
6186 }
6187 addToGraph(CheckCell, OpInfo(frozenPointer), child);
6188 NEXT_OPCODE(op_jneq_ptr);
6189 }
6190
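        // op_resolve_scope snapshots the resolve type from the baseline metadata under the
        // profiled block's lock and then tries to constant-fold the resulting scope:
        // global resolve types fold to the known constant scope, ModuleVar folds to the
        // module environment, and closure-var resolve types fold through the symbol
        // table's singleton scope or a constant scope-chain walk before falling back to a
        // chain of SkipScope nodes.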
6191 case op_resolve_scope: {
6192 auto bytecode = currentInstruction->as<OpResolveScope>();
6193 auto& metadata = bytecode.metadata(codeBlock);
6194
6195 ResolveType resolveType;
6196 unsigned depth;
6197 JSScope* constantScope = nullptr;
6198 JSCell* lexicalEnvironment = nullptr;
6199 SymbolTable* symbolTable = nullptr;
6200 {
6201 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6202 resolveType = metadata.m_resolveType;
6203 depth = metadata.m_localScopeDepth;
6204 switch (resolveType) {
6205 case GlobalProperty:
6206 case GlobalVar:
6207 case GlobalPropertyWithVarInjectionChecks:
6208 case GlobalVarWithVarInjectionChecks:
6209 case GlobalLexicalVar:
6210 case GlobalLexicalVarWithVarInjectionChecks:
6211 constantScope = metadata.m_constantScope.get();
6212 break;
6213 case ModuleVar:
6214 lexicalEnvironment = metadata.m_lexicalEnvironment.get();
6215 break;
6216 case LocalClosureVar:
6217 case ClosureVar:
6218 case ClosureVarWithVarInjectionChecks:
6219 symbolTable = metadata.m_symbolTable.get();
6220 break;
6221 default:
6222 break;
6223 }
6224 }
6225
6226 if (needsDynamicLookup(resolveType, op_resolve_scope)) {
6227 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6228 set(bytecode.m_dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.m_scope)));
6229 NEXT_OPCODE(op_resolve_scope);
6230 }
6231
6232 // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
6233 if (needsVarInjectionChecks(resolveType))
6234 m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint());
6235
6236            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
6237 // https://bugs.webkit.org/show_bug.cgi?id=193347
6238 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6239 if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
6240 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6241 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6242 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6243 addToGraph(ForceOSRExit);
6244 }
6245 }
6246
6247 switch (resolveType) {
6248 case GlobalProperty:
6249 case GlobalVar:
6250 case GlobalPropertyWithVarInjectionChecks:
6251 case GlobalVarWithVarInjectionChecks:
6252 case GlobalLexicalVar:
6253 case GlobalLexicalVarWithVarInjectionChecks: {
6254 RELEASE_ASSERT(constantScope);
6255 RELEASE_ASSERT(constantScope == JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6256 set(bytecode.m_dst, weakJSConstant(constantScope));
6257 addToGraph(Phantom, get(bytecode.m_scope));
6258 break;
6259 }
6260 case ModuleVar: {
6261            // Since the value of the "scope" virtual register is not used by LLInt / baseline op_resolve_scope with ModuleVar,
6262            // we do not need to keep it alive with a Phantom node.
6263            // The module environment is already strongly referenced by the CodeBlock.
6264 set(bytecode.m_dst, weakJSConstant(lexicalEnvironment));
6265 break;
6266 }
6267 case LocalClosureVar:
6268 case ClosureVar:
6269 case ClosureVarWithVarInjectionChecks: {
6270 Node* localBase = get(bytecode.m_scope);
6271 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
6272
6273 // We have various forms of constant folding here. This is necessary to avoid
6274 // spurious recompiles in dead-but-foldable code.
6275 if (symbolTable) {
6276 InferredValue* singleton = symbolTable->singletonScope();
6277 if (JSValue value = singleton->inferredValue()) {
6278 m_graph.watchpoints().addLazily(singleton);
6279 set(bytecode.m_dst, weakJSConstant(value));
6280 break;
6281 }
6282 }
6283 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>(*m_vm)) {
6284 for (unsigned n = depth; n--;)
6285 scope = scope->next();
6286 set(bytecode.m_dst, weakJSConstant(scope));
6287 break;
6288 }
6289 for (unsigned n = depth; n--;)
6290 localBase = addToGraph(SkipScope, localBase);
6291 set(bytecode.m_dst, localBase);
6292 break;
6293 }
6294 case UnresolvedProperty:
6295 case UnresolvedPropertyWithVarInjectionChecks: {
6296 addToGraph(Phantom, get(bytecode.m_scope));
6297 addToGraph(ForceOSRExit);
6298 set(bytecode.m_dst, addToGraph(JSConstant, OpInfo(m_constantNull)));
6299 break;
6300 }
6301 case Dynamic:
6302 RELEASE_ASSERT_NOT_REACHED();
6303 break;
6304 }
6305 NEXT_OPCODE(op_resolve_scope);
6306 }
6307 case op_resolve_scope_for_hoisting_func_decl_in_eval: {
6308 auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>();
6309 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
6310
6311 set(bytecode.m_dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.m_scope)));
6312
6313 NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval);
6314 }
6315
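        // op_get_from_scope mirrors op_resolve_scope: the metadata is snapshotted under
        // the lock and each resolve type gets its own lowering. Global variables can be
        // constant-folded while their watchpoint set is still watched (see the concurrency
        // comment below), closure variables can be folded via tryGetConstantClosureVar,
        // and the remaining cases become GetByIdFlush, GetGlobalVar / GetGlobalLexicalVariable,
        // or GetClosureVar nodes.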
6316 case op_get_from_scope: {
6317 auto bytecode = currentInstruction->as<OpGetFromScope>();
6318 auto& metadata = bytecode.metadata(codeBlock);
6319 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_var];
6320 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
6321
6322 ResolveType resolveType;
6323 GetPutInfo getPutInfo(0);
6324 Structure* structure = 0;
6325 WatchpointSet* watchpoints = 0;
6326 uintptr_t operand;
6327 {
6328 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6329 getPutInfo = metadata.m_getPutInfo;
6330 resolveType = getPutInfo.resolveType();
6331 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6332 watchpoints = metadata.m_watchpointSet;
6333 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
6334 structure = metadata.m_structure.get();
6335 operand = metadata.m_operand;
6336 }
6337
6338 if (needsDynamicLookup(resolveType, op_get_from_scope)) {
6339 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
6340 SpeculatedType prediction = getPrediction();
6341 set(bytecode.m_dst,
6342 addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.m_scope)));
6343 NEXT_OPCODE(op_get_from_scope);
6344 }
6345
6346        UNUSED_PARAM(watchpoints); // We will use this in the future. For now we keep it as a way of documenting that this is what index 5 is in GlobalVar mode.
6347
6348 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6349
6350 switch (resolveType) {
6351 case GlobalProperty:
6352 case GlobalPropertyWithVarInjectionChecks: {
6353            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
6354 // https://bugs.webkit.org/show_bug.cgi?id=193347
6355 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6356 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6357 addToGraph(ForceOSRExit);
6358 }
6359
6360 SpeculatedType prediction = getPrediction();
6361
6362 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
6363 if (status.state() != GetByIdStatus::Simple
6364 || status.numVariants() != 1
6365 || status[0].structureSet().size() != 1) {
6366 set(bytecode.m_dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.m_scope)));
6367 break;
6368 }
6369
6370 Node* base = weakJSConstant(globalObject);
6371 Node* result = load(prediction, base, identifierNumber, status[0]);
6372 addToGraph(Phantom, get(bytecode.m_scope));
6373 set(bytecode.m_dst, result);
6374 break;
6375 }
6376 case GlobalVar:
6377 case GlobalVarWithVarInjectionChecks:
6378 case GlobalLexicalVar:
6379 case GlobalLexicalVarWithVarInjectionChecks: {
6380 addToGraph(Phantom, get(bytecode.m_scope));
6381 WatchpointSet* watchpointSet;
6382 ScopeOffset offset;
6383 JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6384 {
6385 ConcurrentJSLocker locker(scopeObject->symbolTable()->m_lock);
6386 SymbolTableEntry entry = scopeObject->symbolTable()->get(locker, uid);
6387 watchpointSet = entry.watchpointSet();
6388 offset = entry.scopeOffset();
6389 }
6390 if (watchpointSet && watchpointSet->state() == IsWatched) {
6391 // This has a fun concurrency story. There is the possibility of a race in two
6392 // directions:
6393 //
6394 // We see that the set IsWatched, but in the meantime it gets invalidated: this is
6395 // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
6396 // invalidated, then this compilation is invalidated. Note that in the meantime we
6397 // may load an absurd value from the global object. It's fine to load an absurd
6398 // value if the compilation is invalidated anyway.
6399 //
6400 // We see that the set IsWatched, but the value isn't yet initialized: this isn't
6401 // possible because of the ordering of operations.
6402 //
6403 // Here's how we order operations:
6404 //
6405 // Main thread stores to the global object: always store a value first, and only
6406 // after that do we touch the watchpoint set. There is a fence in the touch, that
6407 // ensures that the store to the global object always happens before the touch on the
6408 // set.
6409 //
6410 // Compilation thread: always first load the state of the watchpoint set, and then
6411 // load the value. The WatchpointSet::state() method does fences for us to ensure
6412 // that the load of the state happens before our load of the value.
6413 //
6414 // Finalizing compilation: this happens on the main thread and synchronously checks
6415 // validity of all watchpoint sets.
6416 //
6417 // We will only perform optimizations if the load of the state yields IsWatched. That
6418 // means that at least one store would have happened to initialize the original value
6419 // of the variable (that is, the value we'd like to constant fold to). There may be
6420 // other stores that happen after that, but those stores will invalidate the
6421 // watchpoint set and also the compilation.
6422
6423                // Note that we need to use the operand, which is a direct pointer to the global,
6424 // rather than looking up the global by doing variableAt(offset). That's because the
6425 // internal data structures of JSSegmentedVariableObject are not thread-safe even
6426 // though accessing the global itself is. The segmentation involves a vector spine
6427 // that resizes with malloc/free, so if new globals unrelated to the one we are
6428 // reading are added, we might access freed memory if we do variableAt().
6429 WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
6430
6431 ASSERT(scopeObject->findVariableIndex(pointer) == offset);
6432
6433 JSValue value = pointer->get();
6434 if (value) {
6435 m_graph.watchpoints().addLazily(watchpointSet);
6436 set(bytecode.m_dst, weakJSConstant(value));
6437 break;
6438 }
6439 }
6440
6441 SpeculatedType prediction = getPrediction();
6442 NodeType nodeType;
6443 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
6444 nodeType = GetGlobalVar;
6445 else
6446 nodeType = GetGlobalLexicalVariable;
6447 Node* value = addToGraph(nodeType, OpInfo(operand), OpInfo(prediction));
6448 if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6449 addToGraph(CheckNotEmpty, value);
6450 set(bytecode.m_dst, value);
6451 break;
6452 }
6453 case LocalClosureVar:
6454 case ClosureVar:
6455 case ClosureVarWithVarInjectionChecks: {
6456 Node* scopeNode = get(bytecode.m_scope);
6457
6458 // Ideally we wouldn't have to do this Phantom. But:
6459 //
6460 // For the constant case: we must do it because otherwise we would have no way of knowing
6461 // that the scope is live at OSR here.
6462 //
6463 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
6464 // won't be able to handle an Undefined scope.
6465 addToGraph(Phantom, scopeNode);
6466
6467 // Constant folding in the bytecode parser is important for performance. This may not
6468 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
6469 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
6470 // would recompile. But if we can fold it here, we avoid the exit.
6471 if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
6472 set(bytecode.m_dst, weakJSConstant(value));
6473 break;
6474 }
6475 SpeculatedType prediction = getPrediction();
6476 set(bytecode.m_dst,
6477 addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
6478 break;
6479 }
6480 case UnresolvedProperty:
6481 case UnresolvedPropertyWithVarInjectionChecks:
6482 case ModuleVar:
6483 case Dynamic:
6484 RELEASE_ASSERT_NOT_REACHED();
6485 break;
6486 }
6487 NEXT_OPCODE(op_get_from_scope);
6488 }
6489
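        // op_put_to_scope follows the same shape on the store side: global property puts
        // consult PutByIdStatus, while global and closure variable puts emit
        // PutGlobalVariable / PutClosureVar followed by NotifyWrite when a still-valid
        // watchpoint set needs to be fired.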
6490 case op_put_to_scope: {
6491 auto bytecode = currentInstruction->as<OpPutToScope>();
6492 auto& metadata = bytecode.metadata(codeBlock);
6493 unsigned identifierNumber = bytecode.m_var;
6494 if (identifierNumber != UINT_MAX)
6495 identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
6496 UniquedStringImpl* uid;
6497 if (identifierNumber != UINT_MAX)
6498 uid = m_graph.identifiers()[identifierNumber];
6499 else
6500 uid = nullptr;
6501
6502 ResolveType resolveType;
6503 GetPutInfo getPutInfo(0);
6504 Structure* structure = nullptr;
6505 WatchpointSet* watchpoints = nullptr;
6506 uintptr_t operand;
6507 {
6508 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
6509 getPutInfo = metadata.m_getPutInfo;
6510 resolveType = getPutInfo.resolveType();
6511 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
6512 watchpoints = metadata.m_watchpointSet;
6513 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
6514 structure = metadata.m_structure.get();
6515 operand = metadata.m_operand;
6516 }
6517
6518 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
6519
6520 if (needsDynamicLookup(resolveType, op_put_to_scope)) {
6521 ASSERT(identifierNumber != UINT_MAX);
6522 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, getPutInfo.operand());
6523 addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.m_scope), get(bytecode.m_value));
6524 NEXT_OPCODE(op_put_to_scope);
6525 }
6526
6527 switch (resolveType) {
6528 case GlobalProperty:
6529 case GlobalPropertyWithVarInjectionChecks: {
6530            // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
6531 // https://bugs.webkit.org/show_bug.cgi?id=193347
6532 if (m_inlineStackTop->m_codeBlock->scriptMode() != JSParserScriptMode::Module) {
6533 if (!m_graph.watchGlobalProperty(globalObject, identifierNumber))
6534 addToGraph(ForceOSRExit);
6535 }
6536
6537 PutByIdStatus status;
6538 if (uid)
6539 status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
6540 else
6541 status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
6542 if (status.numVariants() != 1
6543 || status[0].kind() != PutByIdVariant::Replace
6544 || status[0].structure().size() != 1) {
6545 addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.m_scope), get(bytecode.m_value));
6546 break;
6547 }
6548 Node* base = weakJSConstant(globalObject);
6549 store(base, identifierNumber, status[0], get(bytecode.m_value));
6550 // Keep scope alive until after put.
6551 addToGraph(Phantom, get(bytecode.m_scope));
6552 break;
6553 }
6554 case GlobalLexicalVar:
6555 case GlobalLexicalVarWithVarInjectionChecks:
6556 case GlobalVar:
6557 case GlobalVarWithVarInjectionChecks: {
6558 if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
6559 SpeculatedType prediction = SpecEmpty;
6560 Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction));
6561 addToGraph(CheckNotEmpty, value);
6562 }
6563
6564 JSSegmentedVariableObject* scopeObject = jsCast<JSSegmentedVariableObject*>(JSScope::constantScopeForCodeBlock(resolveType, m_inlineStackTop->m_codeBlock));
6565 if (watchpoints) {
6566 SymbolTableEntry entry = scopeObject->symbolTable()->get(uid);
6567 ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
6568 }
6569 Node* valueNode = get(bytecode.m_value);
6570 addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode);
6571 if (watchpoints && watchpoints->state() != IsInvalidated) {
6572 // Must happen after the store. See comment for GetGlobalVar.
6573 addToGraph(NotifyWrite, OpInfo(watchpoints));
6574 }
6575 // Keep scope alive until after put.
6576 addToGraph(Phantom, get(bytecode.m_scope));
6577 break;
6578 }
6579 case LocalClosureVar:
6580 case ClosureVar:
6581 case ClosureVarWithVarInjectionChecks: {
6582 Node* scopeNode = get(bytecode.m_scope);
6583 Node* valueNode = get(bytecode.m_value);
6584
6585 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);
6586
6587 if (watchpoints && watchpoints->state() != IsInvalidated) {
6588 // Must happen after the store. See comment for GetGlobalVar.
6589 addToGraph(NotifyWrite, OpInfo(watchpoints));
6590 }
6591 break;
6592 }
6593
6594 case ModuleVar:
6595            // We do not need to keep the "scope" and "value" register values alive here with Phantom nodes because
6596            // they are not used by LLInt / baseline op_put_to_scope with ModuleVar.
6597 addToGraph(ForceOSRExit);
6598 break;
6599
6600 case Dynamic:
6601 case UnresolvedProperty:
6602 case UnresolvedPropertyWithVarInjectionChecks:
6603 RELEASE_ASSERT_NOT_REACHED();
6604 break;
6605 }
6606 NEXT_OPCODE(op_put_to_scope);
6607 }
6608
6609 case op_loop_hint: {
6610 // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
6611 // OSR can only happen at basic block boundaries. Assert that these two statements
6612 // are compatible.
6613 RELEASE_ASSERT(m_currentIndex == blockBegin);
6614
6615 // We never do OSR into an inlined code block. That could not happen, since OSR
6616 // looks up the code block that is the replacement for the baseline JIT code
6617 // block. Hence, machine code block = true code block = not inline code block.
6618 if (!m_inlineStackTop->m_caller)
6619 m_currentBlock->isOSRTarget = true;
6620
6621 addToGraph(LoopHint);
6622 NEXT_OPCODE(op_loop_hint);
6623 }
6624
6625 case op_check_traps: {
6626 addToGraph(Options::usePollingTraps() ? CheckTraps : InvalidationPoint);
6627 NEXT_OPCODE(op_check_traps);
6628 }
6629
6630 case op_nop: {
6631 addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
6632 NEXT_OPCODE(op_nop);
6633 }
6634
6635 case op_super_sampler_begin: {
6636 addToGraph(SuperSamplerBegin);
6637 NEXT_OPCODE(op_super_sampler_begin);
6638 }
6639
6640 case op_super_sampler_end: {
6641 addToGraph(SuperSamplerEnd);
6642 NEXT_OPCODE(op_super_sampler_end);
6643 }
6644
6645 case op_create_lexical_environment: {
6646 auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>();
6647 ASSERT(bytecode.m_symbolTable.isConstant() && bytecode.m_initialValue.isConstant());
6648 FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_symbolTable.offset()));
6649 FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.m_initialValue.offset()));
6650 Node* scope = get(bytecode.m_scope);
6651 Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope);
6652 set(bytecode.m_dst, lexicalEnvironment);
6653 NEXT_OPCODE(op_create_lexical_environment);
6654 }
6655
6656 case op_push_with_scope: {
6657 auto bytecode = currentInstruction->as<OpPushWithScope>();
6658 Node* currentScope = get(bytecode.m_currentScope);
6659 Node* object = get(bytecode.m_newScope);
6660 set(bytecode.m_dst, addToGraph(PushWithScope, currentScope, object));
6661 NEXT_OPCODE(op_push_with_scope);
6662 }
6663
6664 case op_get_parent_scope: {
6665 auto bytecode = currentInstruction->as<OpGetParentScope>();
6666 Node* currentScope = get(bytecode.m_scope);
6667 Node* newScope = addToGraph(SkipScope, currentScope);
6668 set(bytecode.m_dst, newScope);
6669 addToGraph(Phantom, currentScope);
6670 NEXT_OPCODE(op_get_parent_scope);
6671 }
6672
6673 case op_get_scope: {
6674 // Help the later stages a bit by doing some small constant folding here. Note that this
6675 // only helps for the first basic block. It's extremely important not to constant fold
6676 // loads from the scope register later, as that would prevent the DFG from tracking the
6677 // bytecode-level liveness of the scope register.
6678 auto bytecode = currentInstruction->as<OpGetScope>();
6679 Node* callee = get(VirtualRegister(CallFrameSlot::callee));
6680 Node* result;
6681 if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm))
6682 result = weakJSConstant(function->scope());
6683 else
6684 result = addToGraph(GetScope, callee);
6685 set(bytecode.m_dst, result);
6686 NEXT_OPCODE(op_get_scope);
6687 }
6688
6689 case op_argument_count: {
6690 auto bytecode = currentInstruction->as<OpArgumentCount>();
6691 Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne)));
6692 set(bytecode.m_dst, sub);
6693 NEXT_OPCODE(op_argument_count);
6694 }
6695
6696 case op_create_direct_arguments: {
6697 auto bytecode = currentInstruction->as<OpCreateDirectArguments>();
6698 noticeArgumentsUse();
6699 Node* createArguments = addToGraph(CreateDirectArguments);
6700 set(bytecode.m_dst, createArguments);
6701 NEXT_OPCODE(op_create_direct_arguments);
6702 }
6703
6704 case op_create_scoped_arguments: {
6705 auto bytecode = currentInstruction->as<OpCreateScopedArguments>();
6706 noticeArgumentsUse();
6707 Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.m_scope));
6708 set(bytecode.m_dst, createArguments);
6709 NEXT_OPCODE(op_create_scoped_arguments);
6710 }
6711
6712 case op_create_cloned_arguments: {
6713 auto bytecode = currentInstruction->as<OpCreateClonedArguments>();
6714 noticeArgumentsUse();
6715 Node* createArguments = addToGraph(CreateClonedArguments);
6716 set(bytecode.m_dst, createArguments);
6717 NEXT_OPCODE(op_create_cloned_arguments);
6718 }
6719
6720 case op_get_from_arguments: {
6721 auto bytecode = currentInstruction->as<OpGetFromArguments>();
6722 set(bytecode.m_dst,
6723 addToGraph(
6724 GetFromArguments,
6725 OpInfo(bytecode.m_index),
6726 OpInfo(getPrediction()),
6727 get(bytecode.m_arguments)));
6728 NEXT_OPCODE(op_get_from_arguments);
6729 }
6730
6731 case op_put_to_arguments: {
6732 auto bytecode = currentInstruction->as<OpPutToArguments>();
6733 addToGraph(
6734 PutToArguments,
6735 OpInfo(bytecode.m_index),
6736 get(bytecode.m_arguments),
6737 get(bytecode.m_value));
6738 NEXT_OPCODE(op_put_to_arguments);
6739 }
6740
6741 case op_get_argument: {
6742 auto bytecode = currentInstruction->as<OpGetArgument>();
6743 InlineCallFrame* inlineCallFrame = this->inlineCallFrame();
6744 Node* argument;
6745 int32_t argumentIndexIncludingThis = bytecode.m_index;
6746 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
6747 int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size();
6748 if (argumentIndexIncludingThis < argumentCountIncludingThisWithFixup)
6749 argument = get(virtualRegisterForArgument(argumentIndexIncludingThis));
6750 else
6751 argument = addToGraph(JSConstant, OpInfo(m_constantUndefined));
6752 } else
6753 argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction()));
6754 set(bytecode.m_dst, argument);
6755 NEXT_OPCODE(op_get_argument);
6756 }
6757 case op_new_async_generator_func:
6758 handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>());
6759 NEXT_OPCODE(op_new_async_generator_func);
6760 case op_new_func:
6761 handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>());
6762 NEXT_OPCODE(op_new_func);
6763 case op_new_generator_func:
6764 handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>());
6765 NEXT_OPCODE(op_new_generator_func);
6766 case op_new_async_func:
6767 handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>());
6768 NEXT_OPCODE(op_new_async_func);
6769
6770 case op_new_func_exp:
6771 handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>());
6772 NEXT_OPCODE(op_new_func_exp);
6773 case op_new_generator_func_exp:
6774 handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>());
6775 NEXT_OPCODE(op_new_generator_func_exp);
6776 case op_new_async_generator_func_exp:
6777 handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>());
6778 NEXT_OPCODE(op_new_async_generator_func_exp);
6779 case op_new_async_func_exp:
6780 handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>());
6781 NEXT_OPCODE(op_new_async_func_exp);
6782
6783 case op_set_function_name: {
6784 auto bytecode = currentInstruction->as<OpSetFunctionName>();
6785 Node* func = get(bytecode.m_function);
6786 Node* name = get(bytecode.m_name);
6787 addToGraph(SetFunctionName, func, name);
6788 NEXT_OPCODE(op_set_function_name);
6789 }
6790
6791 case op_typeof: {
6792 auto bytecode = currentInstruction->as<OpTypeof>();
6793 set(bytecode.m_dst, addToGraph(TypeOf, get(bytecode.m_value)));
6794 NEXT_OPCODE(op_typeof);
6795 }
6796
6797 case op_to_number: {
6798 auto bytecode = currentInstruction->as<OpToNumber>();
6799 SpeculatedType prediction = getPrediction();
6800 Node* value = get(bytecode.m_operand);
6801 set(bytecode.m_dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));
6802 NEXT_OPCODE(op_to_number);
6803 }
6804
6805 case op_to_string: {
6806 auto bytecode = currentInstruction->as<OpToString>();
6807 Node* value = get(bytecode.m_operand);
6808 set(bytecode.m_dst, addToGraph(ToString, value));
6809 NEXT_OPCODE(op_to_string);
6810 }
6811
6812 case op_to_object: {
6813 auto bytecode = currentInstruction->as<OpToObject>();
6814 SpeculatedType prediction = getPrediction();
6815 Node* value = get(bytecode.m_operand);
6816 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_message];
6817 set(bytecode.m_dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));
6818 NEXT_OPCODE(op_to_object);
6819 }
6820
6821 case op_in_by_val: {
6822 auto bytecode = currentInstruction->as<OpInByVal>();
6823 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
6824 set(bytecode.m_dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.m_base), get(bytecode.m_property)));
6825 NEXT_OPCODE(op_in_by_val);
6826 }
6827
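        // op_in_by_id consults InByIdStatus: if the status is simple and every variant's
        // condition set checks out, we lower to a MatchStructure node guarded by
        // FilterInByIdStatus; otherwise we fall back to a generic InById node.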
6828 case op_in_by_id: {
6829 auto bytecode = currentInstruction->as<OpInById>();
6830 Node* base = get(bytecode.m_base);
6831 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
6832 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
6833
6834 InByIdStatus status = InByIdStatus::computeFor(
6835 m_inlineStackTop->m_profiledBlock,
6836 m_inlineStackTop->m_baselineMap, m_icContextStack,
6837 currentCodeOrigin(), uid);
6838
6839 if (status.isSimple()) {
6840 bool allOK = true;
6841 MatchStructureData* data = m_graph.m_matchStructureData.add();
6842 for (const InByIdVariant& variant : status.variants()) {
6843 if (!check(variant.conditionSet())) {
6844 allOK = false;
6845 break;
6846 }
6847 for (Structure* structure : variant.structureSet()) {
6848 MatchStructureVariant matchVariant;
6849 matchVariant.structure = m_graph.registerStructure(structure);
6850 matchVariant.result = variant.isHit();
6851
6852 data->variants.append(WTFMove(matchVariant));
6853 }
6854 }
6855
6856 if (allOK) {
6857 addToGraph(FilterInByIdStatus, OpInfo(m_graph.m_plan.recordedStatuses().addInByIdStatus(currentCodeOrigin(), status)), base);
6858
6859 Node* match = addToGraph(MatchStructure, OpInfo(data), base);
6860 set(bytecode.m_dst, match);
6861 NEXT_OPCODE(op_in_by_id);
6862 }
6863 }
6864
6865 set(bytecode.m_dst, addToGraph(InById, OpInfo(identifierNumber), base));
6866 NEXT_OPCODE(op_in_by_id);
6867 }
6868
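        // The opcodes below implement for-in enumeration. They map essentially one-to-one
        // onto DFG nodes (GetEnumerableLength, HasGenericProperty, HasStructureProperty,
        // HasIndexedProperty, GetDirectPname, and the enumerator property-name accessors).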
6869 case op_get_enumerable_length: {
6870 auto bytecode = currentInstruction->as<OpGetEnumerableLength>();
6871 set(bytecode.m_dst, addToGraph(GetEnumerableLength, get(bytecode.m_base)));
6872 NEXT_OPCODE(op_get_enumerable_length);
6873 }
6874
6875 case op_has_generic_property: {
6876 auto bytecode = currentInstruction->as<OpHasGenericProperty>();
6877 set(bytecode.m_dst, addToGraph(HasGenericProperty, get(bytecode.m_base), get(bytecode.m_property)));
6878 NEXT_OPCODE(op_has_generic_property);
6879 }
6880
6881 case op_has_structure_property: {
6882 auto bytecode = currentInstruction->as<OpHasStructureProperty>();
6883 set(bytecode.m_dst, addToGraph(HasStructureProperty,
6884 get(bytecode.m_base),
6885 get(bytecode.m_property),
6886 get(bytecode.m_enumerator)));
6887 NEXT_OPCODE(op_has_structure_property);
6888 }
6889
6890 case op_has_indexed_property: {
6891 auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
6892 Node* base = get(bytecode.m_base);
6893 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).m_arrayProfile, Array::Read);
6894 Node* property = get(bytecode.m_property);
6895 addVarArgChild(base);
6896 addVarArgChild(property);
6897 addVarArgChild(nullptr);
6898 Node* hasIterableProperty = addToGraph(Node::VarArg, HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)));
6899 m_exitOK = false; // HasIndexedProperty must be treated as if it clobbers exit state, since FixupPhase may make it generic.
6900 set(bytecode.m_dst, hasIterableProperty);
6901 NEXT_OPCODE(op_has_indexed_property);
6902 }
6903
6904 case op_get_direct_pname: {
6905 auto bytecode = currentInstruction->as<OpGetDirectPname>();
6906 SpeculatedType prediction = getPredictionWithoutOSRExit();
6907
6908 Node* base = get(bytecode.m_base);
6909 Node* property = get(bytecode.m_property);
6910 Node* index = get(bytecode.m_index);
6911 Node* enumerator = get(bytecode.m_enumerator);
6912
6913 addVarArgChild(base);
6914 addVarArgChild(property);
6915 addVarArgChild(index);
6916 addVarArgChild(enumerator);
6917 set(bytecode.m_dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
6918
6919 NEXT_OPCODE(op_get_direct_pname);
6920 }
6921
6922 case op_get_property_enumerator: {
6923 auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>();
6924 set(bytecode.m_dst, addToGraph(GetPropertyEnumerator, get(bytecode.m_base)));
6925 NEXT_OPCODE(op_get_property_enumerator);
6926 }
6927
6928 case op_enumerator_structure_pname: {
6929 auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
6930 set(bytecode.m_dst, addToGraph(GetEnumeratorStructurePname,
6931 get(bytecode.m_enumerator),
6932 get(bytecode.m_index)));
6933 NEXT_OPCODE(op_enumerator_structure_pname);
6934 }
6935
6936 case op_enumerator_generic_pname: {
6937 auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
6938 set(bytecode.m_dst, addToGraph(GetEnumeratorGenericPname,
6939 get(bytecode.m_enumerator),
6940 get(bytecode.m_index)));
6941 NEXT_OPCODE(op_enumerator_generic_pname);
6942 }
6943
6944 case op_to_index_string: {
6945 auto bytecode = currentInstruction->as<OpToIndexString>();
6946 set(bytecode.m_dst, addToGraph(ToIndexString, get(bytecode.m_index)));
6947 NEXT_OPCODE(op_to_index_string);
6948 }
6949
6950 case op_log_shadow_chicken_prologue: {
6951 auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
6952 if (!m_inlineStackTop->m_inlineCallFrame)
6953 addToGraph(LogShadowChickenPrologue, get(bytecode.m_scope));
6954 NEXT_OPCODE(op_log_shadow_chicken_prologue);
6955 }
6956
6957 case op_log_shadow_chicken_tail: {
6958 auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
6959 if (!m_inlineStackTop->m_inlineCallFrame) {
6960 // FIXME: The right solution for inlining is to elide these whenever the tail call
6961 // ends up being inlined.
6962 // https://bugs.webkit.org/show_bug.cgi?id=155686
6963 addToGraph(LogShadowChickenTail, get(bytecode.m_thisValue), get(bytecode.m_scope));
6964 }
6965 NEXT_OPCODE(op_log_shadow_chicken_tail);
6966 }
6967
6968 case op_unreachable: {
6969 flushForTerminal();
6970 addToGraph(Unreachable);
6971 LAST_OPCODE(op_unreachable);
6972 }
6973
6974 default:
6975 // Parse failed! This should not happen because the capabilities checker
6976 // should have caught it.
6977 RELEASE_ASSERT_NOT_REACHED();
6978 return;
6979 }
6980 }
6981}
6982
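// Linking resolves the bytecode offsets recorded on a block's terminal node (Jump, Branch,
// or Switch) into pointers to the corresponding basic blocks.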
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node* node = block->terminal();
    ASSERT(node->isTerminal());

    switch (node->op()) {
    case Jump:
        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
        break;

    case Branch: {
        BranchData* data = node->branchData();
        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
        break;
    }

    case Switch: {
        SwitchData* data = node->switchData();
        for (unsigned i = node->switchData()->cases.size(); i--;)
            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    VERBOSE_LOG("Marking ", RawPointer(block), " as linked (actually did linking)\n");
    block->didLink();
}

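// Links every block that was queued as unlinked while parsing the current inline stack entry.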
void ByteCodeParser::linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        VERBOSE_LOG("Attempting to link ", RawPointer(unlinkedBlocks[i]), "\n");
        linkBlock(unlinkedBlocks[i], possibleTargets);
    }
}

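// An InlineStackEntry is pushed for the machine code block and for each inlined call. It snapshots
// the profiled block's exit profile, lazy operand profiles, and IC statuses, sets up the
// InlineCallFrame for inlined cases, and remaps identifier and switch-jump-table indices into the
// machine code block's tables.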
ByteCodeParser::InlineStackEntry::InlineStackEntry(
    ByteCodeParser* byteCodeParser,
    CodeBlock* codeBlock,
    CodeBlock* profiledBlock,
    JSFunction* callee, // Null if this is a closure call.
    VirtualRegister returnValueVR,
    VirtualRegister inlineCallFrameStart,
    int argumentCountIncludingThis,
    InlineCallFrame::Kind kind,
    BasicBlock* continuationBlock)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_continuationBlock(continuationBlock)
    , m_returnValue(returnValueVR)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    {
        m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock());

        ConcurrentJSLocker locker(m_profiledBlock->m_lock);
        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker));

        // We do this while holding the lock because we want to encourage StructureStubInfos
        // to be potentially added to operations and because the profiled block could be in the
        // middle of LLInt->JIT tier-up, in which case we would be adding the infos right now.
        if (m_profiledBlock->hasBaselineJITProfiling())
            m_profiledBlock->getICStatusMap(locker, m_baselineMap);
    }

    CodeBlock* optimizedBlock = m_profiledBlock->replacement();
    m_optimizedContext.optimizedCodeBlock = optimizedBlock;
    if (Options::usePolyvariantDevirtualization() && optimizedBlock) {
        ConcurrentJSLocker locker(optimizedBlock->m_lock);
        optimizedBlock->getICStatusMap(locker, m_optimizedContext.map);
    }
    byteCodeParser->m_icContextStack.append(&m_optimizedContext);

    int argumentCountIncludingThisWithFixup = std::max<int>(argumentCountIncludingThis, codeBlock->numParameters());

    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(inlineCallFrameStart.isValid());

        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames()->add();
        m_optimizedContext.inlineCallFrame = m_inlineCallFrame;

        // The owner is the machine code block, and we already have a barrier on that when the
        // plan finishes.
        m_inlineCallFrame->baselineCodeBlock.setWithoutWriteBarrier(codeBlock->baselineVersion());
        m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - CallFrame::headerSizeInRegisters);
        m_inlineCallFrame->argumentCountIncludingThis = argumentCountIncludingThis;
        if (callee) {
            m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
            m_inlineCallFrame->isClosureCall = false;
        } else
            m_inlineCallFrame->isClosureCall = true;
        m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin();
        m_inlineCallFrame->argumentsWithFixup.resizeToFit(argumentCountIncludingThisWithFixup); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        m_inlineCallFrame->kind = kind;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            UniquedStringImpl* rep = codeBlock->identifier(i).impl();
            unsigned index = byteCodeParser->m_graph.identifiers().ensure(rep);
            m_identifierRemap[i] = index;
        }
        for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
            m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
            byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
        }
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!callee);
        ASSERT(!returnValueVR.isValid());
        ASSERT(!inlineCallFrameStart.isValid());

        m_inlineCallFrame = nullptr;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
            m_switchRemap[i] = i;
    }

    m_argumentPositions.resize(argumentCountIncludingThisWithFixup);
    for (int i = 0; i < argumentCountIncludingThisWithFixup; ++i) {
        byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
        ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
        m_argumentPositions[i] = argumentPosition;
    }
    byteCodeParser->m_inlineCallFrameToArgumentPositions.add(m_inlineCallFrame, m_argumentPositions);

    byteCodeParser->m_inlineStackTop = this;
}

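// Popping an InlineStackEntry restores the previous inline stack top and IC context.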
ByteCodeParser::InlineStackEntry::~InlineStackEntry()
{
    m_byteCodeParser->m_inlineStackTop = m_caller;
    RELEASE_ASSERT(m_byteCodeParser->m_icContextStack.last() == &m_optimizedContext);
    m_byteCodeParser->m_icContextStack.removeLast();
}

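// Parses the code block at the top of the inline stack: the bytecode is split at its precise jump
// targets, and each resulting range is parsed into one or more basic blocks by parseBlock().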
void ByteCodeParser::parseCodeBlock()
{
    clearCaches();

    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;

    if (UNLIKELY(m_graph.compilation())) {
        m_graph.compilation()->addProfiledBytecodes(
            *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
    }

    if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
        Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
        if (inlineCallFrame()) {
            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITType::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
            deferredSourceDump.append(dump);
        } else
            deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
    }

    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Parsing ", *codeBlock);
        if (inlineCallFrame()) {
            dataLog(
                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT),
                " ", inlineCallFrame()->directCaller);
        }
        dataLog(
            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
        codeBlock->baselineVersion()->dumpBytecode();
    }

    Vector<InstructionStream::Offset, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);
    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Jump targets: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < jumpTargets.size(); ++i)
            dataLog(comma, jumpTargets[i]);
        dataLog("\n");
    }

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            // There may already be a currentBlock in two cases:
            // - we may have just entered the loop for the first time
            // - we may have just returned from an inlined callee that had some early returns and
            //   so allocated a continuation block, and the instruction after the call is a jump target.
            // In both cases, we want to keep using it.
            if (!m_currentBlock) {
                m_currentBlock = allocateTargetableBlock(m_currentIndex);

                // The first block is definitely an OSR target.
                if (m_graph.numBlocks() == 1) {
                    m_currentBlock->isOSRTarget = true;
                    m_graph.m_roots.append(m_currentBlock);
                }
                prepareToParseBlock();
            }

            parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            if (m_currentBlock->isEmpty()) {
                // This case only happens if the last instruction was an inlined call with early returns
                // or polymorphic (creating an empty continuation block),
                // and then we hit the limit before putting anything in the continuation block.
                ASSERT(m_currentIndex == limit);
                makeBlockTargetable(m_currentBlock, m_currentIndex);
            } else {
                ASSERT(m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()));
                m_currentBlock = nullptr;
            }
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());

    VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");
}

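// Handles op_put_by_val and op_put_by_val_direct. If the baseline ByValInfo recorded a single
// cached property identifier and no relevant exit sites have fired, the store is compiled as a
// PutById guarded by a CheckCell (symbols) or CheckStringIdent (strings) on the property;
// otherwise a generic PutByVal / PutByValDirect node is emitted.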
template <typename Bytecode>
void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize)
{
    Node* base = get(bytecode.m_base);
    Node* property = get(bytecode.m_property);
    Node* value = get(bytecode.m_value);
    bool isDirect = Bytecode::opcodeID == op_put_by_val_direct;
    bool compiledAsPutById = false;
    {
        unsigned identifierNumber = std::numeric_limits<unsigned>::max();
        PutByIdStatus putByIdStatus;
        {
            ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
            ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex())).byValInfo;
            // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
            // At that time, there is no information.
            if (byValInfo
                && byValInfo->stubInfo
                && !byValInfo->tookSlowPath
                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent)
                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType)
                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
                compiledAsPutById = true;
                identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
                UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];

                if (Symbol* symbol = byValInfo->cachedSymbol.get()) {
                    FrozenValue* frozen = m_graph.freezeStrong(symbol);
                    addToGraph(CheckCell, OpInfo(frozen), property);
                } else {
                    ASSERT(!uid->isSymbol());
                    addToGraph(CheckStringIdent, OpInfo(uid), property);
                }

                putByIdStatus = PutByIdStatus::computeForStubInfo(
                    locker, m_inlineStackTop->m_profiledBlock,
                    byValInfo->stubInfo, currentCodeOrigin(), uid);
            }
        }

        if (compiledAsPutById)
            handlePutById(base, identifierNumber, value, putByIdStatus, isDirect, instructionSize);
    }

    if (!compiledAsPutById) {
        ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).m_arrayProfile, Array::Write);

        addVarArgChild(base);
        addVarArgChild(property);
        addVarArgChild(value);
        addVarArgChild(nullptr); // Leave room for property storage.
        addVarArgChild(nullptr); // Leave room for length.
        addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
        m_exitOK = false; // PutByVal and PutByValDirect must be treated as if they clobber exit state, since FixupPhase may make them generic.
    }
}

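// Shared helpers for the accessor-definition opcodes (put_getter/put_setter by id and by val):
// they emit the requested node, passing along the accessor attributes from the bytecode.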
template <typename Bytecode>
void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode)
{
    Node* base = get(bytecode.m_base);
    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.m_property];
    Node* accessor = get(bytecode.m_accessor);
    addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.m_attributes), base, accessor);
}

template <typename Bytecode>
void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode)
{
    Node* base = get(bytecode.m_base);
    Node* subscript = get(bytecode.m_property);
    Node* accessor = get(bytecode.m_accessor);
    addToGraph(op, OpInfo(bytecode.m_attributes), base, subscript, accessor);
}

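// Shared helpers for the function-materialization opcodes: they freeze the FunctionExecutable,
// emit the requested node, and keep the scope alive with a Phantom for the reasons explained in
// the comments below.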
template <typename Bytecode>
void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode)
{
    FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.m_functionDecl);
    FrozenValue* frozen = m_graph.freezeStrong(decl);
    Node* scope = get(bytecode.m_scope);
    set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
    // Ideally we wouldn't have to do this Phantom. But:
    //
    // For the constant case: we must do it because otherwise we would have no way of knowing
    // that the scope is live at OSR here.
    //
    // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
    // won't be able to handle an Undefined scope.
    addToGraph(Phantom, scope);
}

template <typename Bytecode>
void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode)
{
    FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.m_functionDecl);
    FrozenValue* frozen = m_graph.freezeStrong(expr);
    Node* scope = get(bytecode.m_scope);
    set(bytecode.m_dst, addToGraph(op, OpInfo(frozen), scope));
    // Ideally we wouldn't have to do this Phantom. But:
    //
    // For the constant case: we must do it because otherwise we would have no way of knowing
    // that the scope is live at OSR here.
    //
    // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation
    // won't be able to handle an Undefined scope.
    addToGraph(Phantom, scope);
}

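// Top-level driver: parses the machine code block (inlining as it goes), links the resulting
// blocks, trims code that follows ForceOSRExit nodes, and finalizes reachability and variable
// bookkeeping for the graph.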
void ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

    VERBOSE_LOG("Parsing ", *m_codeBlock, "\n");

    InlineStackEntry inlineStackEntry(
        this, m_codeBlock, m_profiledBlock, nullptr, VirtualRegister(), VirtualRegister(),
        m_codeBlock->numParameters(), InlineCallFrame::Call, nullptr);

    parseCodeBlock();
    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);

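    // If any ForceOSRExit nodes were planted, trim the graph: leave alone the OSR entry target
    // block and every block that can reach it, and in each remaining block that contains a
    // ForceOSRExit, preserve liveness (Flush/PhantomLocal for locals, Phantom for data-flow
    // edges), drop the nodes that follow, and terminate the block with Unreachable.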
    if (m_hasAnyForceOSRExits) {
        BlockSet blocksToIgnore;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) {
                blocksToIgnore.add(block);
                break;
            }
        }

        {
            bool isSafeToValidate = false;
            auto postOrder = m_graph.blocksInPostOrder(isSafeToValidate); // This algorithm doesn't rely on the predecessors list, which is not yet built.
            bool changed;
            do {
                changed = false;
                for (BasicBlock* block : postOrder) {
                    for (BasicBlock* successor : block->successors()) {
                        if (blocksToIgnore.contains(successor)) {
                            changed |= blocksToIgnore.add(block);
                            break;
                        }
                    }
                }
            } while (changed);
        }

        InsertionSet insertionSet(m_graph);
        Operands<VariableAccessData*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            if (blocksToIgnore.contains(block))
                continue;

            mapping.fill(nullptr);
            if (validationEnabled()) {
                // Verify that it's correct to fill mapping with nullptr.
                for (unsigned i = 0; i < block->variablesAtHead.size(); ++i) {
                    Node* node = block->variablesAtHead.at(i);
                    RELEASE_ASSERT(!node);
                }
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                {
                    Node* node = block->at(nodeIndex);

                    if (node->hasVariableAccessData(m_graph))
                        mapping.operand(node->local()) = node->variableAccessData();

                    if (node->op() != ForceOSRExit)
                        continue;
                }

                NodeOrigin origin = block->at(nodeIndex)->origin;
                RELEASE_ASSERT(origin.exitOK);

                ++nodeIndex;

                {
                    if (validationEnabled()) {
                        // This verifies that we don't need to change any of the successors' predecessor
                        // lists after planting the Unreachable below. At this point in the bytecode
                        // parser, we haven't linked up the predecessor lists yet.
                        for (BasicBlock* successor : block->successors())
                            RELEASE_ASSERT(successor->predecessors.isEmpty());
                    }

                    auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
                        VariableAccessData* variable = mapping.operand(operand);
                        if (!variable) {
                            variable = newVariableAccessData(operand);
                            mapping.operand(operand) = variable;
                        }

                        VirtualRegister argument = operand - (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
                        if (argument.isArgument() && !argument.isHeader()) {
                            const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
                            arguments[argument.toArgument()]->addVariable(variable);
                        }
                        insertionSet.insertNode(nodeIndex, SpecNone, op, origin, OpInfo(variable));
                    };
                    auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                        insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
                    };
                    auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
                        insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
                    };
                    flushForTerminalImpl(origin.semantic, addFlushDirect, addPhantomLocalDirect);
                }

                while (true) {
                    RELEASE_ASSERT(nodeIndex < block->size());

                    Node* node = block->at(nodeIndex);

                    node->origin = origin;
                    m_graph.doToChildren(node, [&] (Edge edge) {
                        // We only need to keep data flow edges to nodes defined prior to the ForceOSRExit. The reason
                        // for this is that we rely on backwards propagation being able to see the "full" bytecode. To model
                        // this, we preserve uses of a node in a generic way so that backwards propagation can reason
                        // about them. Therefore, we can't remove uses of a node which is defined before the ForceOSRExit
                        // even when we're at a point in the program after the ForceOSRExit, because that would break backwards
                        // propagation's analysis over the uses of a node. However, we don't need this same preservation for
                        // nodes defined after the ForceOSRExit, as we've already exited before those defs.
                        if (edge->hasResult())
                            insertionSet.insertNode(nodeIndex, SpecNone, Phantom, origin, Edge(edge.node(), UntypedUse));
                    });

                    bool isTerminal = node->isTerminal();

                    node->removeWithoutChecks();

                    if (isTerminal) {
                        insertionSet.insertNode(nodeIndex, SpecNone, Unreachable, origin);
                        break;
                    }

                    ++nodeIndex;
                }

                insertionSet.execute(block);

                auto nodeAndIndex = block->findTerminal();
                RELEASE_ASSERT(nodeAndIndex.node->op() == Unreachable);
                block->resize(nodeAndIndex.index + 1);
                break;
            }
        }
    } else if (validationEnabled()) {
        // Ensure our bookkeeping for ForceOSRExit nodes is working.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block)
                RELEASE_ASSERT(node->op() != ForceOSRExit);
        }
    }

    m_graph.determineReachability();
    m_graph.killUnreachableBlocks();

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
        ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
    }

    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;
}

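// Entry point: constructs a ByteCodeParser for the graph and runs it.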
void parse(Graph& graph)
{
    ByteCodeParser(graph).parse();
}

} } // namespace JSC::DFG

#endif