1/*
2 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "FTLLowerDFGToB3.h"
28
29#if ENABLE(FTL_JIT)
30
31#include "AirCode.h"
32#include "AirGenerationContext.h"
33#include "AllowMacroScratchRegisterUsage.h"
34#include "AllowMacroScratchRegisterUsageIf.h"
35#include "AtomicsObject.h"
36#include "B3CheckValue.h"
37#include "B3FenceValue.h"
38#include "B3PatchpointValue.h"
39#include "B3SlotBaseValue.h"
40#include "B3StackmapGenerationParams.h"
41#include "B3ValueInlines.h"
42#include "CallFrameShuffler.h"
43#include "CodeBlockWithJITType.h"
44#include "DFGAbstractInterpreterInlines.h"
45#include "DFGCapabilities.h"
46#include "DFGDoesGC.h"
47#include "DFGDominators.h"
48#include "DFGInPlaceAbstractState.h"
49#include "DFGLivenessAnalysisPhase.h"
50#include "DFGMayExit.h"
51#include "DFGOSRAvailabilityAnalysisPhase.h"
52#include "DFGOSRExitFuzz.h"
53#include "DirectArguments.h"
54#include "FTLAbstractHeapRepository.h"
55#include "FTLAvailableRecovery.h"
56#include "FTLExceptionTarget.h"
57#include "FTLForOSREntryJITCode.h"
58#include "FTLFormattedValue.h"
59#include "FTLLazySlowPathCall.h"
60#include "FTLLoweredNodeValue.h"
61#include "FTLOperations.h"
62#include "FTLOutput.h"
63#include "FTLPatchpointExceptionHandle.h"
64#include "FTLSnippetParams.h"
65#include "FTLThunks.h"
66#include "FTLWeightedTarget.h"
67#include "JITAddGenerator.h"
68#include "JITBitAndGenerator.h"
69#include "JITBitOrGenerator.h"
70#include "JITBitXorGenerator.h"
71#include "JITDivGenerator.h"
72#include "JITInlineCacheGenerator.h"
73#include "JITLeftShiftGenerator.h"
74#include "JITMathIC.h"
75#include "JITMulGenerator.h"
76#include "JITRightShiftGenerator.h"
77#include "JITSubGenerator.h"
78#include "JSAsyncFunction.h"
79#include "JSAsyncGeneratorFunction.h"
80#include "JSCInlines.h"
81#include "JSGeneratorFunction.h"
82#include "JSImmutableButterfly.h"
83#include "JSLexicalEnvironment.h"
84#include "JSMap.h"
85#include "OperandsInlines.h"
86#include "ProbeContext.h"
87#include "RegExpObject.h"
88#include "ScopedArguments.h"
89#include "ScopedArgumentsTable.h"
90#include "ScratchRegisterAllocator.h"
91#include "SetupVarargsFrame.h"
92#include "ShadowChicken.h"
93#include "StructureStubInfo.h"
94#include "SuperSampler.h"
95#include "ThunkGenerators.h"
96#include "VirtualRegister.h"
97#include "Watchdog.h"
98#include <atomic>
99#include <wtf/Box.h>
100#include <wtf/Gigacage.h>
101#include <wtf/RecursableLambda.h>
102#include <wtf/StdUnorderedSet.h>
103
// Replace the stock RELEASE_ASSERT with a local variant that always reports the
// failing expression and source location before crashing. Lowering bugs in this
// file are very hard to diagnose from a bare crash address, so we want the
// report unconditionally. NOTE(review): presumably the default RELEASE_ASSERT
// omits the textual report in some build configurations — confirm against
// WTF/Assertions.h before relying on this distinction.
#undef RELEASE_ASSERT
#define RELEASE_ASSERT(assertion) do { \
    if (!(assertion)) { \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion); \
        CRASH(); \
    } \
} while (0)
111
112namespace JSC { namespace FTL {
113
114using namespace B3;
115using namespace DFG;
116
117namespace {
118
119std::atomic<int> compileCounter;
120
121#if !ASSERT_DISABLED
122NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
123 CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
124{
125 dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
126 if (nodeIndex != UINT_MAX)
127 dataLog(", node @", nodeIndex);
128 dataLog(".\n");
129 CRASH();
130}
131#endif
132
// Using this instead of typeCheck() helps to reduce the load on B3, by creating
// significantly less dead code.
//
// The macro evaluates its operands exactly once into temporaries, then asks the
// abstract interpreter whether the edge could carry anything outside
// typesPassedThrough. If it cannot, no check is emitted at all; otherwise a
// typeCheck() with the given exit kind is lowered, failing when failCondition
// holds.
#define FTL_TYPE_CHECK_WITH_EXIT_KIND(exitKind, lowValue, highValue, typesPassedThrough, failCondition) do { \
        FormattedValue _ftc_lowValue = (lowValue); \
        Edge _ftc_highValue = (highValue); \
        SpeculatedType _ftc_typesPassedThrough = (typesPassedThrough); \
        if (!m_interpreter.needsTypeCheck(_ftc_highValue, _ftc_typesPassedThrough)) \
            break; \
        typeCheck(_ftc_lowValue, _ftc_highValue, _ftc_typesPassedThrough, (failCondition), exitKind); \
    } while (false)

// Common case: a type check whose failure is reported with the BadType exit kind.
#define FTL_TYPE_CHECK(lowValue, highValue, typesPassedThrough, failCondition) \
    FTL_TYPE_CHECK_WITH_EXIT_KIND(BadType, lowValue, highValue, typesPassedThrough, failCondition)
146
147class LowerDFGToB3 {
148 WTF_MAKE_NONCOPYABLE(LowerDFGToB3);
149public:
150 LowerDFGToB3(State& state)
151 : m_graph(state.graph)
152 , m_ftlState(state)
153 , m_out(state)
154 , m_proc(*state.proc)
155 , m_availabilityCalculator(m_graph)
156 , m_state(state.graph)
157 , m_interpreter(state.graph, m_state)
158 , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
159 {
160 if (Options::validateAbstractInterpreterState()) {
161 performLivenessAnalysis(m_graph);
162
163 // We only use node liveness here, not combined liveness, as we only track
164 // AI state for live nodes.
165 for (DFG::BasicBlock* block : m_graph.blocksInNaturalOrder()) {
166 NodeSet live;
167
168 for (NodeFlowProjection node : block->ssa->liveAtTail) {
169 if (node.kind() == NodeFlowProjection::Primary)
170 live.addVoid(node.node());
171 }
172
173 for (unsigned i = block->size(); i--; ) {
174 Node* node = block->at(i);
175 live.remove(node);
176 m_graph.doToChildren(node, [&] (Edge child) {
177 live.addVoid(child.node());
178 });
179 m_liveInToNode.add(node, live);
180 }
181 }
182 }
183 }
184
    // Entry point of the lowering pass: builds the entire B3 procedure for the
    // DFG graph — entrypoint prologues, stack check, argument speculation,
    // exception hand-off, and then every reachable basic block.
    void lower()
    {
        State* state = &m_ftlState;

        // Compilation name, used only when verbose compilation is on.
        // NOTE(review): `name` does not appear to be consumed later in the
        // visible portion of this function — confirm it is still needed.
        CString name;
        if (verboseCompilationEnabled()) {
            name = toCString(
                "jsBody_", ++compileCounter, "_", codeBlock()->inferredName(),
                "_", codeBlock()->hash());
        } else
            name = "jsBody";

        {
            m_proc.setNumEntrypoints(m_graph.m_numberOfEntrypoints);
            CodeBlock* codeBlock = m_graph.m_codeBlock;

            // Shared prologue for every catch (OSR-entry) entrypoint: set up the
            // stack pointer, optionally zero the frame, save callee-saves, and
            // store the CodeBlock into the call frame header.
            Ref<B3::Air::PrologueGenerator> catchPrologueGenerator = createSharedTask<B3::Air::PrologueGeneratorFunction>(
                [codeBlock] (CCallHelpers& jit, B3::Air::Code& code) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);
                    jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                    if (Options::zeroStackFrame())
                        jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, code.frameSize());

                    jit.emitSave(code.calleeSaveRegisterAtOffsetList());
                    jit.emitPutToCallFrameHeader(codeBlock, CallFrameSlot::codeBlock);
                });

            // Entrypoint 0 is the normal entrypoint and keeps the default
            // prologue; every other entrypoint is a catch entrypoint.
            for (unsigned catchEntrypointIndex : m_graph.m_entrypointIndexToCatchBytecodeOffset.keys()) {
                RELEASE_ASSERT(catchEntrypointIndex != 0);
                m_proc.code().setPrologueForEntrypoint(catchEntrypointIndex, catchPrologueGenerator.copyRef());
            }

            if (m_graph.m_maxLocalsForCatchOSREntry) {
                uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
                m_ftlState.jitCode->common.catchOSREntryBuffer = m_graph.m_vm.scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
            }
        }

        m_graph.ensureSSADominators();

        if (verboseCompilationEnabled())
            dataLog("Function ready, beginning lowering.\n");

        m_out.initialize(m_heaps);

        // We use prologue frequency for all of the initialization code.
        m_out.setFrequency(1);

        bool hasMultipleEntrypoints = m_graph.m_numberOfEntrypoints > 1;

        LBasicBlock prologue = m_out.newBlock();
        LBasicBlock callEntrypointArgumentSpeculations = hasMultipleEntrypoints ? m_out.newBlock() : nullptr;
        m_handleExceptions = m_out.newBlock();

        // Create one low (B3) block per non-null high (DFG) block, carrying
        // over each block's execution frequency.
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            m_highBlock = m_graph.block(blockIndex);
            if (!m_highBlock)
                continue;
            m_out.setFrequency(m_highBlock->executionCount);
            m_blocks.add(m_highBlock, m_out.newBlock());
        }

        // Back to prologue frequency for any bocks that get sneakily created in the initialization code.
        m_out.setFrequency(1);

        m_out.appendTo(prologue, hasMultipleEntrypoints ? callEntrypointArgumentSpeculations : m_handleExceptions);
        m_out.initializeConstants(m_proc, prologue);
        createPhiVariables();

        // Reserve the stack area used for OSR exit state; m_captured points at
        // the high end of that area.
        size_t sizeOfCaptured = sizeof(JSValue) * m_graph.m_nextMachineLocal;
        B3::SlotBaseValue* capturedBase = m_out.lockedStackSlot(sizeOfCaptured);
        m_captured = m_out.add(capturedBase, m_out.constIntPtr(sizeOfCaptured));
        state->capturedValue = capturedBase->slot();

        auto preOrder = m_graph.blocksInPreOrder();

        m_callFrame = m_out.framePointer();
        m_tagTypeNumber = m_out.constInt64(TagTypeNumber);
        m_tagMask = m_out.constInt64(TagMask);

        // Make sure that B3 knows that we really care about the mask registers. This forces the
        // constants to be materialized in registers.
        m_proc.addFastConstant(m_tagTypeNumber->key());
        m_proc.addFastConstant(m_tagMask->key());

        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), addressFor(CallFrameSlot::codeBlock));

        VM* vm = &this->vm();

        // Stack Overflow Check.
        // Emitted as a patchpoint so the slow path (restore callee-saves, record
        // the call site, throw, and jump to the exception handler) can live on a
        // late path out of line of the hot code.
        unsigned exitFrameSize = m_graph.requiredRegisterCountForExit() * sizeof(Register);
        MacroAssembler::AbsoluteAddress addressOfStackLimit(vm->addressOfSoftStackLimit());
        PatchpointValue* stackOverflowHandler = m_out.patchpoint(Void);
        CallSiteIndex callSiteIndex = callSiteIndexForCodeOrigin(m_ftlState, CodeOrigin(0));
        stackOverflowHandler->appendSomeRegister(m_callFrame);
        stackOverflowHandler->clobber(RegisterSet::macroScratchRegisters());
        stackOverflowHandler->numGPScratchRegisters = 1;
        stackOverflowHandler->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg fp = params[0].gpr();
                GPRReg scratch = params.gpScratch(0);

                // The check must cover the larger of the FTL frame and the frame
                // needed if we OSR-exit out of this code.
                unsigned ftlFrameSize = params.proc().frameSize();
                unsigned maxFrameSize = std::max(exitFrameSize, ftlFrameSize);

                jit.addPtr(MacroAssembler::TrustedImm32(-maxFrameSize), fp, scratch);
                MacroAssembler::JumpList stackOverflow;
                // If the frame is larger than the reserved zone, fp - maxFrameSize
                // may wrap around; scratch > fp detects that wrap.
                if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
                    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, scratch, fp));
                stackOverflow.append(jit.branchPtr(MacroAssembler::Above, addressOfStackLimit, scratch));

                params.addLatePath([=] (CCallHelpers& jit) {
                    AllowMacroScratchRegisterUsage allowScratch(jit);

                    stackOverflow.link(&jit);

                    // FIXME: We would not have to do this if the stack check was part of the Air
                    // prologue. Then, we would know that there is no way for the callee-saves to
                    // get clobbered.
                    // https://bugs.webkit.org/show_bug.cgi?id=172456
                    jit.emitRestore(params.proc().calleeSaveRegisterAtOffsetList());

                    jit.store32(
                        MacroAssembler::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                    jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm->topEntryFrame);

                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
                    jit.move(CCallHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::argumentGPR1);
                    CCallHelpers::Call throwCall = jit.call(OperationPtrTag);

                    jit.move(CCallHelpers::TrustedImmPtr(vm), GPRInfo::argumentGPR0);
                    jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
                    CCallHelpers::Call lookupExceptionHandlerCall = jit.call(OperationPtrTag);
                    jit.jumpToExceptionHandler(*vm);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            linkBuffer.link(throwCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
                            linkBuffer.link(lookupExceptionHandlerCall, FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame));
                        });
                });
            });

        LBasicBlock firstDFGBasicBlock = lowBlock(m_graph.block(0));

        {
            if (hasMultipleEntrypoints) {
                Vector<LBasicBlock> successors(m_graph.m_numberOfEntrypoints);
                successors[0] = callEntrypointArgumentSpeculations;
                for (unsigned i = 1; i < m_graph.m_numberOfEntrypoints; ++i) {
                    // Currently, the only other entrypoint is an op_catch entrypoint.
                    // We do OSR entry at op_catch, and we prove argument formats before
                    // jumping to FTL code, so we don't need to check argument types here
                    // for these entrypoints.
                    successors[i] = firstDFGBasicBlock;
                }

                m_out.entrySwitch(successors);
                m_out.appendTo(callEntrypointArgumentSpeculations, m_handleExceptions);
            }

            m_node = nullptr;
            m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);

            // Check Arguments.
            // Arguments are available as flushed JSValues at their stack slots;
            // record that for OSR exit before emitting any speculation.
            availabilityMap().clear();
            availabilityMap().m_locals = Operands<Availability>(codeBlock()->numParameters(), 0);
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                availabilityMap().m_locals.argument(i) =
                    Availability(FlushedAt(FlushedJSValue, virtualRegisterForArgument(i)));
            }

            // Speculate on each argument according to the format proven for the
            // normal entrypoint (entrypoint 0).
            for (unsigned i = codeBlock()->numParameters(); i--;) {
                MethodOfGettingAValueProfile profile(&m_graph.m_profiledBlock->valueProfileForArgument(i));
                VirtualRegister operand = virtualRegisterForArgument(i);
                LValue jsValue = m_out.load64(addressFor(operand));

                switch (m_graph.m_argumentFormats[0][i]) {
                case FlushedInt32:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotInt32(jsValue));
                    break;
                case FlushedBoolean:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotBoolean(jsValue));
                    break;
                case FlushedCell:
                    speculate(BadType, jsValueValue(jsValue), profile, isNotCell(jsValue));
                    break;
                case FlushedJSValue:
                    break;
                default:
                    DFG_CRASH(m_graph, nullptr, "Bad flush format for argument");
                    break;
                }
            }
            m_out.jump(firstDFGBasicBlock);
        }

        // The shared exception-handling block: a patchpoint that simply jumps to
        // the exception handler label established in the FTL state.
        m_out.appendTo(m_handleExceptions, firstDFGBasicBlock);
        Box<CCallHelpers::Label> exceptionHandler = state->exceptionHandler;
        m_out.patchpoint(Void)->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
                CCallHelpers::Jump jump = jit.jump();
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        linkBuffer.link(jump, linkBuffer.locationOf<ExceptionHandlerPtrTag>(*exceptionHandler));
                    });
            });
        m_out.unreachable();

        for (DFG::BasicBlock* block : preOrder)
            compileBlock(block);

        // Make sure everything is decorated. This does a bunch of deferred decorating. This has
        // to happen last because our abstract heaps are generated lazily. They have to be
        // generated lazily because we have an infinite number of numbered, indexed, and
        // absolute heaps. We only become aware of the ones we actually mention while lowering.
        m_heaps.computeRangesAndDecorateInstructions();

        // We create all Phi's up front, but we may then decide not to compile the basic block
        // that would have contained one of them. So this creates orphans, which triggers B3
        // validation failures. Calling this fixes the issue.
        //
        // Note that you should avoid the temptation to make this call conditional upon
        // validation being enabled. B3 makes no guarantees of any kind of correctness when
        // dealing with IR that would have failed validation. For example, it would be valid to
        // write a B3 phase that so aggressively assumes the lack of orphans that it would crash
        // if any orphans were around. We might even have such phases already.
        m_proc.deleteOrphans();

        // We put the blocks into the B3 procedure in a super weird order. Now we reorder them.
        m_out.applyBlockOrder();
    }
422
423private:
424
425 void createPhiVariables()
426 {
427 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
428 DFG::BasicBlock* block = m_graph.block(blockIndex);
429 if (!block)
430 continue;
431 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
432 Node* node = block->at(nodeIndex);
433 if (node->op() != DFG::Phi)
434 continue;
435 LType type;
436 switch (node->flags() & NodeResultMask) {
437 case NodeResultDouble:
438 type = Double;
439 break;
440 case NodeResultInt32:
441 type = Int32;
442 break;
443 case NodeResultInt52:
444 type = Int64;
445 break;
446 case NodeResultBoolean:
447 type = Int32;
448 break;
449 case NodeResultJS:
450 type = Int64;
451 break;
452 default:
453 DFG_CRASH(m_graph, node, "Bad Phi node result type");
454 break;
455 }
456 m_phis.add(node, m_proc.add<Value>(B3::Phi, type, Origin(node)));
457 }
458 }
459 }
460
461 void compileBlock(DFG::BasicBlock* block)
462 {
463 if (!block)
464 return;
465
466 if (verboseCompilationEnabled())
467 dataLog("Compiling block ", *block, "\n");
468
469 m_highBlock = block;
470
471 // Make sure that any blocks created while lowering code in the high block have the frequency of
472 // the high block. This is appropriate because B3 doesn't need precise frequencies. It just needs
473 // something roughly approximate for things like register allocation.
474 m_out.setFrequency(m_highBlock->executionCount);
475
476 LBasicBlock lowBlock = m_blocks.get(m_highBlock);
477
478 m_nextHighBlock = 0;
479 for (BlockIndex nextBlockIndex = m_highBlock->index + 1; nextBlockIndex < m_graph.numBlocks(); ++nextBlockIndex) {
480 m_nextHighBlock = m_graph.block(nextBlockIndex);
481 if (m_nextHighBlock)
482 break;
483 }
484 m_nextLowBlock = m_nextHighBlock ? m_blocks.get(m_nextHighBlock) : 0;
485
486 // All of this effort to find the next block gives us the ability to keep the
487 // generated IR in roughly program order. This ought not affect the performance
488 // of the generated code (since we expect B3 to reorder things) but it will
489 // make IR dumps easier to read.
490 m_out.appendTo(lowBlock, m_nextLowBlock);
491
492 if (Options::ftlCrashes())
493 m_out.trap();
494
495 if (!m_highBlock->cfaHasVisited) {
496 if (verboseCompilationEnabled())
497 dataLog("Bailing because CFA didn't reach.\n");
498 crash(m_highBlock, nullptr);
499 return;
500 }
501
502 m_aiCheckedNodes.clear();
503
504 m_availabilityCalculator.beginBlock(m_highBlock);
505
506 m_state.reset();
507 m_state.beginBasicBlock(m_highBlock);
508
509 for (m_nodeIndex = 0; m_nodeIndex < m_highBlock->size(); ++m_nodeIndex) {
510 if (!compileNode(m_nodeIndex))
511 break;
512 }
513 }
514
515 void safelyInvalidateAfterTermination()
516 {
517 if (verboseCompilationEnabled())
518 dataLog("Bailing.\n");
519 crash();
520
521 // Invalidate dominated blocks. Under normal circumstances we would expect
522 // them to be invalidated already. But you can have the CFA become more
523 // precise over time because the structures of objects change on the main
524 // thread. Failing to do this would result in weird crashes due to a value
525 // being used but not defined. Race conditions FTW!
526 for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
527 DFG::BasicBlock* target = m_graph.block(blockIndex);
528 if (!target)
529 continue;
530 if (m_graph.m_ssaDominators->dominates(m_highBlock, target)) {
531 if (verboseCompilationEnabled())
532 dataLog("Block ", *target, " will bail also.\n");
533 target->cfaHasVisited = false;
534 }
535 }
536 }
537
    // Debug aid, active only under Options::validateAbstractInterpreterState():
    // before lowering |node|, emit runtime probes that verify the actual values
    // of nodes live at this point against what the abstract interpreter claims
    // about them, crashing with a full graph dump on any mismatch.
    void validateAIState(Node* node)
    {
        // Lazily capture a dump of the whole graph once, for use in failure output.
        if (!m_graphDump) {
            StringPrintStream out;
            m_graph.dump(out);
            m_graphDump = out.toString();
        }

        // Skip ops that are hints/constants/stack bookkeeping — nothing useful
        // to validate before them.
        switch (node->op()) {
        case MovHint:
        case ZombieHint:
        case JSConstant:
        case LazyJSConstant:
        case DoubleConstant:
        case Int52Constant:
        case GetStack:
        case PutStack:
        case KillStack:
        case ExitOK:
            return;
        default:
            break;
        }

        // Before we execute node.
        NodeSet& live = m_liveInToNode.find(node)->value;
        unsigned highParentIndex = node->index();
        {
            // Sampling: only validate at this node with probability
            // validateAbstractInterpreterStateProbability. Keying off a hash of
            // the node index makes the decision deterministic per node.
            uint64_t hash = WTF::intHash(highParentIndex);
            if (hash >= static_cast<uint64_t>((static_cast<double>(std::numeric_limits<unsigned>::max()) + 1) * Options::validateAbstractInterpreterStateProbability()))
                return;
        }

        // Note: this loop variable deliberately shadows the |node| parameter;
        // everything below is about the live node, not the parent.
        for (Node* node : live) {
            if (node->isPhantomAllocation())
                continue;

            if (node->op() == CheckInBounds)
                continue;

            AbstractValue value = m_interpreter.forNode(node);
            {
                // Skip a node whose AI value is unchanged since we last checked
                // it — except when the value may be a cell, which is always
                // re-checked. NOTE(review): presumably cell values are re-checked
                // because their structure information can change concurrently —
                // confirm.
                auto iter = m_aiCheckedNodes.find(node);
                if (iter != m_aiCheckedNodes.end()) {
                    AbstractValue checkedValue = iter->value;
                    if (checkedValue == value) {
                        if (!(value.m_type & SpecCell))
                            continue;
                    }
                }
                m_aiCheckedNodes.set(node, value);
            }

            // Materialize the live node's value in a uniform representation for
            // the probe: boxed JSValue, raw double, or Int52 reboxed as JSValue.
            FlushFormat flushFormat;
            LValue input;
            if (node->hasJSResult()) {
                input = lowJSValue(Edge(node, UntypedUse));
                flushFormat = FlushedJSValue;
            } else if (node->hasDoubleResult()) {
                input = lowDouble(Edge(node, DoubleRepUse));
                flushFormat = FlushedDouble;
            } else if (node->hasInt52Result()) {
                input = strictInt52ToJSValue(lowStrictInt52(Edge(node, Int52RepUse)));
                flushFormat = FlushedInt52;
            } else
                continue;

            unsigned highChildIndex = node->index();

            String graphDump = m_graphDump;

            // Emit a patchpoint that probes the live value at run time. It is
            // given a writesLocalState effect — NOTE(review): presumably so B3
            // does not treat it as pure and eliminate it; confirm.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            patchpoint->effects = Effects::none();
            patchpoint->effects.writesLocalState = true;
            patchpoint->appendSomeRegister(input);
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                GPRReg reg = InvalidGPRReg;
                FPRReg fpReg = InvalidFPRReg;
                if (flushFormat == FlushedDouble)
                    fpReg = params[0].fpr();
                else
                    reg = params[0].gpr();
                jit.probe([=] (Probe::Context& context) {
                    JSValue input;
                    double doubleInput;

                    // Print what AI expected vs. what we actually saw, plus the
                    // full graph, then crash.
                    auto dumpAndCrash = [&] {
                        dataLogLn("Validation failed at node: @", highParentIndex);
                        dataLogLn("Failed validating live value: @", highChildIndex);
                        dataLogLn();
                        dataLogLn("Expected AI value = ", value);
                        if (flushFormat != FlushedDouble)
                            dataLogLn("Unexpected value = ", input);
                        else
                            dataLogLn("Unexpected double value = ", doubleInput);
                        dataLogLn();
                        dataLogLn(graphDump);
                        CRASH();
                    };

                    if (flushFormat == FlushedDouble) {
                        // Classify the observed double (number, pure NaN, or
                        // impure NaN) and check AI allowed that type.
                        doubleInput = context.fpr(fpReg);
                        SpeculatedType type;
                        if (!std::isnan(doubleInput))
                            type = speculationFromValue(jsDoubleNumber(doubleInput));
                        else if (isImpureNaN(doubleInput))
                            type = SpecDoubleImpureNaN;
                        else
                            type = SpecDoublePureNaN;

                        if (!value.couldBeType(type))
                            dumpAndCrash();
                    } else {
                        input = JSValue::decode(context.gpr(reg));
                        if (flushFormat == FlushedInt52) {
                            RELEASE_ASSERT(input.isAnyInt());
                            input = jsDoubleNumber(input.asAnyInt());
                        }
                        if (!value.validateOSREntryValue(input, flushFormat))
                            dumpAndCrash();
                    }

                });
            });
        }
    }
664
665 bool compileNode(unsigned nodeIndex)
666 {
667 if (!m_state.isValid()) {
668 safelyInvalidateAfterTermination();
669 return false;
670 }
671
672 m_node = m_highBlock->at(nodeIndex);
673 m_origin = m_node->origin;
674 m_out.setOrigin(m_node);
675
676 if (verboseCompilationEnabled())
677 dataLog("Lowering ", m_node, "\n");
678
679 m_interpreter.startExecuting();
680 m_interpreter.executeKnownEdgeTypes(m_node);
681
682 if (Options::validateAbstractInterpreterState())
683 validateAIState(m_node);
684
685 if (validateDFGDoesGC) {
686 bool expectDoesGC = doesGC(m_graph, m_node);
687 m_out.store(m_out.constBool(expectDoesGC), m_out.absolute(vm().heap.addressOfExpectDoesGC()));
688 }
689
690 switch (m_node->op()) {
691 case DFG::Upsilon:
692 compileUpsilon();
693 break;
694 case DFG::Phi:
695 compilePhi();
696 break;
697 case JSConstant:
698 break;
699 case DoubleConstant:
700 compileDoubleConstant();
701 break;
702 case Int52Constant:
703 compileInt52Constant();
704 break;
705 case LazyJSConstant:
706 compileLazyJSConstant();
707 break;
708 case DoubleRep:
709 compileDoubleRep();
710 break;
711 case DoubleAsInt32:
712 compileDoubleAsInt32();
713 break;
714 case DFG::ValueRep:
715 compileValueRep();
716 break;
717 case Int52Rep:
718 compileInt52Rep();
719 break;
720 case ValueToInt32:
721 compileValueToInt32();
722 break;
723 case BooleanToNumber:
724 compileBooleanToNumber();
725 break;
726 case ExtractOSREntryLocal:
727 compileExtractOSREntryLocal();
728 break;
729 case ExtractCatchLocal:
730 compileExtractCatchLocal();
731 break;
732 case ClearCatchLocals:
733 compileClearCatchLocals();
734 break;
735 case GetStack:
736 compileGetStack();
737 break;
738 case PutStack:
739 compilePutStack();
740 break;
741 case DFG::Check:
742 case CheckVarargs:
743 compileNoOp();
744 break;
745 case ToObject:
746 case CallObjectConstructor:
747 compileToObjectOrCallObjectConstructor();
748 break;
749 case ToThis:
750 compileToThis();
751 break;
752 case ValueNegate:
753 compileValueNegate();
754 break;
755 case ValueAdd:
756 compileValueAdd();
757 break;
758 case ValueSub:
759 compileValueSub();
760 break;
761 case ValueMul:
762 compileValueMul();
763 break;
764 case StrCat:
765 compileStrCat();
766 break;
767 case ArithAdd:
768 case ArithSub:
769 compileArithAddOrSub();
770 break;
771 case ArithClz32:
772 compileArithClz32();
773 break;
774 case ArithMul:
775 compileArithMul();
776 break;
777 case ValueDiv:
778 compileValueDiv();
779 break;
780 case ArithDiv:
781 compileArithDiv();
782 break;
783 case ValueMod:
784 compileValueMod();
785 break;
786 case ArithMod:
787 compileArithMod();
788 break;
789 case ArithMin:
790 case ArithMax:
791 compileArithMinOrMax();
792 break;
793 case ArithAbs:
794 compileArithAbs();
795 break;
796 case ArithPow:
797 compileArithPow();
798 break;
799 case ArithRandom:
800 compileArithRandom();
801 break;
802 case ArithRound:
803 compileArithRound();
804 break;
805 case ArithFloor:
806 compileArithFloor();
807 break;
808 case ArithCeil:
809 compileArithCeil();
810 break;
811 case ArithTrunc:
812 compileArithTrunc();
813 break;
814 case ArithSqrt:
815 compileArithSqrt();
816 break;
817 case ArithFRound:
818 compileArithFRound();
819 break;
820 case ArithNegate:
821 compileArithNegate();
822 break;
823 case ArithUnary:
824 compileArithUnary();
825 break;
826 case ValueBitNot:
827 compileValueBitNot();
828 break;
829 case ArithBitNot:
830 compileArithBitNot();
831 break;
832 case ValueBitAnd:
833 compileValueBitAnd();
834 break;
835 case ArithBitAnd:
836 compileArithBitAnd();
837 break;
838 case ValueBitOr:
839 compileValueBitOr();
840 break;
841 case ArithBitOr:
842 compileArithBitOr();
843 break;
844 case ArithBitXor:
845 compileArithBitXor();
846 break;
847 case ValueBitXor:
848 compileValueBitXor();
849 break;
850 case BitRShift:
851 compileBitRShift();
852 break;
853 case BitLShift:
854 compileBitLShift();
855 break;
856 case BitURShift:
857 compileBitURShift();
858 break;
859 case UInt32ToNumber:
860 compileUInt32ToNumber();
861 break;
862 case CheckStructure:
863 compileCheckStructure();
864 break;
865 case CheckStructureOrEmpty:
866 compileCheckStructureOrEmpty();
867 break;
868 case CheckCell:
869 compileCheckCell();
870 break;
871 case CheckNotEmpty:
872 compileCheckNotEmpty();
873 break;
874 case AssertNotEmpty:
875 compileAssertNotEmpty();
876 break;
877 case CheckBadCell:
878 compileCheckBadCell();
879 break;
880 case CheckStringIdent:
881 compileCheckStringIdent();
882 break;
883 case GetExecutable:
884 compileGetExecutable();
885 break;
886 case Arrayify:
887 case ArrayifyToStructure:
888 compileArrayify();
889 break;
890 case PutStructure:
891 compilePutStructure();
892 break;
893 case TryGetById:
894 compileGetById(AccessType::TryGet);
895 break;
896 case GetById:
897 case GetByIdFlush:
898 compileGetById(AccessType::Get);
899 break;
900 case GetByIdWithThis:
901 compileGetByIdWithThis();
902 break;
903 case GetByIdDirect:
904 case GetByIdDirectFlush:
905 compileGetById(AccessType::GetDirect);
906 break;
907 case InById:
908 compileInById();
909 break;
910 case InByVal:
911 compileInByVal();
912 break;
913 case HasOwnProperty:
914 compileHasOwnProperty();
915 break;
916 case PutById:
917 case PutByIdDirect:
918 case PutByIdFlush:
919 compilePutById();
920 break;
921 case PutByIdWithThis:
922 compilePutByIdWithThis();
923 break;
924 case PutGetterById:
925 case PutSetterById:
926 compilePutAccessorById();
927 break;
928 case PutGetterSetterById:
929 compilePutGetterSetterById();
930 break;
931 case PutGetterByVal:
932 case PutSetterByVal:
933 compilePutAccessorByVal();
934 break;
935 case DeleteById:
936 compileDeleteById();
937 break;
938 case DeleteByVal:
939 compileDeleteByVal();
940 break;
941 case GetButterfly:
942 compileGetButterfly();
943 break;
944 case ConstantStoragePointer:
945 compileConstantStoragePointer();
946 break;
947 case GetIndexedPropertyStorage:
948 compileGetIndexedPropertyStorage();
949 break;
950 case CheckArray:
951 compileCheckArray();
952 break;
953 case GetArrayLength:
954 compileGetArrayLength();
955 break;
956 case GetVectorLength:
957 compileGetVectorLength();
958 break;
959 case CheckInBounds:
960 compileCheckInBounds();
961 break;
962 case GetByVal:
963 compileGetByVal();
964 break;
965 case GetMyArgumentByVal:
966 case GetMyArgumentByValOutOfBounds:
967 compileGetMyArgumentByVal();
968 break;
969 case GetByValWithThis:
970 compileGetByValWithThis();
971 break;
972 case PutByVal:
973 case PutByValAlias:
974 case PutByValDirect:
975 compilePutByVal();
976 break;
977 case PutByValWithThis:
978 compilePutByValWithThis();
979 break;
980 case AtomicsAdd:
981 case AtomicsAnd:
982 case AtomicsCompareExchange:
983 case AtomicsExchange:
984 case AtomicsLoad:
985 case AtomicsOr:
986 case AtomicsStore:
987 case AtomicsSub:
988 case AtomicsXor:
989 compileAtomicsReadModifyWrite();
990 break;
991 case AtomicsIsLockFree:
992 compileAtomicsIsLockFree();
993 break;
994 case DefineDataProperty:
995 compileDefineDataProperty();
996 break;
997 case DefineAccessorProperty:
998 compileDefineAccessorProperty();
999 break;
1000 case ArrayPush:
1001 compileArrayPush();
1002 break;
1003 case ArrayPop:
1004 compileArrayPop();
1005 break;
1006 case ArraySlice:
1007 compileArraySlice();
1008 break;
1009 case ArrayIndexOf:
1010 compileArrayIndexOf();
1011 break;
1012 case CreateActivation:
1013 compileCreateActivation();
1014 break;
1015 case PushWithScope:
1016 compilePushWithScope();
1017 break;
1018 case NewFunction:
1019 case NewGeneratorFunction:
1020 case NewAsyncGeneratorFunction:
1021 case NewAsyncFunction:
1022 compileNewFunction();
1023 break;
1024 case CreateDirectArguments:
1025 compileCreateDirectArguments();
1026 break;
1027 case CreateScopedArguments:
1028 compileCreateScopedArguments();
1029 break;
1030 case CreateClonedArguments:
1031 compileCreateClonedArguments();
1032 break;
1033 case ObjectCreate:
1034 compileObjectCreate();
1035 break;
1036 case ObjectKeys:
1037 compileObjectKeys();
1038 break;
1039 case NewObject:
1040 compileNewObject();
1041 break;
1042 case NewStringObject:
1043 compileNewStringObject();
1044 break;
1045 case NewSymbol:
1046 compileNewSymbol();
1047 break;
1048 case NewArray:
1049 compileNewArray();
1050 break;
1051 case NewArrayWithSpread:
1052 compileNewArrayWithSpread();
1053 break;
1054 case CreateThis:
1055 compileCreateThis();
1056 break;
1057 case Spread:
1058 compileSpread();
1059 break;
1060 case NewArrayBuffer:
1061 compileNewArrayBuffer();
1062 break;
1063 case NewArrayWithSize:
1064 compileNewArrayWithSize();
1065 break;
1066 case NewTypedArray:
1067 compileNewTypedArray();
1068 break;
1069 case GetTypedArrayByteOffset:
1070 compileGetTypedArrayByteOffset();
1071 break;
1072 case GetPrototypeOf:
1073 compileGetPrototypeOf();
1074 break;
1075 case AllocatePropertyStorage:
1076 compileAllocatePropertyStorage();
1077 break;
1078 case ReallocatePropertyStorage:
1079 compileReallocatePropertyStorage();
1080 break;
1081 case NukeStructureAndSetButterfly:
1082 compileNukeStructureAndSetButterfly();
1083 break;
1084 case ToNumber:
1085 compileToNumber();
1086 break;
1087 case ToString:
1088 case CallStringConstructor:
1089 case StringValueOf:
1090 compileToStringOrCallStringConstructorOrStringValueOf();
1091 break;
1092 case ToPrimitive:
1093 compileToPrimitive();
1094 break;
1095 case MakeRope:
1096 compileMakeRope();
1097 break;
1098 case StringCharAt:
1099 compileStringCharAt();
1100 break;
1101 case StringCharCodeAt:
1102 compileStringCharCodeAt();
1103 break;
1104 case StringFromCharCode:
1105 compileStringFromCharCode();
1106 break;
1107 case GetByOffset:
1108 case GetGetterSetterByOffset:
1109 compileGetByOffset();
1110 break;
1111 case GetGetter:
1112 compileGetGetter();
1113 break;
1114 case GetSetter:
1115 compileGetSetter();
1116 break;
1117 case MultiGetByOffset:
1118 compileMultiGetByOffset();
1119 break;
1120 case PutByOffset:
1121 compilePutByOffset();
1122 break;
1123 case MultiPutByOffset:
1124 compileMultiPutByOffset();
1125 break;
1126 case MatchStructure:
1127 compileMatchStructure();
1128 break;
1129 case GetGlobalVar:
1130 case GetGlobalLexicalVariable:
1131 compileGetGlobalVariable();
1132 break;
1133 case PutGlobalVariable:
1134 compilePutGlobalVariable();
1135 break;
1136 case NotifyWrite:
1137 compileNotifyWrite();
1138 break;
1139 case GetCallee:
1140 compileGetCallee();
1141 break;
1142 case SetCallee:
1143 compileSetCallee();
1144 break;
1145 case GetArgumentCountIncludingThis:
1146 compileGetArgumentCountIncludingThis();
1147 break;
1148 case SetArgumentCountIncludingThis:
1149 compileSetArgumentCountIncludingThis();
1150 break;
1151 case GetScope:
1152 compileGetScope();
1153 break;
1154 case SkipScope:
1155 compileSkipScope();
1156 break;
1157 case GetGlobalObject:
1158 compileGetGlobalObject();
1159 break;
1160 case GetGlobalThis:
1161 compileGetGlobalThis();
1162 break;
1163 case GetClosureVar:
1164 compileGetClosureVar();
1165 break;
1166 case PutClosureVar:
1167 compilePutClosureVar();
1168 break;
1169 case GetFromArguments:
1170 compileGetFromArguments();
1171 break;
1172 case PutToArguments:
1173 compilePutToArguments();
1174 break;
1175 case GetArgument:
1176 compileGetArgument();
1177 break;
1178 case CompareEq:
1179 compileCompareEq();
1180 break;
1181 case CompareStrictEq:
1182 compileCompareStrictEq();
1183 break;
1184 case CompareLess:
1185 compileCompareLess();
1186 break;
1187 case CompareLessEq:
1188 compileCompareLessEq();
1189 break;
1190 case CompareGreater:
1191 compileCompareGreater();
1192 break;
1193 case CompareGreaterEq:
1194 compileCompareGreaterEq();
1195 break;
1196 case CompareBelow:
1197 compileCompareBelow();
1198 break;
1199 case CompareBelowEq:
1200 compileCompareBelowEq();
1201 break;
1202 case CompareEqPtr:
1203 compileCompareEqPtr();
1204 break;
1205 case SameValue:
1206 compileSameValue();
1207 break;
1208 case LogicalNot:
1209 compileLogicalNot();
1210 break;
1211 case Call:
1212 case TailCallInlinedCaller:
1213 case Construct:
1214 compileCallOrConstruct();
1215 break;
1216 case DirectCall:
1217 case DirectTailCallInlinedCaller:
1218 case DirectConstruct:
1219 case DirectTailCall:
1220 compileDirectCallOrConstruct();
1221 break;
1222 case TailCall:
1223 compileTailCall();
1224 break;
1225 case CallVarargs:
1226 case CallForwardVarargs:
1227 case TailCallVarargs:
1228 case TailCallVarargsInlinedCaller:
1229 case TailCallForwardVarargs:
1230 case TailCallForwardVarargsInlinedCaller:
1231 case ConstructVarargs:
1232 case ConstructForwardVarargs:
1233 compileCallOrConstructVarargs();
1234 break;
1235 case CallEval:
1236 compileCallEval();
1237 break;
1238 case LoadVarargs:
1239 compileLoadVarargs();
1240 break;
1241 case ForwardVarargs:
1242 compileForwardVarargs();
1243 break;
1244 case DFG::Jump:
1245 compileJump();
1246 break;
1247 case DFG::Branch:
1248 compileBranch();
1249 break;
1250 case DFG::Switch:
1251 compileSwitch();
1252 break;
1253 case DFG::EntrySwitch:
1254 compileEntrySwitch();
1255 break;
1256 case DFG::Return:
1257 compileReturn();
1258 break;
1259 case ForceOSRExit:
1260 compileForceOSRExit();
1261 break;
1262 case CPUIntrinsic:
1263#if CPU(X86_64)
1264 compileCPUIntrinsic();
1265#else
1266 RELEASE_ASSERT_NOT_REACHED();
1267#endif
1268 break;
1269 case Throw:
1270 compileThrow();
1271 break;
1272 case ThrowStaticError:
1273 compileThrowStaticError();
1274 break;
1275 case InvalidationPoint:
1276 compileInvalidationPoint();
1277 break;
1278 case IsEmpty:
1279 compileIsEmpty();
1280 break;
1281 case IsUndefined:
1282 compileIsUndefined();
1283 break;
1284 case IsUndefinedOrNull:
1285 compileIsUndefinedOrNull();
1286 break;
1287 case IsBoolean:
1288 compileIsBoolean();
1289 break;
1290 case IsNumber:
1291 compileIsNumber();
1292 break;
1293 case NumberIsInteger:
1294 compileNumberIsInteger();
1295 break;
1296 case IsCellWithType:
1297 compileIsCellWithType();
1298 break;
1299 case MapHash:
1300 compileMapHash();
1301 break;
1302 case NormalizeMapKey:
1303 compileNormalizeMapKey();
1304 break;
1305 case GetMapBucket:
1306 compileGetMapBucket();
1307 break;
1308 case GetMapBucketHead:
1309 compileGetMapBucketHead();
1310 break;
1311 case GetMapBucketNext:
1312 compileGetMapBucketNext();
1313 break;
1314 case LoadKeyFromMapBucket:
1315 compileLoadKeyFromMapBucket();
1316 break;
1317 case LoadValueFromMapBucket:
1318 compileLoadValueFromMapBucket();
1319 break;
1320 case ExtractValueFromWeakMapGet:
1321 compileExtractValueFromWeakMapGet();
1322 break;
1323 case SetAdd:
1324 compileSetAdd();
1325 break;
1326 case MapSet:
1327 compileMapSet();
1328 break;
1329 case WeakMapGet:
1330 compileWeakMapGet();
1331 break;
1332 case WeakSetAdd:
1333 compileWeakSetAdd();
1334 break;
1335 case WeakMapSet:
1336 compileWeakMapSet();
1337 break;
1338 case IsObject:
1339 compileIsObject();
1340 break;
1341 case IsObjectOrNull:
1342 compileIsObjectOrNull();
1343 break;
1344 case IsFunction:
1345 compileIsFunction();
1346 break;
1347 case IsTypedArrayView:
1348 compileIsTypedArrayView();
1349 break;
1350 case ParseInt:
1351 compileParseInt();
1352 break;
1353 case TypeOf:
1354 compileTypeOf();
1355 break;
1356 case CheckTypeInfoFlags:
1357 compileCheckTypeInfoFlags();
1358 break;
1359 case OverridesHasInstance:
1360 compileOverridesHasInstance();
1361 break;
1362 case InstanceOf:
1363 compileInstanceOf();
1364 break;
1365 case InstanceOfCustom:
1366 compileInstanceOfCustom();
1367 break;
1368 case CountExecution:
1369 compileCountExecution();
1370 break;
1371 case SuperSamplerBegin:
1372 compileSuperSamplerBegin();
1373 break;
1374 case SuperSamplerEnd:
1375 compileSuperSamplerEnd();
1376 break;
1377 case StoreBarrier:
1378 case FencedStoreBarrier:
1379 compileStoreBarrier();
1380 break;
1381 case HasIndexedProperty:
1382 compileHasIndexedProperty();
1383 break;
1384 case HasGenericProperty:
1385 compileHasGenericProperty();
1386 break;
1387 case HasStructureProperty:
1388 compileHasStructureProperty();
1389 break;
1390 case GetDirectPname:
1391 compileGetDirectPname();
1392 break;
1393 case GetEnumerableLength:
1394 compileGetEnumerableLength();
1395 break;
1396 case GetPropertyEnumerator:
1397 compileGetPropertyEnumerator();
1398 break;
1399 case GetEnumeratorStructurePname:
1400 compileGetEnumeratorStructurePname();
1401 break;
1402 case GetEnumeratorGenericPname:
1403 compileGetEnumeratorGenericPname();
1404 break;
1405 case ToIndexString:
1406 compileToIndexString();
1407 break;
1408 case CheckStructureImmediate:
1409 compileCheckStructureImmediate();
1410 break;
1411 case MaterializeNewObject:
1412 compileMaterializeNewObject();
1413 break;
1414 case MaterializeCreateActivation:
1415 compileMaterializeCreateActivation();
1416 break;
1417 case CheckTraps:
1418 compileCheckTraps();
1419 break;
1420 case CreateRest:
1421 compileCreateRest();
1422 break;
1423 case GetRestLength:
1424 compileGetRestLength();
1425 break;
1426 case RegExpExec:
1427 compileRegExpExec();
1428 break;
1429 case RegExpExecNonGlobalOrSticky:
1430 compileRegExpExecNonGlobalOrSticky();
1431 break;
1432 case RegExpTest:
1433 compileRegExpTest();
1434 break;
1435 case RegExpMatchFast:
1436 compileRegExpMatchFast();
1437 break;
1438 case RegExpMatchFastGlobal:
1439 compileRegExpMatchFastGlobal();
1440 break;
1441 case NewRegexp:
1442 compileNewRegexp();
1443 break;
1444 case SetFunctionName:
1445 compileSetFunctionName();
1446 break;
1447 case StringReplace:
1448 case StringReplaceRegExp:
1449 compileStringReplace();
1450 break;
1451 case GetRegExpObjectLastIndex:
1452 compileGetRegExpObjectLastIndex();
1453 break;
1454 case SetRegExpObjectLastIndex:
1455 compileSetRegExpObjectLastIndex();
1456 break;
1457 case LogShadowChickenPrologue:
1458 compileLogShadowChickenPrologue();
1459 break;
1460 case LogShadowChickenTail:
1461 compileLogShadowChickenTail();
1462 break;
1463 case RecordRegExpCachedResult:
1464 compileRecordRegExpCachedResult();
1465 break;
1466 case ResolveScopeForHoistingFuncDeclInEval:
1467 compileResolveScopeForHoistingFuncDeclInEval();
1468 break;
1469 case ResolveScope:
1470 compileResolveScope();
1471 break;
1472 case GetDynamicVar:
1473 compileGetDynamicVar();
1474 break;
1475 case PutDynamicVar:
1476 compilePutDynamicVar();
1477 break;
1478 case Unreachable:
1479 compileUnreachable();
1480 break;
1481 case StringSlice:
1482 compileStringSlice();
1483 break;
1484 case ToLowerCase:
1485 compileToLowerCase();
1486 break;
1487 case NumberToStringWithRadix:
1488 compileNumberToStringWithRadix();
1489 break;
1490 case NumberToStringWithValidRadixConstant:
1491 compileNumberToStringWithValidRadixConstant();
1492 break;
1493 case CheckSubClass:
1494 compileCheckSubClass();
1495 break;
1496 case CallDOM:
1497 compileCallDOM();
1498 break;
1499 case CallDOMGetter:
1500 compileCallDOMGetter();
1501 break;
1502 case FilterCallLinkStatus:
1503 case FilterGetByIdStatus:
1504 case FilterPutByIdStatus:
1505 case FilterInByIdStatus:
1506 compileFilterICStatus();
1507 break;
1508 case DataViewGetInt:
1509 case DataViewGetFloat:
1510 compileDataViewGet();
1511 break;
1512 case DataViewSet:
1513 compileDataViewSet();
1514 break;
1515
1516 case PhantomLocal:
1517 case LoopHint:
1518 case MovHint:
1519 case ZombieHint:
1520 case ExitOK:
1521 case PhantomNewObject:
1522 case PhantomNewFunction:
1523 case PhantomNewGeneratorFunction:
1524 case PhantomNewAsyncGeneratorFunction:
1525 case PhantomNewAsyncFunction:
1526 case PhantomCreateActivation:
1527 case PhantomDirectArguments:
1528 case PhantomCreateRest:
1529 case PhantomSpread:
1530 case PhantomNewArrayWithSpread:
1531 case PhantomNewArrayBuffer:
1532 case PhantomClonedArguments:
1533 case PhantomNewRegexp:
1534 case PutHint:
1535 case BottomValue:
1536 case KillStack:
1537 case InitializeEntrypointArguments:
1538 break;
1539 default:
1540 DFG_CRASH(m_graph, m_node, "Unrecognized node in FTL backend");
1541 break;
1542 }
1543
1544 if (m_node->isTerminal())
1545 return false;
1546
1547 if (!m_state.isValid()) {
1548 safelyInvalidateAfterTermination();
1549 return false;
1550 }
1551
1552 m_availabilityCalculator.executeNode(m_node);
1553 m_interpreter.executeEffects(nodeIndex);
1554
1555 return true;
1556 }
1557
1558 void compileUpsilon()
1559 {
1560 LValue upsilonValue = nullptr;
1561 switch (m_node->child1().useKind()) {
1562 case DoubleRepUse:
1563 upsilonValue = lowDouble(m_node->child1());
1564 break;
1565 case Int32Use:
1566 case KnownInt32Use:
1567 upsilonValue = lowInt32(m_node->child1());
1568 break;
1569 case Int52RepUse:
1570 upsilonValue = lowInt52(m_node->child1());
1571 break;
1572 case BooleanUse:
1573 case KnownBooleanUse:
1574 upsilonValue = lowBoolean(m_node->child1());
1575 break;
1576 case CellUse:
1577 case KnownCellUse:
1578 upsilonValue = lowCell(m_node->child1());
1579 break;
1580 case UntypedUse:
1581 upsilonValue = lowJSValue(m_node->child1());
1582 break;
1583 default:
1584 DFG_CRASH(m_graph, m_node, "Bad use kind");
1585 break;
1586 }
1587 ValueFromBlock upsilon = m_out.anchor(upsilonValue);
1588 LValue phiNode = m_phis.get(m_node->phi());
1589 m_out.addIncomingToPhi(phiNode, upsilon);
1590 }
1591
1592 void compilePhi()
1593 {
1594 LValue phi = m_phis.get(m_node);
1595 m_out.m_block->append(phi);
1596
1597 switch (m_node->flags() & NodeResultMask) {
1598 case NodeResultDouble:
1599 setDouble(phi);
1600 break;
1601 case NodeResultInt32:
1602 setInt32(phi);
1603 break;
1604 case NodeResultInt52:
1605 setInt52(phi);
1606 break;
1607 case NodeResultBoolean:
1608 setBoolean(phi);
1609 break;
1610 case NodeResultJS:
1611 setJSValue(phi);
1612 break;
1613 default:
1614 DFG_CRASH(m_graph, m_node, "Bad result type");
1615 break;
1616 }
1617 }
1618
1619 void compileDoubleConstant()
1620 {
1621 setDouble(m_out.constDouble(m_node->asNumber()));
1622 }
1623
1624 void compileInt52Constant()
1625 {
1626 int64_t value = m_node->asAnyInt();
1627
1628 setInt52(m_out.constInt64(value << JSValue::int52ShiftAmount));
1629 setStrictInt52(m_out.constInt64(value));
1630 }
1631
1632 void compileLazyJSConstant()
1633 {
1634 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
1635 LazyJSValue value = m_node->lazyJSValue();
1636 patchpoint->setGenerator(
1637 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
1638 value.emit(jit, JSValueRegs(params[0].gpr()));
1639 });
1640 patchpoint->effects = Effects::none();
1641 setJSValue(patchpoint);
1642 }
1643
    void compileDoubleRep()
    {
        // Lowers DoubleRep: produces an unboxed double from child1, with the
        // conversion strategy chosen by the child's use kind.
        switch (m_node->child1().useKind()) {
        case RealNumberUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            // Optimistically reinterpret the JSValue bits as a double. For a
            // boxed double this is the number itself; for anything else the
            // result is a NaN bit pattern.
            LValue doubleValue = unboxDouble(value);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // x == x is false only for NaN, so this branch sends every
            // genuine (non-NaN) double straight to the continuation.
            ValueFromBlock fastResult = m_out.anchor(doubleValue);
            m_out.branch(
                m_out.doubleEqual(doubleValue, doubleValue),
                usually(continuation), rarely(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

            // Not a non-NaN double: it must be an int32 or we OSR-exit. The
            // proven type is narrowed by ~SpecDoubleReal because real doubles
            // were already consumed by the fast path above.
            FTL_TYPE_CHECK(
                jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
                isNotInt32(value, provenType(m_node->child1()) & ~SpecDoubleReal));
            ValueFromBlock slowResult = m_out.anchor(m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, fastResult, slowResult));
            return;
        }

        case NotCellUse:
        case NumberUse: {
            // For NotCellUse we additionally convert undefined, null, and
            // booleans to their numeric equivalents; for NumberUse any
            // non-number fails the type check instead.
            bool shouldConvertNonNumber = m_node->child1().useKind() == NotCellUse;

            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock intCase = m_out.newBlock();
            LBasicBlock doubleTesting = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock nonDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isNotInt32(value, provenType(m_node->child1())),
                unsure(doubleTesting), unsure(intCase));

            LBasicBlock lastNext = m_out.appendTo(intCase, doubleTesting);

            // Int32 case: widen the unboxed int32 to a double.
            ValueFromBlock intToDouble = m_out.anchor(
                m_out.intToDouble(unboxInt32(value)));
            m_out.jump(continuation);

            m_out.appendTo(doubleTesting, doubleCase);
            LValue valueIsNumber = isNumber(value, provenType(m_node->child1()));
            m_out.branch(valueIsNumber, usually(doubleCase), rarely(nonDoubleCase));

            m_out.appendTo(doubleCase, nonDoubleCase);
            ValueFromBlock unboxedDouble = m_out.anchor(unboxDouble(value));
            m_out.jump(continuation);

            if (shouldConvertNonNumber) {
                // NotCellUse: undefined -> NaN, null -> 0, true -> 1,
                // false -> 0; anything left after those tests must be a cell
                // and fails the type check below.
                LBasicBlock undefinedCase = m_out.newBlock();
                LBasicBlock testNullCase = m_out.newBlock();
                LBasicBlock nullCase = m_out.newBlock();
                LBasicBlock testBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanTrueCase = m_out.newBlock();
                LBasicBlock convertBooleanFalseCase = m_out.newBlock();

                m_out.appendTo(nonDoubleCase, undefinedCase);
                LValue valueIsUndefined = m_out.equal(value, m_out.constInt64(ValueUndefined));
                m_out.branch(valueIsUndefined, unsure(undefinedCase), unsure(testNullCase));

                m_out.appendTo(undefinedCase, testNullCase);
                ValueFromBlock convertedUndefined = m_out.anchor(m_out.constDouble(PNaN));
                m_out.jump(continuation);

                m_out.appendTo(testNullCase, nullCase);
                LValue valueIsNull = m_out.equal(value, m_out.constInt64(ValueNull));
                m_out.branch(valueIsNull, unsure(nullCase), unsure(testBooleanTrueCase));

                m_out.appendTo(nullCase, testBooleanTrueCase);
                ValueFromBlock convertedNull = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(testBooleanTrueCase, convertBooleanTrueCase);
                LValue valueIsBooleanTrue = m_out.equal(value, m_out.constInt64(ValueTrue));
                m_out.branch(valueIsBooleanTrue, unsure(convertBooleanTrueCase), unsure(convertBooleanFalseCase));

                m_out.appendTo(convertBooleanTrueCase, convertBooleanFalseCase);
                ValueFromBlock convertedTrue = m_out.anchor(m_out.constDouble(1));
                m_out.jump(continuation);

                m_out.appendTo(convertBooleanFalseCase, continuation);

                // The only remaining non-cell value is false; anything else
                // (i.e. a cell) triggers the ~SpecCellCheck type check.
                LValue valueIsNotBooleanFalse = m_out.notEqual(value, m_out.constInt64(ValueFalse));
                FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), ~SpecCellCheck, valueIsNotBooleanFalse);
                ValueFromBlock convertedFalse = m_out.anchor(m_out.constDouble(0));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setDouble(m_out.phi(Double, intToDouble, unboxedDouble, convertedUndefined, convertedNull, convertedTrue, convertedFalse));
                return;
            }
            // NumberUse: a non-number always fails, so the check condition is
            // unconditionally true and the block is unreachable afterwards.
            m_out.appendTo(nonDoubleCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecBytecodeNumber, m_out.booleanTrue);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);

            setDouble(m_out.phi(Double, intToDouble, unboxedDouble));
            return;
        }

        case Int52RepUse: {
            setDouble(strictInt52ToDouble(lowStrictInt52(m_node->child1())));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
    }
1766
1767 void compileDoubleAsInt32()
1768 {
1769 LValue integerValue = convertDoubleToInt32(lowDouble(m_node->child1()), shouldCheckNegativeZero(m_node->arithMode()));
1770 setInt32(integerValue);
1771 }
1772
1773 void compileValueRep()
1774 {
1775 switch (m_node->child1().useKind()) {
1776 case DoubleRepUse: {
1777 LValue value = lowDouble(m_node->child1());
1778
1779 if (m_interpreter.needsTypeCheck(m_node->child1(), ~SpecDoubleImpureNaN)) {
1780 value = m_out.select(
1781 m_out.doubleEqual(value, value), value, m_out.constDouble(PNaN));
1782 }
1783
1784 setJSValue(boxDouble(value));
1785 return;
1786 }
1787
1788 case Int52RepUse: {
1789 setJSValue(strictInt52ToJSValue(lowStrictInt52(m_node->child1())));
1790 return;
1791 }
1792
1793 default:
1794 DFG_CRASH(m_graph, m_node, "Bad use kind");
1795 }
1796 }
1797
1798 void compileInt52Rep()
1799 {
1800 switch (m_node->child1().useKind()) {
1801 case Int32Use:
1802 setStrictInt52(m_out.signExt32To64(lowInt32(m_node->child1())));
1803 return;
1804
1805 case AnyIntUse:
1806 setStrictInt52(
1807 jsValueToStrictInt52(
1808 m_node->child1(), lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1809 return;
1810
1811 case DoubleRepAnyIntUse:
1812 setStrictInt52(
1813 doubleToStrictInt52(
1814 m_node->child1(), lowDouble(m_node->child1())));
1815 return;
1816
1817 default:
1818 RELEASE_ASSERT_NOT_REACHED();
1819 }
1820 }
1821
1822 void compileValueToInt32()
1823 {
1824 switch (m_node->child1().useKind()) {
1825 case Int52RepUse:
1826 setInt32(m_out.castToInt32(lowStrictInt52(m_node->child1())));
1827 break;
1828
1829 case DoubleRepUse:
1830 setInt32(doubleToInt32(lowDouble(m_node->child1())));
1831 break;
1832
1833 case NumberUse:
1834 case NotCellUse: {
1835 LoweredNodeValue value = m_int32Values.get(m_node->child1().node());
1836 if (isValid(value)) {
1837 setInt32(value.value());
1838 break;
1839 }
1840
1841 value = m_jsValueValues.get(m_node->child1().node());
1842 if (isValid(value)) {
1843 setInt32(numberOrNotCellToInt32(m_node->child1(), value.value()));
1844 break;
1845 }
1846
1847 // We'll basically just get here for constants. But it's good to have this
1848 // catch-all since we often add new representations into the mix.
1849 setInt32(
1850 numberOrNotCellToInt32(
1851 m_node->child1(),
1852 lowJSValue(m_node->child1(), ManualOperandSpeculation)));
1853 break;
1854 }
1855
1856 default:
1857 DFG_CRASH(m_graph, m_node, "Bad use kind");
1858 break;
1859 }
1860 }
1861
    void compileBooleanToNumber()
    {
        // Lowers BooleanToNumber: false -> 0, true -> 1; non-boolean inputs
        // (UntypedUse only) pass through unchanged.
        switch (m_node->child1().useKind()) {
        case BooleanUse: {
            // A proven boolean is just zero-extended to an int32.
            setInt32(m_out.zeroExt(lowBoolean(m_node->child1()), Int32));
            return;
        }

        case UntypedUse: {
            LValue value = lowJSValue(m_node->child1());

            // If the value is proven to be a boolean or a boolean-like int32,
            // the low bit of the payload already is the numeric result.
            if (!m_interpreter.needsTypeCheck(m_node->child1(), SpecBoolInt32 | SpecBoolean)) {
                setInt32(m_out.bitAnd(m_out.castToInt32(value), m_out.int32One));
                return;
            }

            LBasicBlock booleanCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Non-booleans flow to the continuation unchanged (as a JSValue).
            ValueFromBlock notBooleanResult = m_out.anchor(value);
            m_out.branch(
                isBoolean(value, provenType(m_node->child1())),
                unsure(booleanCase), unsure(continuation));

            // Booleans are unboxed and re-tagged as a boxed int32 JSValue.
            LBasicBlock lastNext = m_out.appendTo(booleanCase, continuation);
            ValueFromBlock booleanResult = m_out.anchor(m_out.bitOr(
                m_out.zeroExt(unboxBoolean(value), Int64), m_tagTypeNumber));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, booleanResult, notBooleanResult));
            return;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return;
        }
    }
1901
1902 void compileExtractOSREntryLocal()
1903 {
1904 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(
1905 m_ftlState.jitCode->ftlForOSREntry()->entryBuffer()->dataBuffer());
1906 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->unlinkedLocal().toLocal())));
1907 }
1908
1909 void compileExtractCatchLocal()
1910 {
1911 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(m_ftlState.jitCode->common.catchOSREntryBuffer->dataBuffer());
1912 setJSValue(m_out.load64(m_out.absolute(buffer + m_node->catchOSREntryIndex())));
1913 }
1914
1915 void compileClearCatchLocals()
1916 {
1917 ScratchBuffer* scratchBuffer = m_ftlState.jitCode->common.catchOSREntryBuffer;
1918 ASSERT(scratchBuffer);
1919 m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
1920 }
1921
1922 void compileGetStack()
1923 {
1924 StackAccessData* data = m_node->stackAccessData();
1925 AbstractValue& value = m_state.operand(data->local);
1926
1927 DFG_ASSERT(m_graph, m_node, isConcrete(data->format), data->format);
1928
1929 switch (data->format) {
1930 case FlushedDouble:
1931 setDouble(m_out.loadDouble(addressFor(data->machineLocal)));
1932 break;
1933 case FlushedInt52:
1934 setInt52(m_out.load64(addressFor(data->machineLocal)));
1935 break;
1936 default:
1937 if (isInt32Speculation(value.m_type))
1938 setInt32(m_out.load32(payloadFor(data->machineLocal)));
1939 else
1940 setJSValue(m_out.load64(addressFor(data->machineLocal)));
1941 break;
1942 }
1943 }
1944
1945 void compilePutStack()
1946 {
1947 StackAccessData* data = m_node->stackAccessData();
1948 switch (data->format) {
1949 case FlushedJSValue: {
1950 LValue value = lowJSValue(m_node->child1());
1951 m_out.store64(value, addressFor(data->machineLocal));
1952 break;
1953 }
1954
1955 case FlushedDouble: {
1956 LValue value = lowDouble(m_node->child1());
1957 m_out.storeDouble(value, addressFor(data->machineLocal));
1958 break;
1959 }
1960
1961 case FlushedInt32: {
1962 LValue value = lowInt32(m_node->child1());
1963 m_out.store32(value, payloadFor(data->machineLocal));
1964 break;
1965 }
1966
1967 case FlushedInt52: {
1968 LValue value = lowInt52(m_node->child1());
1969 m_out.store64(value, addressFor(data->machineLocal));
1970 break;
1971 }
1972
1973 case FlushedCell: {
1974 LValue value = lowCell(m_node->child1());
1975 m_out.store64(value, addressFor(data->machineLocal));
1976 break;
1977 }
1978
1979 case FlushedBoolean: {
1980 speculateBoolean(m_node->child1());
1981 m_out.store64(
1982 lowJSValue(m_node->child1(), ManualOperandSpeculation),
1983 addressFor(data->machineLocal));
1984 break;
1985 }
1986
1987 default:
1988 DFG_CRASH(m_graph, m_node, "Bad flush format");
1989 break;
1990 }
1991 }
1992
    void compileNoOp()
    {
        // The node itself emits no code, but its children may still carry
        // speculation obligations; honor them so type checks are not lost.
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, speculate);
    }
1997
    void compileToObjectOrCallObjectConstructor()
    {
        // Lowers both ToObject and CallObjectConstructor: objects pass
        // through unchanged; everything else takes a slow-path call. The two
        // ops differ only in which runtime function handles the slow path.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells can never be objects; send them straight to the slow path.
        m_out.branch(isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        m_out.branch(isObject(value), usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);

        ValueFromBlock slowResult;
        if (m_node->op() == ToObject) {
            // ToObject additionally passes the identifier associated with the
            // node (beyond the value and global object).
            auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToObject), m_callFrame, weakPointer(globalObject), value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
        } else
            slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationCallObjectConstructor), m_callFrame, frozenPointer(m_node->cellOperand()), value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2025
    void compileToThis()
    {
        // Lowers ToThis: cells whose type-info flags do not include
        // OverridesToThis are returned unchanged; primitives and overriding
        // cells go to a runtime call that is strict/sloppy-mode sensitive.
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), usually(isCellCase), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, slowCase);
        ValueFromBlock fastResult = m_out.anchor(value);
        // Fast path only when the OverridesToThis bit is clear.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(OverridesToThis)),
            usually(continuation), rarely(slowCase));

        m_out.appendTo(slowCase, continuation);
        J_JITOperation_EJ function;
        if (m_graph.isStrictModeFor(m_node->origin.semantic))
            function = operationToThisStrict;
        else
            function = operationToThis;
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(function), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
2058
2059 void compileValueAdd()
2060 {
2061 if (m_node->isBinaryUseKind(BigIntUse)) {
2062 LValue left = lowBigInt(m_node->child1());
2063 LValue right = lowBigInt(m_node->child2());
2064
2065 LValue result = vmCall(pointerType(), m_out.operation(operationAddBigInt), m_callFrame, left, right);
2066 setJSValue(result);
2067 return;
2068 }
2069
2070 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2071 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2072 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2073 auto repatchingFunction = operationValueAddOptimize;
2074 auto nonRepatchingFunction = operationValueAdd;
2075 compileBinaryMathIC<JITAddGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2076 }
2077
2078 void compileValueSub()
2079 {
2080 if (m_node->isBinaryUseKind(BigIntUse)) {
2081 LValue left = lowBigInt(m_node->child1());
2082 LValue right = lowBigInt(m_node->child2());
2083
2084 LValue result = vmCall(pointerType(), m_out.operation(operationSubBigInt), m_callFrame, left, right);
2085 setJSValue(result);
2086 return;
2087 }
2088
2089 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2090 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2091 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2092 auto repatchingFunction = operationValueSubOptimize;
2093 auto nonRepatchingFunction = operationValueSub;
2094 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2095 }
2096
2097 void compileValueMul()
2098 {
2099 if (m_node->isBinaryUseKind(BigIntUse)) {
2100 LValue left = lowBigInt(m_node->child1());
2101 LValue right = lowBigInt(m_node->child2());
2102
2103 LValue result = vmCall(Int64, m_out.operation(operationMulBigInt), m_callFrame, left, right);
2104 setJSValue(result);
2105 return;
2106 }
2107
2108 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2109 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2110 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2111 auto repatchingFunction = operationValueMulOptimize;
2112 auto nonRepatchingFunction = operationValueMul;
2113 compileBinaryMathIC<JITMulGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2114 }
2115
    // Lowers a unary arithmetic node through a MathIC: the Generator tries to
    // emit a fast inline code sequence, backed by an out-of-line slow path that
    // calls either repatchingFunction (which may repatch the IC) or
    // nonRepatchingFunction. The enable_if constraint requires both Func1 and
    // Func2 to be plain function pointers.
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileUnaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        Node* node = m_node;

        LValue operand = lowJSValue(node->child1());

        // The IC body is emitted inside a patchpoint. By construction below,
        // params[0] is the result register and params[1] holds the operand.
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(operand);
        // Keep the tag registers pinned (late use) for the generated code and
        // the slow-path calls.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITUnaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                mathIC->m_generator = Generator(JSValueRegs(params[0].gpr()), JSValueRegs(params[1].gpr()), params.gpScratch(0));

                // Profiling is disabled here; the IC is used purely for speed.
                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out-of-line; it jumps back to `done`
                    // once the call returns.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // The repatching variant receives the IC so it can
                            // rewrite the inline code for the observed types.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline code could be generated; unconditionally call
                    // the non-repatching function.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2201
    // Binary counterpart of compileUnaryMathIC: lowers a binary arithmetic
    // node through a MathIC with an inline fast path and an out-of-line slow
    // path calling repatchingFunction / nonRepatchingFunction. Both functions
    // must be plain function pointers (enforced by the enable_if constraint).
    template <typename Generator, typename Func1, typename Func2,
        typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>>
    void compileBinaryMathIC(ArithProfile* arithProfile, Func1 repatchingFunction, Func2 nonRepatchingFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        // Result-type summaries from abstract interpretation let the snippet
        // generator specialize for the operand types it may see.
        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        // By construction below: params[0] = result, params[1] = left,
        // params[2] = right.
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);


                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

#if ENABLE(MATH_IC_STATS)
                auto inlineStart = jit.label();
#endif

                Box<MathICGenerationState> mathICGenerationState = Box<MathICGenerationState>::create();
                JITBinaryMathIC<Generator>* mathIC = jit.codeBlock()->addMathIC<Generator>(arithProfile);
                mathIC->m_generator = Generator(leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.fpScratch(0),
                    params.fpScratch(1), params.gpScratch(0), InvalidFPRReg);

                // Profiling is disabled here; the IC is used purely for speed.
                bool shouldEmitProfiling = false;
                bool generatedInline = mathIC->generateInline(jit, *mathICGenerationState, shouldEmitProfiling);

                if (generatedInline) {
                    ASSERT(!mathICGenerationState->slowPathJumps.empty());
                    auto done = jit.label();
                    // Emit the slow path out-of-line; it jumps back to `done`
                    // once the call returns.
                    params.addLatePath([=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        mathICGenerationState->slowPathJumps.link(&jit);
                        mathICGenerationState->slowPathStart = jit.label();
#if ENABLE(MATH_IC_STATS)
                        auto slowPathStart = jit.label();
#endif

                        if (mathICGenerationState->shouldSlowPathRepatch) {
                            // The repatching variant receives the IC so it can
                            // rewrite the inline code for the observed types.
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                                repatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr(), CCallHelpers::TrustedImmPtr(mathIC));
                            mathICGenerationState->slowPathCall = call.call();
                        } else {
                            SlowPathCall call = callOperation(*state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                            mathICGenerationState->slowPathCall = call.call();
                        }
                        jit.jump().linkTo(done, &jit);

                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            mathIC->finalizeInlineCode(*mathICGenerationState, linkBuffer);
                        });

#if ENABLE(MATH_IC_STATS)
                        auto slowPathEnd = jit.label();
                        jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                            size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
                            mathIC->m_generatedCodeSize += size;
                        });
#endif
                    });
                } else {
                    // No inline code could be generated; unconditionally call
                    // the non-repatching function.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic, exceptions.get(),
                        nonRepatchingFunction, params[0].gpr(), params[1].gpr(), params[2].gpr());
                }

#if ENABLE(MATH_IC_STATS)
                auto inlineEnd = jit.label();
                jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                    size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
                    mathIC->m_generatedCodeSize += size;
                });
#endif
            });

        setJSValue(patchpoint);
    }
2297
2298 void compileStrCat()
2299 {
2300 LValue result;
2301 if (m_node->child3()) {
2302 result = vmCall(
2303 Int64, m_out.operation(operationStrCat3), m_callFrame,
2304 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2305 lowJSValue(m_node->child2(), ManualOperandSpeculation),
2306 lowJSValue(m_node->child3(), ManualOperandSpeculation));
2307 } else {
2308 result = vmCall(
2309 Int64, m_out.operation(operationStrCat2), m_callFrame,
2310 lowJSValue(m_node->child1(), ManualOperandSpeculation),
2311 lowJSValue(m_node->child2(), ManualOperandSpeculation));
2312 }
2313 setJSValue(result);
2314 }
2315
2316 void compileArithAddOrSub()
2317 {
2318 bool isSub = m_node->op() == ArithSub;
2319 switch (m_node->binaryUseKind()) {
2320 case Int32Use: {
2321 LValue left = lowInt32(m_node->child1());
2322 LValue right = lowInt32(m_node->child2());
2323
2324 if (!shouldCheckOverflow(m_node->arithMode())) {
2325 setInt32(isSub ? m_out.sub(left, right) : m_out.add(left, right));
2326 break;
2327 }
2328
2329 CheckValue* result =
2330 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2331 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2332 setInt32(result);
2333 break;
2334 }
2335
2336 case Int52RepUse: {
2337 if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)
2338 && !abstractValue(m_node->child2()).couldBeType(SpecNonInt32AsInt52)) {
2339 Int52Kind kind;
2340 LValue left = lowWhicheverInt52(m_node->child1(), kind);
2341 LValue right = lowInt52(m_node->child2(), kind);
2342 setInt52(isSub ? m_out.sub(left, right) : m_out.add(left, right), kind);
2343 break;
2344 }
2345
2346 LValue left = lowInt52(m_node->child1());
2347 LValue right = lowInt52(m_node->child2());
2348 CheckValue* result =
2349 isSub ? m_out.speculateSub(left, right) : m_out.speculateAdd(left, right);
2350 blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);
2351 setInt52(result);
2352 break;
2353 }
2354
2355 case DoubleRepUse: {
2356 LValue C1 = lowDouble(m_node->child1());
2357 LValue C2 = lowDouble(m_node->child2());
2358
2359 setDouble(isSub ? m_out.doubleSub(C1, C2) : m_out.doubleAdd(C1, C2));
2360 break;
2361 }
2362
2363 case UntypedUse: {
2364 if (!isSub) {
2365 DFG_CRASH(m_graph, m_node, "Bad use kind");
2366 break;
2367 }
2368
2369 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
2370 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
2371 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
2372 auto repatchingFunction = operationValueSubOptimize;
2373 auto nonRepatchingFunction = operationValueSub;
2374 compileBinaryMathIC<JITSubGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
2375 break;
2376 }
2377
2378 default:
2379 DFG_CRASH(m_graph, m_node, "Bad use kind");
2380 break;
2381 }
2382 }
2383
2384 void compileArithClz32()
2385 {
2386 if (m_node->child1().useKind() == Int32Use || m_node->child1().useKind() == KnownInt32Use) {
2387 LValue operand = lowInt32(m_node->child1());
2388 setInt32(m_out.ctlz32(operand));
2389 return;
2390 }
2391 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2392 LValue argument = lowJSValue(m_node->child1());
2393 LValue result = vmCall(Int32, m_out.operation(operationArithClz32), m_callFrame, argument);
2394 setInt32(result);
2395 }
2396
    // Lowers ArithMul for Int32, Int52, and Double operands. The integer
    // paths optionally emit overflow and negative-zero speculation checks
    // depending on the node's arith mode.
    void compileArithMul()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            LValue result;

            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.mul(left, right);
            else {
                CheckValue* speculation = m_out.speculateMul(left, right);
                blessSpeculation(speculation, Overflow, noValue(), nullptr, m_origin);
                result = speculation;
            }

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero product with a negative operand means the true result
                // is -0, which int32 cannot represent: OSR-exit in that case.
                // The check is on the rarely-taken result == 0 path.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero32(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int32Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int32Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            // One operand is taken in whichever Int52 representation it
            // already has; the other in the opposite representation.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), opposite(kind));

            CheckValue* result = m_out.speculateMul(left, right);
            blessSpeculation(result, Overflow, noValue(), nullptr, m_origin);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // Same -0 check as the Int32 path, on 64-bit values.
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));

                LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(left, m_out.int64Zero));
                speculate(NegativeZero, noValue(), nullptr, m_out.lessThan(right, m_out.int64Zero));
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);
            }

            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMul(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2469
2470 void compileValueDiv()
2471 {
2472 if (m_node->isBinaryUseKind(BigIntUse)) {
2473 LValue left = lowBigInt(m_node->child1());
2474 LValue right = lowBigInt(m_node->child2());
2475
2476 LValue result = vmCall(pointerType(), m_out.operation(operationDivBigInt), m_callFrame, left, right);
2477 setJSValue(result);
2478 return;
2479 }
2480
2481 emitBinarySnippet<JITDivGenerator, NeedScratchFPR>(operationValueDiv);
2482 }
2483
    // Lowers ArithDiv for Int32 and Double operands. The Int32 path emits the
    // negative-zero and overflow/inexactness checks required by the node's
    // arith mode.
    void compileArithDiv()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // 0 / x with x < 0 would produce -0, which int32 cannot
                // represent: OSR-exit on that combination.
                LBasicBlock zeroNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.isZero32(numerator),
                    rarely(zeroNumerator), usually(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(zeroNumerator, numeratorContinuation);

                speculate(
                    NegativeZero, noValue(), 0, m_out.lessThan(denominator, m_out.int32Zero));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // denominator + 1 is above 1 (unsigned) unless denominator is
                // 0 or -1 — the two values that make integer division unsafe.
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                // Exit on division by zero, and on INT32_MIN / -1 (the one
                // quotient that overflows int32).
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.div(numerator, denominator);
                // Exit if the division was inexact (result * denominator does
                // not reproduce the numerator).
                speculate(
                    Overflow, noValue(), 0,
                    m_out.notEqual(m_out.mul(result, denominator), numerator));
                setInt32(result);
            } else
                setInt32(m_out.chillDiv(numerator, denominator));

            break;
        }

        case DoubleRepUse: {
            setDouble(m_out.doubleDiv(
                lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2547
2548 void compileValueMod()
2549 {
2550 if (m_node->binaryUseKind() == BigIntUse) {
2551 LValue left = lowBigInt(m_node->child1());
2552 LValue right = lowBigInt(m_node->child2());
2553
2554 LValue result = vmCall(pointerType(), m_out.operation(operationModBigInt), m_callFrame, left, right);
2555 setJSValue(result);
2556 return;
2557 }
2558
2559 DFG_ASSERT(m_graph, m_node, m_node->binaryUseKind() == UntypedUse, m_node->binaryUseKind());
2560 LValue left = lowJSValue(m_node->child1());
2561 LValue right = lowJSValue(m_node->child2());
2562 LValue result = vmCall(Int64, m_out.operation(operationValueMod), m_callFrame, left, right);
2563 setJSValue(result);
2564 }
2565
    // Lowers ArithMod for Int32 and Double operands. The Int32 path guards
    // the unsafe denominators and the -0 result as required by the node's
    // arith mode.
    void compileArithMod()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue numerator = lowInt32(m_node->child1());
            LValue denominator = lowInt32(m_node->child2());

            LValue remainder;
            if (shouldCheckOverflow(m_node->arithMode())) {
                LBasicBlock unsafeDenominator = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                // denominator + 1 is above 1 (unsigned) unless denominator is
                // 0 or -1 — the two values that make integer modulo unsafe.
                LValue adjustedDenominator = m_out.add(denominator, m_out.int32One);
                m_out.branch(
                    m_out.above(adjustedDenominator, m_out.int32One),
                    usually(continuation), rarely(unsafeDenominator));

                LBasicBlock lastNext = m_out.appendTo(unsafeDenominator, continuation);
                LValue neg2ToThe31 = m_out.constInt32(-2147483647-1);
                // Exit on modulo by zero, and on INT32_MIN % -1 (whose
                // division traps on some targets).
                speculate(Overflow, noValue(), nullptr, m_out.isZero32(denominator));
                speculate(Overflow, noValue(), nullptr, m_out.equal(numerator, neg2ToThe31));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                LValue result = m_out.mod(numerator, denominator);
                remainder = result;
            } else
                remainder = m_out.chillMod(numerator, denominator);

            if (shouldCheckNegativeZero(m_node->arithMode())) {
                // A zero remainder with a negative numerator means the true
                // result is -0, which int32 cannot represent: OSR-exit.
                LBasicBlock negativeNumerator = m_out.newBlock();
                LBasicBlock numeratorContinuation = m_out.newBlock();

                m_out.branch(
                    m_out.lessThan(numerator, m_out.int32Zero),
                    unsure(negativeNumerator), unsure(numeratorContinuation));

                LBasicBlock innerLastNext = m_out.appendTo(negativeNumerator, numeratorContinuation);

                speculate(NegativeZero, noValue(), 0, m_out.isZero32(remainder));

                m_out.jump(numeratorContinuation);

                m_out.appendTo(numeratorContinuation, innerLastNext);
            }

            setInt32(remainder);
            break;
        }

        case DoubleRepUse: {
            setDouble(
                m_out.doubleMod(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2627
    // Lowers ArithMin / ArithMax. Int32 is a simple compare-and-select. The
    // double path is structured so that an unordered comparison (a NaN
    // operand) falls through to the select, which then yields NaN.
    void compileArithMinOrMax()
    {
        switch (m_node->binaryUseKind()) {
        case Int32Use: {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());

            setInt32(
                m_out.select(
                    m_node->op() == ArithMin
                        ? m_out.lessThan(left, right)
                        : m_out.lessThan(right, left),
                    left, right));
            break;
        }

        case DoubleRepUse: {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());

            LBasicBlock notLessThan = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock, 2> results;

            // Fast path: left strictly wins the ordered comparison, so the
            // result is left.
            results.append(m_out.anchor(left));
            m_out.branch(
                m_node->op() == ArithMin
                    ? m_out.doubleLessThan(left, right)
                    : m_out.doubleGreaterThan(left, right),
                unsure(continuation), unsure(notLessThan));

            // Otherwise: right wins if the reverse (non-strict) comparison is
            // ordered-true; if neither comparison holds, an operand was NaN
            // and the result is NaN.
            LBasicBlock lastNext = m_out.appendTo(notLessThan, continuation);
            results.append(m_out.anchor(m_out.select(
                m_node->op() == ArithMin
                    ? m_out.doubleGreaterThanOrEqual(left, right)
                    : m_out.doubleLessThanOrEqual(left, right),
                right, m_out.constDouble(PNaN))));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, results));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
2678
2679 void compileArithAbs()
2680 {
2681 switch (m_node->child1().useKind()) {
2682 case Int32Use: {
2683 LValue value = lowInt32(m_node->child1());
2684
2685 LValue mask = m_out.aShr(value, m_out.constInt32(31));
2686 LValue result = m_out.bitXor(mask, m_out.add(mask, value));
2687
2688 if (shouldCheckOverflow(m_node->arithMode()))
2689 speculate(Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
2690
2691 setInt32(result);
2692 break;
2693 }
2694
2695 case DoubleRepUse: {
2696 setDouble(m_out.doubleAbs(lowDouble(m_node->child1())));
2697 break;
2698 }
2699
2700 default: {
2701 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2702 LValue argument = lowJSValue(m_node->child1());
2703 LValue result = vmCall(Double, m_out.operation(operationArithAbs), m_callFrame, argument);
2704 setDouble(result);
2705 break;
2706 }
2707 }
2708 }
2709
2710 void compileArithUnary()
2711 {
2712 if (m_node->child1().useKind() == DoubleRepUse) {
2713 setDouble(m_out.doubleUnary(m_node->arithUnaryType(), lowDouble(m_node->child1())));
2714 return;
2715 }
2716 LValue argument = lowJSValue(m_node->child1());
2717 LValue result = vmCall(Double, m_out.operation(DFG::arithUnaryOperation(m_node->arithUnaryType())), m_callFrame, argument);
2718 setDouble(result);
2719 }
2720
    // Lowers ArithPow. An int32 exponent maps directly to doublePowi.
    // Otherwise we emit a decision tree that (1) uses doublePowi for small
    // integral exponents, (2) handles the special cases the generic pow
    // routine would get wrong or that can be computed cheaply (NaN exponent,
    // |base| == 1 with infinite exponent, exponent == +/-0.5 via sqrt), and
    // (3) falls back to doublePow. All paths feed one phi at the end.
    void compileArithPow()
    {
        if (m_node->child2().useKind() == Int32Use)
            setDouble(m_out.doublePowi(lowDouble(m_node->child1()), lowInt32(m_node->child2())));
        else {
            LValue base = lowDouble(m_node->child1());
            LValue exponent = lowDouble(m_node->child2());

            LBasicBlock integerExponentIsSmallBlock = m_out.newBlock();
            LBasicBlock integerExponentPowBlock = m_out.newBlock();
            LBasicBlock doubleExponentPowBlockEntry = m_out.newBlock();
            LBasicBlock nanExceptionBaseIsOne = m_out.newBlock();
            LBasicBlock nanExceptionExponentIsInfinity = m_out.newBlock();
            LBasicBlock testExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsOneHalf = m_out.newBlock();
            LBasicBlock exponentIsOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsOneHalfInfinity = m_out.newBlock();
            LBasicBlock testExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock testBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleBaseZeroExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock handleInfinityForExponentIsNegativeOneHalf = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfNormal = m_out.newBlock();
            LBasicBlock exponentIsNegativeOneHalfInfinity = m_out.newBlock();
            LBasicBlock powBlock = m_out.newBlock();
            LBasicBlock nanExceptionResultIsNaN = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // An exponent that round-trips through int32 conversion is an
            // integer we can consider for the powi fast path.
            LValue integerExponent = m_out.doubleToInt(exponent);
            LValue integerExponentConvertedToDouble = m_out.intToDouble(integerExponent);
            LValue exponentIsInteger = m_out.doubleEqual(exponent, integerExponentConvertedToDouble);
            m_out.branch(exponentIsInteger, unsure(integerExponentIsSmallBlock), unsure(doubleExponentPowBlockEntry));

            LBasicBlock lastNext = m_out.appendTo(integerExponentIsSmallBlock, integerExponentPowBlock);
            LValue integerExponentBelowMax = m_out.belowOrEqual(integerExponent, m_out.constInt32(maxExponentForIntegerMathPow));
            m_out.branch(integerExponentBelowMax, usually(integerExponentPowBlock), rarely(doubleExponentPowBlockEntry));

            m_out.appendTo(integerExponentPowBlock, doubleExponentPowBlockEntry);
            ValueFromBlock powDoubleIntResult = m_out.anchor(m_out.doublePowi(base, integerExponent));
            m_out.jump(continuation);

            // If y is NaN, the result is NaN.
            m_out.appendTo(doubleExponentPowBlockEntry, nanExceptionBaseIsOne);
            LValue exponentIsNaN;
            if (provenType(m_node->child2()) & SpecDoubleNaN)
                exponentIsNaN = m_out.doubleNotEqualOrUnordered(exponent, exponent);
            else
                exponentIsNaN = m_out.booleanFalse;
            m_out.branch(exponentIsNaN, rarely(nanExceptionResultIsNaN), usually(nanExceptionBaseIsOne));

            // If abs(x) is 1 and y is +infinity, the result is NaN.
            // If abs(x) is 1 and y is -infinity, the result is NaN.

            // Test if base == 1.
            m_out.appendTo(nanExceptionBaseIsOne, nanExceptionExponentIsInfinity);
            LValue absoluteBase = m_out.doubleAbs(base);
            LValue absoluteBaseIsOne = m_out.doubleEqual(absoluteBase, m_out.constDouble(1));
            m_out.branch(absoluteBaseIsOne, rarely(nanExceptionExponentIsInfinity), usually(testExponentIsOneHalf));

            // Test if abs(y) == Infinity.
            m_out.appendTo(nanExceptionExponentIsInfinity, testExponentIsOneHalf);
            LValue absoluteExponent = m_out.doubleAbs(exponent);
            LValue absoluteExponentIsInfinity = m_out.doubleEqual(absoluteExponent, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteExponentIsInfinity, rarely(nanExceptionResultIsNaN), usually(testExponentIsOneHalf));

            // If y == 0.5 or y == -0.5, handle it through SQRT.
            // We have to be careful with -0 and -Infinity.

            // Test if y == 0.5
            m_out.appendTo(testExponentIsOneHalf, handleBaseZeroExponentIsOneHalf);
            LValue exponentIsOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(0.5));
            m_out.branch(exponentIsOneHalf, rarely(handleBaseZeroExponentIsOneHalf), usually(testExponentIsNegativeOneHalf));

            // Handle x == -0.
            m_out.appendTo(handleBaseZeroExponentIsOneHalf, handleInfinityForExponentIsOneHalf);
            LValue baseIsZeroExponentIsOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            ValueFromBlock zeroResultExponentIsOneHalf = m_out.anchor(m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsOneHalf, rarely(continuation), usually(handleInfinityForExponentIsOneHalf));

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsOneHalf, exponentIsOneHalfNormal);
            LValue absoluteBaseIsInfinityOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityOneHalf, rarely(exponentIsOneHalfInfinity), usually(exponentIsOneHalfNormal));

            // The exponent is 0.5, the base is finite or NaN, we can use SQRT.
            m_out.appendTo(exponentIsOneHalfNormal, exponentIsOneHalfInfinity);
            ValueFromBlock sqrtResult = m_out.anchor(m_out.doubleSqrt(base));
            m_out.jump(continuation);

            // The exponent is 0.5, the base is infinite, the result is always infinite.
            m_out.appendTo(exponentIsOneHalfInfinity, testExponentIsNegativeOneHalf);
            ValueFromBlock sqrtInfinityResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if y == -0.5
            m_out.appendTo(testExponentIsNegativeOneHalf, testBaseZeroExponentIsNegativeOneHalf);
            LValue exponentIsNegativeOneHalf = m_out.doubleEqual(exponent, m_out.constDouble(-0.5));
            m_out.branch(exponentIsNegativeOneHalf, rarely(testBaseZeroExponentIsNegativeOneHalf), usually(powBlock));

            // Handle x == -0.
            m_out.appendTo(testBaseZeroExponentIsNegativeOneHalf, handleBaseZeroExponentIsNegativeOneHalf);
            LValue baseIsZeroExponentIsNegativeOneHalf = m_out.doubleEqual(base, m_out.doubleZero);
            m_out.branch(baseIsZeroExponentIsNegativeOneHalf, rarely(handleBaseZeroExponentIsNegativeOneHalf), usually(handleInfinityForExponentIsNegativeOneHalf));

            m_out.appendTo(handleBaseZeroExponentIsNegativeOneHalf, handleInfinityForExponentIsNegativeOneHalf);
            ValueFromBlock oneOverSqrtZeroResult = m_out.anchor(m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.jump(continuation);

            // Test if abs(x) == Infinity.
            m_out.appendTo(handleInfinityForExponentIsNegativeOneHalf, exponentIsNegativeOneHalfNormal);
            LValue absoluteBaseIsInfinityNegativeOneHalf = m_out.doubleEqual(absoluteBase, m_out.constDouble(std::numeric_limits<double>::infinity()));
            m_out.branch(absoluteBaseIsInfinityNegativeOneHalf, rarely(exponentIsNegativeOneHalfInfinity), usually(exponentIsNegativeOneHalfNormal));

            // The exponent is -0.5, the base is finite or NaN, we can use 1/SQRT.
            m_out.appendTo(exponentIsNegativeOneHalfNormal, exponentIsNegativeOneHalfInfinity);
            LValue sqrtBase = m_out.doubleSqrt(base);
            ValueFromBlock oneOverSqrtResult = m_out.anchor(m_out.div(m_out.constDouble(1.), sqrtBase));
            m_out.jump(continuation);

            // The exponent is -0.5, the base is infinite, the result is always zero.
            m_out.appendTo(exponentIsNegativeOneHalfInfinity, powBlock);
            ValueFromBlock oneOverSqrtInfinityResult = m_out.anchor(m_out.doubleZero);
            m_out.jump(continuation);

            // Generic fallback: call out to the pow implementation.
            m_out.appendTo(powBlock, nanExceptionResultIsNaN);
            ValueFromBlock powResult = m_out.anchor(m_out.doublePow(base, exponent));
            m_out.jump(continuation);

            m_out.appendTo(nanExceptionResultIsNaN, continuation);
            ValueFromBlock pureNan = m_out.anchor(m_out.constDouble(PNaN));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setDouble(m_out.phi(Double, powDoubleIntResult, zeroResultExponentIsOneHalf, sqrtResult, sqrtInfinityResult, oneOverSqrtZeroResult, oneOverSqrtResult, oneOverSqrtInfinityResult, powResult, pureNan));
        }
    }
2857
    // Lowers ArithRandom by inlining the global object's WeakRandom generator
    // (an xorshift-style scheme — see WeakRandom::advance) and scaling the
    // result into a double in [0, 1).
    void compileArithRandom()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        // Inlined WeakRandom::advance().
        // uint64_t x = m_low;
        void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
        LValue low = m_out.load64(m_out.absolute(lowAddress));
        // uint64_t y = m_high;
        void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
        LValue high = m_out.load64(m_out.absolute(highAddress));
        // m_low = y;
        m_out.store64(high, m_out.absolute(lowAddress));

        // x ^= x << 23;
        LValue phase1 = m_out.bitXor(m_out.shl(low, m_out.constInt64(23)), low);

        // x ^= x >> 17;
        LValue phase2 = m_out.bitXor(m_out.lShr(phase1, m_out.constInt64(17)), phase1);

        // x ^= y ^ (y >> 26);
        LValue phase3 = m_out.bitXor(m_out.bitXor(high, m_out.lShr(high, m_out.constInt64(26))), phase2);

        // m_high = x;
        m_out.store64(phase3, m_out.absolute(highAddress));

        // return x + y;
        LValue random64 = m_out.add(phase3, high);

        // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
        LValue random53 = m_out.bitAnd(random64, m_out.constInt64((1ULL << 53) - 1));

        LValue double53Integer = m_out.intToDouble(random53);

        // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
        // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
        static const double scale = 1.0 / (1ULL << 53);

        // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
        // It just reduces the exp part of the given 53bit double integer.
        // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
        // Now we get 53bit precision random double value in [0, 1).
        LValue result = m_out.doubleMul(double53Integer, m_out.constDouble(scale));

        setDouble(result);
    }
2904
    // Lowers ArithRound (round-half-toward-positive-infinity) for DoubleRep
    // inputs, with a runtime-call fallback for Untyped inputs.
    void compileArithRound()
    {
        if (m_node->child1().useKind() == DoubleRepUse) {
            LValue result = nullptr;
            if (producesInteger(m_node->arithRoundingMode()) && !shouldCheckNegativeZero(m_node->arithRoundingMode())) {
                // Fast path: when -0 does not need to be distinguished,
                // rounding is simply floor(value + 0.5).
                LValue value = lowDouble(m_node->child1());
                result = m_out.doubleFloor(m_out.doubleAdd(value, m_out.constDouble(0.5)));
            } else {
                // General path: take ceil(value), then step down by 1 when
                // ceil overshot by more than 0.5 (or the comparison was
                // unordered, i.e. NaN).
                LBasicBlock realPartIsMoreThanHalf = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue value = lowDouble(m_node->child1());
                LValue integerValue = m_out.doubleCeil(value);
                ValueFromBlock integerValueResult = m_out.anchor(integerValue);

                LValue realPart = m_out.doubleSub(integerValue, value);

                m_out.branch(m_out.doubleGreaterThanOrUnordered(realPart, m_out.constDouble(0.5)), unsure(realPartIsMoreThanHalf), unsure(continuation));

                LBasicBlock lastNext = m_out.appendTo(realPartIsMoreThanHalf, continuation);
                LValue integerValueRoundedDown = m_out.doubleSub(integerValue, m_out.constDouble(1));
                ValueFromBlock integerValueRoundedDownResult = m_out.anchor(integerValueRoundedDown);
                m_out.jump(continuation);
                m_out.appendTo(continuation, lastNext);

                result = m_out.phi(Double, integerValueResult, integerValueRoundedDownResult);
            }

            if (producesInteger(m_node->arithRoundingMode())) {
                LValue integerValue = convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode()));
                setInt32(integerValue);
            } else
                setDouble(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
        LValue argument = lowJSValue(m_node->child1());
        setJSValue(vmCall(Int64, m_out.operation(operationArithRound), m_callFrame, argument));
    }
2945
2946 void compileArithFloor()
2947 {
2948 if (m_node->child1().useKind() == DoubleRepUse) {
2949 LValue value = lowDouble(m_node->child1());
2950 LValue integerValue = m_out.doubleFloor(value);
2951 if (producesInteger(m_node->arithRoundingMode()))
2952 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2953 else
2954 setDouble(integerValue);
2955 return;
2956 }
2957 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2958 LValue argument = lowJSValue(m_node->child1());
2959 setJSValue(vmCall(Int64, m_out.operation(operationArithFloor), m_callFrame, argument));
2960 }
2961
2962 void compileArithCeil()
2963 {
2964 if (m_node->child1().useKind() == DoubleRepUse) {
2965 LValue value = lowDouble(m_node->child1());
2966 LValue integerValue = m_out.doubleCeil(value);
2967 if (producesInteger(m_node->arithRoundingMode()))
2968 setInt32(convertDoubleToInt32(integerValue, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2969 else
2970 setDouble(integerValue);
2971 return;
2972 }
2973 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2974 LValue argument = lowJSValue(m_node->child1());
2975 setJSValue(vmCall(Int64, m_out.operation(operationArithCeil), m_callFrame, argument));
2976 }
2977
2978 void compileArithTrunc()
2979 {
2980 if (m_node->child1().useKind() == DoubleRepUse) {
2981 LValue value = lowDouble(m_node->child1());
2982 LValue result = m_out.doubleTrunc(value);
2983 if (producesInteger(m_node->arithRoundingMode()))
2984 setInt32(convertDoubleToInt32(result, shouldCheckNegativeZero(m_node->arithRoundingMode())));
2985 else
2986 setDouble(result);
2987 return;
2988 }
2989 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse, m_node->child1().useKind());
2990 LValue argument = lowJSValue(m_node->child1());
2991 setJSValue(vmCall(Int64, m_out.operation(operationArithTrunc), m_callFrame, argument));
2992 }
2993
2994 void compileArithSqrt()
2995 {
2996 if (m_node->child1().useKind() == DoubleRepUse) {
2997 setDouble(m_out.doubleSqrt(lowDouble(m_node->child1())));
2998 return;
2999 }
3000 LValue argument = lowJSValue(m_node->child1());
3001 LValue result = vmCall(Double, m_out.operation(operationArithSqrt), m_callFrame, argument);
3002 setDouble(result);
3003 }
3004
3005 void compileArithFRound()
3006 {
3007 if (m_node->child1().useKind() == DoubleRepUse) {
3008 setDouble(m_out.fround(lowDouble(m_node->child1())));
3009 return;
3010 }
3011 LValue argument = lowJSValue(m_node->child1());
3012 LValue result = vmCall(Double, m_out.operation(operationArithFRound), m_callFrame, argument);
3013 setDouble(result);
3014 }
3015
3016 void compileValueNegate()
3017 {
3018 DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
3019 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic);
3020 unsigned bytecodeIndex = m_node->origin.semantic.bytecodeIndex();
3021 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(bytecodeIndex);
3022 auto repatchingFunction = operationArithNegateOptimize;
3023 auto nonRepatchingFunction = operationArithNegate;
3024 compileUnaryMathIC<JITNegGenerator>(arithProfile, repatchingFunction, nonRepatchingFunction);
3025 }
3026
    // Lowers ArithNegate for the typed use kinds (Int32, Int52, Double); the
    // Untyped case is handled by ValueNegate. Emits overflow / negative-zero
    // speculation checks as dictated by the node's arith mode.
    void compileArithNegate()
    {
        switch (m_node->child1().useKind()) {
        case Int32Use: {
            LValue value = lowInt32(m_node->child1());

            LValue result;
            if (!shouldCheckOverflow(m_node->arithMode()))
                result = m_out.neg(value);
            else if (!shouldCheckNegativeZero(m_node->arithMode())) {
                // Checked negate as "0 - value": the CheckValue traps when the
                // subtraction overflows (i.e. value is INT32_MIN).
                CheckValue* check = m_out.speculateSub(m_out.int32Zero, value);
                blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
                result = check;
            } else {
                // value & 0x7fffffff is zero exactly when value is 0 or INT32_MIN:
                // negating 0 would produce -0 and negating INT32_MIN overflows, so
                // one mask test covers both deopt conditions.
                speculate(Overflow, noValue(), 0, m_out.testIsZero32(value, m_out.constInt32(0x7fffffff)));
                result = m_out.neg(value);
            }

            setInt32(result);
            break;
        }

        case Int52RepUse: {
            if (!abstractValue(m_node->child1()).couldBeType(SpecNonInt32AsInt52)) {
                // The abstract value fits int32, so negation cannot overflow int52;
                // only the -0 case can require a deopt.
                Int52Kind kind;
                LValue value = lowWhicheverInt52(m_node->child1(), kind);
                LValue result = m_out.neg(value);
                if (shouldCheckNegativeZero(m_node->arithMode()))
                    speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
                setInt52(result, kind);
                break;
            }

            // Full-width int52: use a checked "0 - value" subtraction for overflow.
            LValue value = lowInt52(m_node->child1());
            CheckValue* result = m_out.speculateSub(m_out.int64Zero, value);
            blessSpeculation(result, Int52Overflow, noValue(), nullptr, m_origin);
            if (shouldCheckNegativeZero(m_node->arithMode()))
                speculate(NegativeZero, noValue(), 0, m_out.isZero64(result));
            setInt52(result);
            break;
        }

        case DoubleRepUse: {
            // Double negation is always exact and -0 is representable: no checks.
            setDouble(m_out.doubleNeg(lowDouble(m_node->child1())));
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
3079
3080 void compileValueBitNot()
3081 {
3082 if (m_node->child1().useKind() == BigIntUse) {
3083 LValue operand = lowBigInt(m_node->child1());
3084 LValue result = vmCall(pointerType(), m_out.operation(operationBitNotBigInt), m_callFrame, operand);
3085 setJSValue(result);
3086 return;
3087 }
3088
3089 LValue operand = lowJSValue(m_node->child1());
3090 LValue result = vmCall(Int64, m_out.operation(operationValueBitNot), m_callFrame, operand);
3091 setJSValue(result);
3092 }
3093
3094 void compileArithBitNot()
3095 {
3096 setInt32(m_out.bitNot(lowInt32(m_node->child1())));
3097 }
3098
3099 void compileValueBitAnd()
3100 {
3101 if (m_node->isBinaryUseKind(BigIntUse)) {
3102 LValue left = lowBigInt(m_node->child1());
3103 LValue right = lowBigInt(m_node->child2());
3104
3105 LValue result = vmCall(pointerType(), m_out.operation(operationBitAndBigInt), m_callFrame, left, right);
3106 setJSValue(result);
3107 return;
3108 }
3109
3110 emitBinaryBitOpSnippet<JITBitAndGenerator>(operationValueBitAnd);
3111 }
3112
3113 void compileArithBitAnd()
3114 {
3115 setInt32(m_out.bitAnd(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3116 }
3117
3118 void compileValueBitOr()
3119 {
3120 if (m_node->isBinaryUseKind(BigIntUse)) {
3121 LValue left = lowBigInt(m_node->child1());
3122 LValue right = lowBigInt(m_node->child2());
3123
3124 LValue result = vmCall(pointerType(), m_out.operation(operationBitOrBigInt), m_callFrame, left, right);
3125 setJSValue(result);
3126 return;
3127 }
3128
3129 emitBinaryBitOpSnippet<JITBitOrGenerator>(operationValueBitOr);
3130 }
3131
3132 void compileArithBitOr()
3133 {
3134 setInt32(m_out.bitOr(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3135 }
3136
3137 void compileValueBitXor()
3138 {
3139 if (m_node->isBinaryUseKind(BigIntUse)) {
3140 LValue left = lowBigInt(m_node->child1());
3141 LValue right = lowBigInt(m_node->child2());
3142
3143 LValue result = vmCall(pointerType(), m_out.operation(operationBitXorBigInt), m_callFrame, left, right);
3144 setJSValue(result);
3145 return;
3146 }
3147
3148 emitBinaryBitOpSnippet<JITBitXorGenerator>(operationValueBitXor);
3149 }
3150
3151 void compileArithBitXor()
3152 {
3153 setInt32(m_out.bitXor(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
3154 }
3155
3156 void compileBitRShift()
3157 {
3158 if (m_node->isBinaryUseKind(UntypedUse)) {
3159 emitRightShiftSnippet(JITRightShiftGenerator::SignedShift);
3160 return;
3161 }
3162 setInt32(m_out.aShr(
3163 lowInt32(m_node->child1()),
3164 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3165 }
3166
3167 void compileBitLShift()
3168 {
3169 if (m_node->isBinaryUseKind(UntypedUse)) {
3170 emitBinaryBitOpSnippet<JITLeftShiftGenerator>(operationValueBitLShift);
3171 return;
3172 }
3173 setInt32(m_out.shl(
3174 lowInt32(m_node->child1()),
3175 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3176 }
3177
3178 void compileBitURShift()
3179 {
3180 if (m_node->isBinaryUseKind(UntypedUse)) {
3181 emitRightShiftSnippet(JITRightShiftGenerator::UnsignedShift);
3182 return;
3183 }
3184 setInt32(m_out.lShr(
3185 lowInt32(m_node->child1()),
3186 m_out.bitAnd(lowInt32(m_node->child2()), m_out.constInt32(31))));
3187 }
3188
3189 void compileUInt32ToNumber()
3190 {
3191 LValue value = lowInt32(m_node->child1());
3192
3193 if (doesOverflow(m_node->arithMode())) {
3194 setStrictInt52(m_out.zeroExtPtr(value));
3195 return;
3196 }
3197
3198 speculate(Overflow, noValue(), 0, m_out.lessThan(value, m_out.int32Zero));
3199 setInt32(value);
3200 }
3201
    // Lowers CheckStructure: deopt unless child1's structure ID is a member of
    // the node's structure set.
    void compileCheckStructure()
    {
        // Use a distinct exit kind for constant bases so the exit profile can
        // distinguish a bad constant-derived cache from an ordinary cache miss.
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        switch (m_node->child1().useKind()) {
        case CellUse:
        case KnownCellUse: {
            LValue cell = lowCell(m_node->child1());

            checkStructure(
                m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            return;
        }

        case CellOrOtherUse: {
            LValue value = lowJSValue(m_node->child1(), ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            // Cells get the structure-set check.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            checkStructure(
                m_out.load32(value, m_heaps.JSCell_structureID), jsValueValue(value),
                exitKind, m_node->structureSet(),
                [&] (RegisteredStructure structure) {
                    return weakStructureID(structure);
                });
            m_out.jump(continuation);

            // Non-cells only need to satisfy the "other" type check.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), m_node->child1(), SpecCell | SpecOther, isNotOther(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3256
    // Like compileCheckStructure, but tolerates the empty value: when the
    // abstract interpreter says child1 may be empty, an all-zero-bits value
    // skips the structure check entirely.
    void compileCheckStructureOrEmpty()
    {
        // Same exit-kind distinction as compileCheckStructure.
        ExitKind exitKind;
        if (m_node->child1()->hasConstant())
            exitKind = BadConstantCache;
        else
            exitKind = BadCache;

        LValue cell = lowCell(m_node->child1());
        bool maySeeEmptyValue = m_interpreter.forNode(m_node->child1()).m_type & SpecEmpty;
        LBasicBlock notEmpty;
        LBasicBlock continuation;
        LBasicBlock lastNext;
        if (maySeeEmptyValue) {
            notEmpty = m_out.newBlock();
            continuation = m_out.newBlock();
            // The empty value is encoded as zero: branch around the check for it.
            m_out.branch(m_out.isZero64(cell), unsure(continuation), unsure(notEmpty));
            lastNext = m_out.appendTo(notEmpty, continuation);
        }

        checkStructure(
            m_out.load32(cell, m_heaps.JSCell_structureID), jsValueValue(cell),
            exitKind, m_node->structureSet(),
            [&] (RegisteredStructure structure) {
                return weakStructureID(structure);
            });

        if (maySeeEmptyValue) {
            m_out.jump(continuation);
            m_out.appendTo(continuation, lastNext);
        }
    }
3289
3290 void compileCheckCell()
3291 {
3292 LValue cell = lowCell(m_node->child1());
3293
3294 speculate(
3295 BadCell, jsValueValue(cell), m_node->child1().node(),
3296 m_out.notEqual(cell, weakPointer(m_node->cellOperand()->cell())));
3297 }
3298
    // CheckBadCell: unconditionally OSR-exits with the BadCell exit kind.
    void compileCheckBadCell()
    {
        terminate(BadCell);
    }
3303
3304 void compileCheckNotEmpty()
3305 {
3306 speculate(TDZFailure, noValue(), nullptr, m_out.isZero64(lowJSValue(m_node->child1())));
3307 }
3308
    // Validation-build-only assertion that child1 is not the empty value.
    // Rather than OSR-exiting, the generated code hits a breakpoint so a
    // failure stops in the debugger at the offending site.
    void compileAssertNotEmpty()
    {
        if (!validationEnabled())
            return;

        LValue val = lowJSValue(m_node->child1());
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(val);
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                GPRReg input = params[0].gpr();
                // Fall through to the breakpoint only when the value is empty.
                CCallHelpers::Jump done = jit.branchIfNotEmpty(input);
                jit.breakpoint();
                done.link(&jit);
            });
    }
3326
3327 void compileCheckStringIdent()
3328 {
3329 UniquedStringImpl* uid = m_node->uidOperand();
3330 LValue stringImpl = lowStringIdent(m_node->child1());
3331 speculate(BadIdent, noValue(), nullptr, m_out.notEqual(stringImpl, m_out.constIntPtr(uid)));
3332 }
3333
3334 void compileGetExecutable()
3335 {
3336 LValue cell = lowCell(m_node->child1());
3337 speculateFunction(m_node->child1(), cell);
3338 setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
3339 }
3340
    // Lowers Arrayify / ArrayifyToStructure: when the cell does not already
    // have the desired indexing shape (or, for ArrayifyToStructure, the exact
    // structure), call the matching operationEnsure* to convert its storage,
    // then re-check and deopt if the conversion did not produce the expected
    // shape.
    void compileArrayify()
    {
        LValue cell = lowCell(m_node->child1());
        LValue property = !!m_node->child2() ? lowInt32(m_node->child2()) : 0;

        LBasicBlock unexpectedStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Used both for the initial fast check and for the re-check after the
        // runtime conversion.
        auto isUnexpectedArray = [&] (LValue cell) {
            if (m_node->op() == Arrayify)
                return m_out.logicalNot(isArrayTypeForArrayify(cell, m_node->arrayMode()));

            ASSERT(m_node->op() == ArrayifyToStructure);
            return m_out.notEqual(m_out.load32(cell, m_heaps.JSCell_structureID), weakStructureID(m_node->structure()));
        };

        m_out.branch(isUnexpectedArray(cell), rarely(unexpectedStructure), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(unexpectedStructure, continuation);

        if (property) {
            // Indices at or above MIN_SPARSE_ARRAY_INDEX cannot be served by the
            // contiguous shapes below, so deopt instead of converting.
            switch (m_node->arrayMode().type()) {
            case Array::Int32:
            case Array::Double:
            case Array::Contiguous:
                speculate(
                    Uncountable, noValue(), 0,
                    m_out.aboveOrEqual(property, m_out.constInt32(MIN_SPARSE_ARRAY_INDEX)));
                break;
            default:
                break;
            }
        }

        // Convert the storage to the requested shape via the runtime.
        switch (m_node->arrayMode().type()) {
        case Array::Int32:
            vmCall(Void, m_out.operation(operationEnsureInt32), m_callFrame, cell);
            break;
        case Array::Double:
            vmCall(Void, m_out.operation(operationEnsureDouble), m_callFrame, cell);
            break;
        case Array::Contiguous:
            vmCall(Void, m_out.operation(operationEnsureContiguous), m_callFrame, cell);
            break;
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            vmCall(Void, m_out.operation(operationEnsureArrayStorage), m_callFrame, cell);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            break;
        }

        // The ensure call may not have produced the expected shape; deopt if so.
        speculate(BadIndexingType, jsValueValue(cell), 0, isUnexpectedArray(cell));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
3399
    // Lowers PutStructure: performs a structure transition by rewriting only
    // the cell's structure ID word. The asserts verify that the transition
    // leaves indexing mode and type info untouched, which is what makes the
    // single 32-bit store sufficient.
    void compilePutStructure()
    {
        m_ftlState.jitCode->common.notifyCompilingStructureTransition(m_graph.m_plan, codeBlock(), m_node);

        RegisteredStructure oldStructure = m_node->transition()->previous;
        RegisteredStructure newStructure = m_node->transition()->next;
        ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode());
        ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
        ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());

        LValue cell = lowCell(m_node->child1());
        m_out.store32(
            weakStructureID(newStructure),
            cell, m_heaps.JSCell_structureID);
    }
3415
    // Lowers GetById-style nodes (Get, TryGet, GetDirect). Cell bases use the
    // inline-cache-based getById() path directly; untyped bases branch on
    // cell-ness at runtime, sending non-cells to the generic runtime lookup.
    void compileGetById(AccessType type)
    {
        ASSERT(type == AccessType::Get || type == AccessType::TryGet || type == AccessType::GetDirect);
        switch (m_node->child1().useKind()) {
        case CellUse: {
            setJSValue(getById(lowCell(m_node->child1()), type));
            return;
        }

        case UntypedUse: {
            // This is pretty weird, since we duplicate the slow path both here and in the
            // code generated by the IC. We should investigate making this less bad.
            // https://bugs.webkit.org/show_bug.cgi?id=127830
            LValue value = lowJSValue(m_node->child1());

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(value, provenType(m_node->child1())), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getById(value, type));
            m_out.jump(continuation);

            J_JITOperation_EJI getByIdFunction = appropriateGenericGetByIdFunction(type);

            // Non-cell bases go straight to the generic lookup operation.
            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(getByIdFunction),
                m_callFrame, value,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
3461
    // Lowers GetByIdWithThis. When both base and this are proven cells, the IC
    // path is used directly; otherwise we take the IC path only when both turn
    // out to be cells at runtime, and fall back to the generic operation if
    // either is not a cell.
    void compileGetByIdWithThis()
    {
        if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
            setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
        else {
            LValue base = lowJSValue(m_node->child1());
            LValue thisValue = lowJSValue(m_node->child2());

            LBasicBlock baseCellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock thisValueCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(
                isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);

            // Base is a cell; the IC path additionally requires this to be one.
            m_out.branch(
                isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));

            m_out.appendTo(thisValueCellCase, notCellCase);
            ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, continuation);
            ValueFromBlock notCellResult = m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByIdWithThisGeneric),
                m_callFrame, base, thisValue,
                m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, cellResult, notCellResult));
        }

    }
3499
3500 void compileGetByValWithThis()
3501 {
3502 LValue base = lowJSValue(m_node->child1());
3503 LValue thisValue = lowJSValue(m_node->child2());
3504 LValue subscript = lowJSValue(m_node->child3());
3505
3506 LValue result = vmCall(Int64, m_out.operation(operationGetByValWithThis), m_callFrame, base, thisValue, subscript);
3507 setJSValue(result);
3508 }
3509
3510 void compilePutByIdWithThis()
3511 {
3512 LValue base = lowJSValue(m_node->child1());
3513 LValue thisValue = lowJSValue(m_node->child2());
3514 LValue value = lowJSValue(m_node->child3());
3515
3516 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByIdWithThisStrict : operationPutByIdWithThis),
3517 m_callFrame, base, thisValue, value, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
3518 }
3519
3520 void compilePutByValWithThis()
3521 {
3522 LValue base = lowJSValue(m_graph.varArgChild(m_node, 0));
3523 LValue thisValue = lowJSValue(m_graph.varArgChild(m_node, 1));
3524 LValue property = lowJSValue(m_graph.varArgChild(m_node, 2));
3525 LValue value = lowJSValue(m_graph.varArgChild(m_node, 3));
3526
3527 vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis),
3528 m_callFrame, base, thisValue, property, value);
3529 }
3530
    // Lowers the Atomics read-modify-write family (add/and/or/xor/sub,
    // exchange, compareExchange, load, store). When typed-array storage is
    // available, the B3 atomic primitives are emitted inline; otherwise the
    // generic JS runtime operation is called with boxed arguments.
    void compileAtomicsReadModifyWrite()
    {
        TypedArrayType type = m_node->arrayMode().typedArrayType();
        unsigned numExtraArgs = numExtraAtomicsArgs(m_node->op());
        Edge baseEdge = m_graph.child(m_node, 0);
        Edge indexEdge = m_graph.child(m_node, 1);
        Edge argEdges[maxNumExtraAtomicsArgs];
        for (unsigned i = numExtraArgs; i--;)
            argEdges[i] = m_graph.child(m_node, 2 + i);
        Edge storageEdge = m_graph.child(m_node, 2 + numExtraArgs);

        // Selects the generic slow-path entry point matching this node's opcode.
        auto operation = [&] () -> LValue {
            switch (m_node->op()) {
            case AtomicsAdd:
                return m_out.operation(operationAtomicsAdd);
            case AtomicsAnd:
                return m_out.operation(operationAtomicsAnd);
            case AtomicsCompareExchange:
                return m_out.operation(operationAtomicsCompareExchange);
            case AtomicsExchange:
                return m_out.operation(operationAtomicsExchange);
            case AtomicsLoad:
                return m_out.operation(operationAtomicsLoad);
            case AtomicsOr:
                return m_out.operation(operationAtomicsOr);
            case AtomicsStore:
                return m_out.operation(operationAtomicsStore);
            case AtomicsSub:
                return m_out.operation(operationAtomicsSub);
            case AtomicsXor:
                return m_out.operation(operationAtomicsXor);
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
        };

        // Without a storage edge there is no typed-array fast path: call the
        // generic operation with all operands boxed.
        if (!storageEdge) {
            Vector<LValue> args;
            args.append(m_callFrame);
            args.append(lowJSValue(baseEdge));
            args.append(lowJSValue(indexEdge));
            for (unsigned i = 0; i < numExtraArgs; ++i)
                args.append(lowJSValue(argEdges[i]));
            LValue result = vmCall(Int64, operation(), args);
            setJSValue(result);
            return;
        }

        LValue index = lowInt32(indexEdge);
        LValue args[2];
        for (unsigned i = numExtraArgs; i--;)
            args[i] = getIntTypedArrayStoreOperand(argEdges[i]);
        LValue storage = lowStorage(storageEdge);

        TypedPointer pointer = pointerIntoTypedArray(storage, index, type);
        Width width = widthForBytes(elementSize(type));

        LValue atomicValue;
        LValue result;

        // For sub-word signed element types, mask the raw atomic result down to
        // the element's bits before handing it to the load-result machinery.
        auto sanitizeResult = [&] (LValue value) -> LValue {
            if (isSigned(type)) {
                switch (elementSize(type)) {
                case 1:
                    value = m_out.bitAnd(value, m_out.constInt32(0xff));
                    break;
                case 2:
                    value = m_out.bitAnd(value, m_out.constInt32(0xffff));
                    break;
                case 4:
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            return value;
        };

        switch (m_node->op()) {
        case AtomicsAdd:
            atomicValue = m_out.atomicXchgAdd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsAnd:
            atomicValue = m_out.atomicXchgAnd(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsCompareExchange:
            atomicValue = m_out.atomicStrongCAS(args[0], args[1], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsExchange:
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsLoad:
            // Atomic load expressed as an exchange-add of zero: reads the current
            // value while still participating in the atomic ordering.
            atomicValue = m_out.atomicXchgAdd(m_out.int32Zero, pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsOr:
            atomicValue = m_out.atomicXchgOr(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsStore:
            // Store's JS-visible result is the stored input value, not the old
            // memory contents returned by the exchange.
            atomicValue = m_out.atomicXchg(args[0], pointer, width);
            result = args[0];
            break;
        case AtomicsSub:
            atomicValue = m_out.atomicXchgSub(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        case AtomicsXor:
            atomicValue = m_out.atomicXchgXor(args[0], pointer, width);
            result = sanitizeResult(atomicValue);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        // Signify that the state against which the atomic operations are serialized is confined to just
        // the typed array storage, since that's as precise of an abstraction as we can have of shared
        // array buffer storage.
        m_heaps.decorateFencedAccess(&m_heaps.typedArrayProperties, atomicValue);

        setIntTypedArrayLoadResult(result, type);
    }
3659
    // Lowers AtomicsIsLockFree. With an Int32 byte count the answer is computed
    // inline: sizes 1, 2 and 4 are lock-free, everything else is not. Any other
    // use kind defers to the runtime operation.
    void compileAtomicsIsLockFree()
    {
        if (m_node->child1().useKind() != Int32Use) {
            setJSValue(vmCall(Int64, m_out.operation(operationAtomicsIsLockFree), m_callFrame, lowJSValue(m_node->child1())));
            return;
        }

        LValue bytes = lowInt32(m_node->child1());

        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueCase);

        // Switch on the byte count: 1, 2 and 4 all route to the true block.
        Vector<SwitchCase> cases;
        cases.append(SwitchCase(m_out.constInt32(1), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(2), trueCase, Weight()));
        cases.append(SwitchCase(m_out.constInt32(4), trueCase, Weight()));
        m_out.switchInstruction(bytes, cases, falseCase, Weight());

        m_out.appendTo(trueCase, falseCase);
        ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);
        m_out.appendTo(falseCase, continuation);
        ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueValue, falseValue));
    }
3691
3692 void compileDefineDataProperty()
3693 {
3694 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3695 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
3696 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 3));
3697 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3698 switch (propertyEdge.useKind()) {
3699 case StringUse: {
3700 LValue property = lowString(propertyEdge);
3701 vmCall(Void, m_out.operation(operationDefineDataPropertyString), m_callFrame, base, property, value, attributes);
3702 break;
3703 }
3704 case StringIdentUse: {
3705 LValue property = lowStringIdent(propertyEdge);
3706 vmCall(Void, m_out.operation(operationDefineDataPropertyStringIdent), m_callFrame, base, property, value, attributes);
3707 break;
3708 }
3709 case SymbolUse: {
3710 LValue property = lowSymbol(propertyEdge);
3711 vmCall(Void, m_out.operation(operationDefineDataPropertySymbol), m_callFrame, base, property, value, attributes);
3712 break;
3713 }
3714 case UntypedUse: {
3715 LValue property = lowJSValue(propertyEdge);
3716 vmCall(Void, m_out.operation(operationDefineDataProperty), m_callFrame, base, property, value, attributes);
3717 break;
3718 }
3719 default:
3720 RELEASE_ASSERT_NOT_REACHED();
3721 }
3722 }
3723
3724 void compileDefineAccessorProperty()
3725 {
3726 LValue base = lowCell(m_graph.varArgChild(m_node, 0));
3727 LValue getter = lowCell(m_graph.varArgChild(m_node, 2));
3728 LValue setter = lowCell(m_graph.varArgChild(m_node, 3));
3729 LValue attributes = lowInt32(m_graph.varArgChild(m_node, 4));
3730 Edge& propertyEdge = m_graph.varArgChild(m_node, 1);
3731 switch (propertyEdge.useKind()) {
3732 case StringUse: {
3733 LValue property = lowString(propertyEdge);
3734 vmCall(Void, m_out.operation(operationDefineAccessorPropertyString), m_callFrame, base, property, getter, setter, attributes);
3735 break;
3736 }
3737 case StringIdentUse: {
3738 LValue property = lowStringIdent(propertyEdge);
3739 vmCall(Void, m_out.operation(operationDefineAccessorPropertyStringIdent), m_callFrame, base, property, getter, setter, attributes);
3740 break;
3741 }
3742 case SymbolUse: {
3743 LValue property = lowSymbol(propertyEdge);
3744 vmCall(Void, m_out.operation(operationDefineAccessorPropertySymbol), m_callFrame, base, property, getter, setter, attributes);
3745 break;
3746 }
3747 case UntypedUse: {
3748 LValue property = lowJSValue(propertyEdge);
3749 vmCall(Void, m_out.operation(operationDefineAccessorProperty), m_callFrame, base, property, getter, setter, attributes);
3750 break;
3751 }
3752 default:
3753 RELEASE_ASSERT_NOT_REACHED();
3754 }
3755 }
3756
    // Lowers PutById / PutByIdDirect via a patchpoint that embeds a
    // JITPutByIdGenerator inline cache; the IC's slow path is emitted as a late
    // path that calls the generator's slow-path function and links back to the
    // fast-path exit.
    void compilePutById()
    {
        DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == CellUse, m_node->child1().useKind());

        Node* node = m_node;
        LValue base = lowCell(node->child1());
        LValue value = lowJSValue(node->child2());
        auto uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(value);
        // The IC needs the JSValue tag registers pinned to their usual homes.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        // FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        ECMAMode ecmaMode = m_graph.executableFor(node->origin.semantic)->ecmaMode();

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // JS setter call ICs generated by the PutById IC will need this.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                auto generator = Box<JITPutByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), GPRInfo::patchpointScratchRegister, ecmaMode,
                    node->op() == PutByIdDirect ? Direct : NotDirect);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // The slow path is emitted out of line and jumps back to "done".
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), generator->slowPathFunction(), InvalidGPRReg,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[0].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });
    }
3826
3827 void compileGetButterfly()
3828 {
3829 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly);
3830 setStorage(butterfly);
3831 }
3832
3833 void compileConstantStoragePointer()
3834 {
3835 setStorage(m_out.constIntPtr(m_node->storagePointer()));
3836 }
3837
    // Lowers GetIndexedPropertyStorage: produces the backing-store pointer used
    // for indexed access. Strings may need their rope resolved by the runtime
    // first; typed arrays load their vector pointer and cage it.
    void compileGetIndexedPropertyStorage()
    {
        LValue cell = lowCell(m_node->child1());

        if (m_node->arrayMode().type() == Array::String) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue fastResultValue = m_out.loadPtr(cell, m_heaps.JSString_value);
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);

            // Rope strings have no flat buffer yet; resolve them via the runtime.
            m_out.branch(isRopeString(cell, m_node->child1()), rarely(slowPath), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);

            ValueFromBlock slowResult = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, cell));

            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);

            // Either path yields an impl pointer; return its character data.
            setStorage(m_out.loadPtr(m_out.phi(pointerType(), fastResult, slowResult), m_heaps.StringImpl_data));
            return;
        }

        DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType()), m_node->arrayMode().typedArrayType());
        LValue vector = m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector);
        // Cage the vector pointer within the primitive Gigacage before use.
        setStorage(caged(Gigacage::Primitive, vector, cell));
    }
3868
3869 void compileCheckArray()
3870 {
3871 Edge edge = m_node->child1();
3872 LValue cell = lowCell(edge);
3873
3874 if (m_node->arrayMode().alreadyChecked(m_graph, m_node, abstractValue(edge)))
3875 return;
3876
3877 speculate(
3878 BadIndexingType, jsValueValue(cell), 0,
3879 m_out.logicalNot(isArrayTypeForCheckArray(cell, m_node->arrayMode())));
3880 }
3881
// Computes the view's byte offset into its backing buffer. Non-wasteful
// views always yield 0; wasteful views compute vector - buffer->data,
// with a null vector short-circuiting to 0.
void compileGetTypedArrayByteOffset()
{
    LValue basePtr = lowCell(m_node->child1());

    LBasicBlock simpleCase = m_out.newBlock();
    LBasicBlock wastefulCase = m_out.newBlock();
    LBasicBlock notNull = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    LValue mode = m_out.load32(basePtr, m_heaps.JSArrayBufferView_mode);
    m_out.branch(
        m_out.notEqual(mode, m_out.constInt32(WastefulTypedArray)),
        unsure(simpleCase), unsure(wastefulCase));

    LBasicBlock lastNext = m_out.appendTo(simpleCase, wastefulCase);

    // Non-wasteful views have offset 0 by construction.
    ValueFromBlock simpleOut = m_out.anchor(m_out.constIntPtr(0));

    m_out.jump(continuation);

    m_out.appendTo(wastefulCase, notNull);

    LValue vector = m_out.loadPtr(basePtr, m_heaps.JSArrayBufferView_vector);
    // If the vector is null, this anchor feeds 0 into the phi directly.
    ValueFromBlock nullVectorOut = m_out.anchor(vector);
    m_out.branch(vector, unsure(notNull), unsure(continuation));

    m_out.appendTo(notNull, continuation);

    // Reach the ArrayBuffer through the butterfly, applying Gigacage
    // checks to both the butterfly and the vector pointer.
    LValue butterflyPtr = caged(Gigacage::JSValue, m_out.loadPtr(basePtr, m_heaps.JSObject_butterfly), basePtr);
    LValue arrayBufferPtr = m_out.loadPtr(butterflyPtr, m_heaps.Butterfly_arrayBuffer);

    LValue vectorPtr = caged(Gigacage::Primitive, vector, basePtr);

    // FIXME: This needs caging.
    // https://bugs.webkit.org/show_bug.cgi?id=175515
    LValue dataPtr = m_out.loadPtr(arrayBufferPtr, m_heaps.ArrayBuffer_data);
    dataPtr = removeArrayPtrTag(dataPtr);

    ValueFromBlock wastefulOut = m_out.anchor(m_out.sub(vectorPtr, dataPtr));

    m_out.jump(continuation);
    m_out.appendTo(continuation, lastNext);

    setInt32(m_out.castToInt32(m_out.phi(pointerType(), simpleOut, nullVectorOut, wastefulOut)));
}
3927
// Implements GetPrototypeOf. For speculated array/function/final-object
// uses we read the prototype inline — either from the Structure (mono
// proto) or from the object's inline storage (poly proto). Other use
// kinds fall back to a VM call.
void compileGetPrototypeOf()
{
    switch (m_node->child1().useKind()) {
    case ArrayUse:
    case FunctionUse:
    case FinalObjectUse: {
        LValue object = lowCell(m_node->child1());
        switch (m_node->child1().useKind()) {
        case ArrayUse:
            speculateArray(m_node->child1(), object);
            break;
        case FunctionUse:
            speculateFunction(m_node->child1(), object);
            break;
        case FinalObjectUse:
            speculateFinalObject(m_node->child1(), object);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        LValue structure = loadStructure(object);

        // If abstract interpretation proved a finite set of object
        // structures that all agree on mono vs. poly proto, we can emit a
        // single unconditional load.
        AbstractValue& value = m_state.forNode(m_node->child1());
        if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
            bool hasPolyProto = false;
            bool hasMonoProto = false;
            value.m_structure.forEach([&] (RegisteredStructure structure) {
                if (structure->hasPolyProto())
                    hasPolyProto = true;
                else
                    hasMonoProto = true;
            });

            if (hasMonoProto && !hasPolyProto) {
                setJSValue(m_out.load64(structure, m_heaps.Structure_prototype));
                return;
            }

            if (hasPolyProto && !hasMonoProto) {
                setJSValue(m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
                return;
            }
        }

        // Mixed/unknown case: a zero Structure::prototype marks poly
        // proto, so branch and load from inline storage when it is zero.
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock loadPolyProto = m_out.newBlock();

        LValue prototypeBits = m_out.load64(structure, m_heaps.Structure_prototype);
        ValueFromBlock directPrototype = m_out.anchor(prototypeBits);
        m_out.branch(m_out.isZero64(prototypeBits), unsure(loadPolyProto), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loadPolyProto, continuation);
        ValueFromBlock polyProto = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), object, m_out.constInt64(knownPolyProtoOffset), ScaleEight, JSObject::offsetOfInlineStorage())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, directPrototype, polyProto));
        return;
    }
    case ObjectUse: {
        setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOfObject), m_callFrame, lowObject(m_node->child1())));
        return;
    }
    default: {
        setJSValue(vmCall(Int64, m_out.operation(operationGetPrototypeOf), m_callFrame, lowJSValue(m_node->child1())));
        return;
    }
    }
}
4000
// Implements GetArrayLength for each supported array mode; the result is
// always an int32. Modes whose length could be misread (ArrayStorage) or
// invalidated (arguments objects) speculate first.
void compileGetArrayLength()
{
    switch (m_node->arrayMode().type()) {
    case Array::Undecided:
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.Butterfly_publicLength));
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // publicLength is not known non-negative here; exit rather than
        // return a negative int32.
        LValue length = m_out.load32(lowStorage(m_node->child2()), m_heaps.ArrayStorage_publicLength);
        speculate(Uncountable, noValue(), nullptr, m_out.lessThan(length, m_out.int32Zero));
        setInt32(length);
        return;
    }

    case Array::String: {
        LValue string = lowCell(m_node->child1());

        LBasicBlock ropePath = m_out.newBlock();
        LBasicBlock nonRopePath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Ropes store their length on the JSRopeString itself; resolved
        // strings store it on the StringImpl.
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropePath), usually(nonRopePath));

        LBasicBlock lastNext = m_out.appendTo(ropePath, nonRopePath);
        ValueFromBlock ropeLength = m_out.anchor(m_out.load32NonNegative(string, m_heaps.JSRopeString_length));
        m_out.jump(continuation);

        m_out.appendTo(nonRopePath, continuation);
        ValueFromBlock nonRopeLength = m_out.anchor(m_out.load32NonNegative(m_out.loadPtr(string, m_heaps.JSString_value), m_heaps.StringImpl_length));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, ropeLength, nonRopeLength));
        return;
    }

    case Array::DirectArguments: {
        // Exit if arguments have been mapped elsewhere (mappedArguments
        // non-null); the plain length field is only valid otherwise.
        LValue arguments = lowCell(m_node->child1());
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notNull(m_out.loadPtr(arguments, m_heaps.DirectArguments_mappedArguments)));
        setInt32(m_out.load32NonNegative(arguments, m_heaps.DirectArguments_length));
        return;
    }

    case Array::ScopedArguments: {
        // Exit if the arguments object "overrode things" (per the storage
        // flag); otherwise totalLength is authoritative.
        LValue arguments = lowCell(m_node->child1());
        LValue storage = m_out.loadPtr(arguments, m_heaps.ScopedArguments_storage);
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notZero32(m_out.load8ZeroExt32(storage, m_heaps.ScopedArguments_Storage_overrodeThings)));
        setInt32(m_out.load32NonNegative(storage, m_heaps.ScopedArguments_Storage_totalLength));
        return;
    }

    default:
        if (m_node->arrayMode().isSomeTypedArrayView()) {
            setInt32(
                m_out.load32NonNegative(lowCell(m_node->child1()), m_heaps.JSArrayBufferView_length));
            return;
        }

        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
4072
4073 void compileGetVectorLength()
4074 {
4075 switch (m_node->arrayMode().type()) {
4076 case Array::ArrayStorage:
4077 case Array::SlowPutArrayStorage:
4078 setInt32(m_out.load32NonNegative(lowStorage(m_node->child2()), m_heaps.ArrayStorage_vectorLength));
4079 return;
4080 default:
4081 return;
4082 }
4083 }
4084
4085 void compileCheckInBounds()
4086 {
4087 speculate(
4088 OutOfBounds, noValue(), 0,
4089 m_out.aboveOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
4090
4091 // Even though we claim to have JSValue result, no user of us should
4092 // depend on our value. Users of this node just need to maintain that
4093 // we dominate them.
4094 }
4095
// Implements GetByVal for every array mode. In-bounds modes load
// directly, speculating on holes; out-of-bounds-tolerant modes branch to
// a VM call on miss; Generic always calls into the VM (with specialized
// operations for object/string and object/symbol key patterns).
void compileGetByVal()
{
    switch (m_node->arrayMode().type()) {
    case Array::Int32:
    case Array::Contiguous: {
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
            m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

        LValue base = lowCell(m_graph.varArgChild(m_node, 0));

        if (m_node->arrayMode().isInBounds()) {
            // A zero slot encodes a hole. SaneChain lets us substitute
            // undefined for holes instead of OSR-exiting.
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            LValue isHole = m_out.isZero64(result);
            if (m_node->arrayMode().isSaneChain()) {
                DFG_ASSERT(
                    m_graph, m_node, m_node->arrayMode().type() == Array::Contiguous, m_node->arrayMode().type());
                result = m_out.select(
                    isHole, m_out.constInt64(JSValue::encode(jsUndefined())), result);
            } else
                speculate(LoadFromHole, noValue(), 0, isHole);
            setJSValue(result);
            return;
        }

        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Out-of-bounds indices and holes both go to the VM call.
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
            rarely(slowCase), usually(fastCase));

        LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);

        LValue fastResultValue = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.branch(
            m_out.isZero64(fastResultValue), rarely(slowCase), usually(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::Double: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

        if (m_node->arrayMode().isInBounds()) {
            // NaN (self-unordered) encodes a hole in double arrays.
            LValue result = m_out.loadDouble(
                baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));

            if (!m_node->arrayMode().isSaneChain()) {
                speculate(
                    LoadFromHole, noValue(), 0,
                    m_out.doubleNotEqualOrUnordered(result, result));
            }
            setDouble(result);
            break;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock boxPath = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
            rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, boxPath);
        LValue doubleValue = m_out.loadDouble(
            baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        // NaN means hole: take the VM call; otherwise box and continue.
        m_out.branch(
            m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue),
            rarely(slowCase), usually(boxPath));

        m_out.appendTo(boxPath, slowCase);
        ValueFromBlock fastResult = m_out.anchor(boxDouble(doubleValue));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::Undecided: {
        // Undecided arrays have no storage; any non-negative index reads
        // undefined, negative indices exit.
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        speculate(OutOfBounds, noValue(), m_node, m_out.lessThan(index, m_out.int32Zero));
        setJSValue(m_out.constInt64(ValueUndefined));
        return;
    }

    case Array::DirectArguments: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        // Fast handling only applies while no argument has been mapped
        // away.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));

        LValue length = m_out.load32NonNegative(base, m_heaps.DirectArguments_length);
        auto isOutOfBounds = m_out.aboveOrEqual(index, length);
        if (m_node->arrayMode().isInBounds()) {
            speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
            TypedPointer address = m_out.baseIndex(
                m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
            setJSValue(m_out.load64(address));
            return;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
        TypedPointer address = m_out.baseIndex(
            m_heaps.DirectArguments_storage,
            base,
            m_out.zeroExt(index, pointerType()));
        ValueFromBlock fastResult = m_out.anchor(m_out.load64(address));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::ScopedArguments: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        LValue storage = m_out.loadPtr(base, m_heaps.ScopedArguments_storage);
        LValue totalLength = m_out.load32NonNegative(
            storage, m_heaps.ScopedArguments_Storage_totalLength);
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.aboveOrEqual(index, totalLength));

        // Indices below namedLength resolve through the scope; the rest
        // live in the overflow storage.
        LValue table = m_out.loadPtr(base, m_heaps.ScopedArguments_table);
        LValue namedLength = m_out.load32(table, m_heaps.ScopedArgumentsTable_length);

        LBasicBlock namedCase = m_out.newBlock();
        LBasicBlock overflowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            m_out.aboveOrEqual(index, namedLength), unsure(overflowCase), unsure(namedCase));

        LBasicBlock lastNext = m_out.appendTo(namedCase, overflowCase);

        LValue scope = m_out.loadPtr(base, m_heaps.ScopedArguments_scope);
        LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments);

        TypedPointer address = m_out.baseIndex(
            m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index));
        LValue scopeOffset = m_out.load32(address);

        // An invalid scope offset means the slot was deleted/overridden.
        speculate(
            ExoticObjectMode, noValue(), nullptr,
            m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset)));

        address = m_out.baseIndex(
            m_heaps.JSLexicalEnvironment_variables, scope, m_out.zeroExtPtr(scopeOffset));
        ValueFromBlock namedResult = m_out.anchor(m_out.load64(address));
        m_out.jump(continuation);

        m_out.appendTo(overflowCase, continuation);

        address = m_out.baseIndex(
            m_heaps.ScopedArguments_Storage_storage, storage,
            m_out.zeroExtPtr(m_out.sub(index, namedLength)));
        LValue overflowValue = m_out.load64(address);
        // A zero overflow slot is a hole.
        speculate(ExoticObjectMode, noValue(), nullptr, m_out.isZero64(overflowValue));
        ValueFromBlock overflowResult = m_out.anchor(overflowValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        LValue result = m_out.phi(Int64, namedResult, overflowResult);
        // Spectre mitigation: mask the result by the bounds comparison.
        result = preciseIndexMask32(result, index, totalLength);

        setJSValue(result);
        return;
    }

    case Array::Generic: {
        // Pick a specialized VM operation when the key's use kind is
        // known to be a string or symbol on an object base.
        if (m_graph.varArgChild(m_node, 0).useKind() == ObjectUse) {
            if (m_graph.varArgChild(m_node, 1).useKind() == StringUse) {
                setJSValue(vmCall(
                    Int64, m_out.operation(operationGetByValObjectString), m_callFrame,
                    lowObject(m_graph.varArgChild(m_node, 0)), lowString(m_graph.varArgChild(m_node, 1))));
                return;
            }

            if (m_graph.varArgChild(m_node, 1).useKind() == SymbolUse) {
                setJSValue(vmCall(
                    Int64, m_out.operation(operationGetByValObjectSymbol), m_callFrame,
                    lowObject(m_graph.varArgChild(m_node, 0)), lowSymbol(m_graph.varArgChild(m_node, 1))));
                return;
            }
        }
        setJSValue(vmCall(
            Int64, m_out.operation(operationGetByVal), m_callFrame,
            lowJSValue(m_graph.varArgChild(m_node, 0)), lowJSValue(m_graph.varArgChild(m_node, 1))));
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        IndexedAbstractHeap& heap = m_heaps.ArrayStorage_vector;

        if (m_node->arrayMode().isInBounds()) {
            LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            speculate(LoadFromHole, noValue(), 0, m_out.isZero64(result));
            setJSValue(result);
            break;
        }

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bounds-check against vectorLength; misses and holes call out.
        m_out.branch(
            m_out.aboveOrEqual(index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
            rarely(slowCase), usually(inBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
        LValue result = m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
        ValueFromBlock fastResult = m_out.anchor(result);
        m_out.branch(
            m_out.isZero64(result),
            rarely(slowCase), usually(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByValObjectInt), m_callFrame, base, index));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, slowResult));
        return;
    }

    case Array::String: {
        compileStringCharAt();
        return;
    }

    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array: {
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));

        TypedArrayType type = m_node->arrayMode().typedArrayType();
        ASSERT(isTypedView(type));
        {
            TypedPointer pointer = pointerIntoTypedArray(storage, index, type);

            if (isInt(type)) {
                LValue result = loadFromIntTypedArray(pointer, type);
                bool canSpeculate = true;
                setIntTypedArrayLoadResult(result, type, canSpeculate);
                return;
            }

            ASSERT(isFloat(type));

            // Float loads are widened to double for the result.
            LValue result;
            switch (type) {
            case TypeFloat32:
                result = m_out.floatToDouble(m_out.loadFloat(pointer));
                break;
            case TypeFloat64:
                result = m_out.loadDouble(pointer);
                break;
            default:
                DFG_CRASH(m_graph, m_node, "Bad typed array type");
            }

            setDouble(result);
            return;
        }
    }

    case Array::AnyTypedArray:
    case Array::ForceExit:
    case Array::SelectUsingArguments:
    case Array::SelectUsingPredictions:
    case Array::Unprofiled:
        DFG_CRASH(m_graph, m_node, "Bad array type");
        return;
    }
}
4427
// Reads an argument of the current (possibly inlined) call frame by
// dynamic index. GetMyArgumentByValOutOfBounds yields undefined for
// out-of-range indices; the plain variant OSR-exits instead.
void compileGetMyArgumentByVal()
{
    InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();

    LValue originalIndex = lowInt32(m_node->child2());

    // Argument count is a compile-time constant for non-varargs inline
    // frames; otherwise load it from the frame's count register.
    LValue numberOfArgsIncludingThis;
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        numberOfArgsIncludingThis = m_out.constInt32(inlineCallFrame->argumentCountIncludingThis);
    else {
        VirtualRegister argumentCountRegister = AssemblyHelpers::argumentCount(inlineCallFrame);
        numberOfArgsIncludingThis = m_out.load32(payloadFor(argumentCountRegister));
    }

    LValue numberOfArgs = m_out.sub(numberOfArgsIncludingThis, m_out.int32One);
    LValue indexToCheck = originalIndex;
    if (m_node->numberOfArgumentsToSkip()) {
        // The skip offset is added with an overflow check.
        CheckValue* check = m_out.speculateAdd(indexToCheck, m_out.constInt32(m_node->numberOfArgumentsToSkip()));
        blessSpeculation(check, Overflow, noValue(), nullptr, m_origin);
        indexToCheck = check;
    }

    LValue isOutOfBounds = m_out.aboveOrEqual(indexToCheck, numberOfArgs);
    LBasicBlock continuation = nullptr;
    LBasicBlock lastNext = nullptr;
    ValueFromBlock slowResult;
    if (m_node->op() == GetMyArgumentByValOutOfBounds) {
        // Out-of-bounds tolerant variant: branch to the continuation with
        // undefined instead of exiting.
        LBasicBlock normalCase = m_out.newBlock();
        continuation = m_out.newBlock();

        slowResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined())));
        m_out.branch(isOutOfBounds, unsure(continuation), unsure(normalCase));

        lastNext = m_out.appendTo(normalCase, continuation);
    } else
        speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);

    // +1 skips over |this| in the argument area.
    LValue index = m_out.add(indexToCheck, m_out.int32One);

    TypedPointer base;
    if (inlineCallFrame) {
        // A non-varargs inline frame with only |this| has no loadable
        // argument slots, so base stays null.
        if (inlineCallFrame->argumentCountIncludingThis > 1)
            base = addressFor(inlineCallFrame->argumentsWithFixup[0].virtualRegister());
    } else
        base = addressFor(virtualRegisterForArgument(0));

    LValue result;
    if (base) {
        LValue pointer = m_out.baseIndex(
            base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
        result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
        // Spectre mitigation: mask the loaded value by the bounds check.
        result = preciseIndexMask32(result, indexToCheck, numberOfArgs);
    } else
        result = m_out.constInt64(JSValue::encode(jsUndefined()));

    if (m_node->op() == GetMyArgumentByValOutOfBounds) {
        ValueFromBlock normalResult = m_out.anchor(result);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        result = m_out.phi(Int64, slowResult, normalResult);
    }

    setJSValue(result);
}
4493
// Implements PutByVal / PutByValDirect / PutByValAlias. Generic mode
// always calls the VM (choosing an operation by direct-ness, strictness,
// and key use kind); specialized modes store inline, handling holes and
// out-of-bounds growth via helper operations.
void compilePutByVal()
{
    Edge child1 = m_graph.varArgChild(m_node, 0); // base
    Edge child2 = m_graph.varArgChild(m_node, 1); // property/index
    Edge child3 = m_graph.varArgChild(m_node, 2); // value
    Edge child4 = m_graph.varArgChild(m_node, 3); // storage
    Edge child5 = m_graph.varArgChild(m_node, 4); // typed-array length (OOB check)

    ArrayMode arrayMode = m_node->arrayMode().modeForPut();
    switch (arrayMode.type()) {
    case Array::Generic: {
        if (child1.useKind() == CellUse) {
            V_JITOperation_ECCJ operation = nullptr;
            if (child2.useKind() == StringUse) {
                if (m_node->op() == PutByValDirect) {
                    if (m_graph.isStrictModeFor(m_node->origin.semantic))
                        operation = operationPutByValDirectCellStringStrict;
                    else
                        operation = operationPutByValDirectCellStringNonStrict;
                } else {
                    if (m_graph.isStrictModeFor(m_node->origin.semantic))
                        operation = operationPutByValCellStringStrict;
                    else
                        operation = operationPutByValCellStringNonStrict;
                }
                vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowString(child2), lowJSValue(child3));
                return;
            }

            if (child2.useKind() == SymbolUse) {
                if (m_node->op() == PutByValDirect) {
                    if (m_graph.isStrictModeFor(m_node->origin.semantic))
                        operation = operationPutByValDirectCellSymbolStrict;
                    else
                        operation = operationPutByValDirectCellSymbolNonStrict;
                } else {
                    if (m_graph.isStrictModeFor(m_node->origin.semantic))
                        operation = operationPutByValCellSymbolStrict;
                    else
                        operation = operationPutByValCellSymbolNonStrict;
                }
                vmCall(Void, m_out.operation(operation), m_callFrame, lowCell(child1), lowSymbol(child2), lowJSValue(child3));
                return;
            }
        }

        // Fully generic: all three operands as JSValues.
        V_JITOperation_EJJJ operation;
        if (m_node->op() == PutByValDirect) {
            if (m_graph.isStrictModeFor(m_node->origin.semantic))
                operation = operationPutByValDirectStrict;
            else
                operation = operationPutByValDirectNonStrict;
        } else {
            if (m_graph.isStrictModeFor(m_node->origin.semantic))
                operation = operationPutByValStrict;
            else
                operation = operationPutByValNonStrict;
        }

        vmCall(
            Void, m_out.operation(operation), m_callFrame,
            lowJSValue(child1), lowJSValue(child2), lowJSValue(child3));
        return;
    }

    default:
        break;
    }

    LValue base = lowCell(child1);
    LValue index = lowInt32(child2);
    LValue storage = lowStorage(child4);

    switch (arrayMode.type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock outerLastNext = m_out.appendTo(m_out.m_block, continuation);

        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue value = lowJSValue(child3, ManualOperandSpeculation);

            if (arrayMode.type() == Array::Int32)
                FTL_TYPE_CHECK(jsValueValue(value), child3, SpecInt32Only, isNotInt32(value));

            TypedPointer elementPointer = m_out.baseIndex(
                arrayMode.type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties,
                storage, m_out.zeroExtPtr(index), provenValue(child2));

            // PutByValAlias never grows the array, so store blindly.
            if (m_node->op() == PutByValAlias) {
                m_out.store64(value, elementPointer);
                break;
            }

            // Emits the bounds check and the call-out for stores beyond
            // the array bounds; in-bounds control falls through to the
            // store below.
            contiguousPutByValOutOfBounds(
                m_graph.isStrictModeFor(m_node->origin.semantic)
                ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
                base, storage, index, value, continuation);

            m_out.store64(value, elementPointer);
            break;
        }

        case Array::Double: {
            LValue value = lowDouble(child3);

            // NaN encodes a hole in double arrays, so only real doubles
            // may be stored.
            FTL_TYPE_CHECK(
                doubleValue(value), child3, SpecDoubleReal,
                m_out.doubleNotEqualOrUnordered(value, value));

            TypedPointer elementPointer = m_out.baseIndex(
                m_heaps.indexedDoubleProperties, storage, m_out.zeroExtPtr(index),
                provenValue(child2));

            if (m_node->op() == PutByValAlias) {
                m_out.storeDouble(value, elementPointer);
                break;
            }

            contiguousPutByValOutOfBounds(
                m_graph.isStrictModeFor(m_node->origin.semantic)
                ? (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
                : (m_node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
                base, storage, index, value, continuation);

            m_out.storeDouble(value, elementPointer);
            break;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
        }

        m_out.jump(continuation);
        m_out.appendTo(continuation, outerLastNext);
        return;
    }

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        LValue value = lowJSValue(child3);

        TypedPointer elementPointer = m_out.baseIndex(
            m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(index),
            provenValue(child2));

        if (m_node->op() == PutByValAlias) {
            m_out.store64(value, elementPointer);
            return;
        }

        if (arrayMode.isInBounds()) {
            // In-bounds mode may not create new entries: a zero slot is a
            // hole, so exit rather than store into it.
            speculate(StoreToHole, noValue(), 0, m_out.isZero64(m_out.load64(elementPointer)));
            m_out.store64(value, elementPointer);
            return;
        }

        LValue isOutOfBounds = m_out.aboveOrEqual(
            index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength));

        auto slowPathFunction = m_graph.isStrictModeFor(m_node->origin.semantic)
            ? (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
            : (m_node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict);
        if (!arrayMode.isOutOfBounds()) {
            // Mode does not tolerate out-of-bounds stores: exit on them
            // and let the remaining CFG assume in-bounds.
            speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
            isOutOfBounds = m_out.booleanFalse;
        }

        LBasicBlock inBoundCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock holeCase = m_out.newBlock();
        LBasicBlock doStoreCase = m_out.newBlock();
        LBasicBlock lengthUpdateCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isOutOfBounds, rarely(slowCase), usually(inBoundCase));

        LBasicBlock lastNext = m_out.appendTo(slowCase, inBoundCase);
        vmCall(
            Void, m_out.operation(slowPathFunction),
            m_callFrame, base, index, value);
        m_out.jump(continuation);


        if (arrayMode.isSlowPut()) {
            // SlowPut: holes always go through the VM call.
            m_out.appendTo(inBoundCase, doStoreCase);
            m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(slowCase), usually(doStoreCase));
        } else {
            // Regular ArrayStorage: filling a hole bumps
            // numValuesInVector and possibly publicLength before storing.
            m_out.appendTo(inBoundCase, holeCase);
            m_out.branch(m_out.isZero64(m_out.load64(elementPointer)), rarely(holeCase), usually(doStoreCase));

            m_out.appendTo(holeCase, lengthUpdateCase);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            m_out.branch(
                m_out.below(
                    index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_publicLength)),
                unsure(doStoreCase), unsure(lengthUpdateCase));

            m_out.appendTo(lengthUpdateCase, doStoreCase);
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.ArrayStorage_publicLength);
            m_out.jump(doStoreCase);
        }

        m_out.appendTo(doStoreCase, continuation);
        m_out.store64(value, elementPointer);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return;
    }

    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array: {
        TypedArrayType type = arrayMode.typedArrayType();

        ASSERT(isTypedView(type));
        {
            TypedPointer pointer = TypedPointer(
                m_heaps.typedArrayProperties,
                m_out.add(
                    storage,
                    m_out.shl(
                        m_out.zeroExt(index, pointerType()),
                        m_out.constIntPtr(logElementSize(type)))));

            LValue valueToStore;

            if (isInt(type)) {
                LValue intValue = getIntTypedArrayStoreOperand(child3, isClamped(type));

                valueToStore = intValue;
            } else /* !isInt(type) */ {
                LValue value = lowDouble(child3);
                switch (type) {
                case TypeFloat32:
                    valueToStore = m_out.doubleToFloat(value);
                    break;
                case TypeFloat64:
                    valueToStore = value;
                    break;
                default:
                    DFG_CRASH(m_graph, m_node, "Bad typed array type");
                }
            }

            if (arrayMode.isInBounds() || m_node->op() == PutByValAlias)
                m_out.store(valueToStore, pointer, storeType(type));
            else {
                // Out-of-bounds typed-array stores are silently dropped,
                // but a neutered array must still exit.
                LBasicBlock isInBounds = m_out.newBlock();
                LBasicBlock isOutOfBounds = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(index, lowInt32(child5)),
                    unsure(isOutOfBounds), unsure(isInBounds));

                LBasicBlock lastNext = m_out.appendTo(isInBounds, isOutOfBounds);
                m_out.store(valueToStore, pointer, storeType(type));
                m_out.jump(continuation);

                m_out.appendTo(isOutOfBounds, continuation);
                speculateTypedArrayIsNotNeutered(base);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
            }

            return;
        }
    }

    case Array::AnyTypedArray:
    case Array::String:
    case Array::DirectArguments:
    case Array::ForceExit:
    case Array::Generic:
    case Array::ScopedArguments:
    case Array::SelectUsingArguments:
    case Array::SelectUsingPredictions:
    case Array::Undecided:
    case Array::Unprofiled:
        DFG_CRASH(m_graph, m_node, "Bad array type");
        break;
    }
}
4795
4796 void compilePutAccessorById()
4797 {
4798 LValue base = lowCell(m_node->child1());
4799 LValue accessor = lowCell(m_node->child2());
4800 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4801 vmCall(
4802 Void,
4803 m_out.operation(m_node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById),
4804 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), accessor);
4805 }
4806
4807 void compilePutGetterSetterById()
4808 {
4809 LValue base = lowCell(m_node->child1());
4810 LValue getter = lowJSValue(m_node->child2());
4811 LValue setter = lowJSValue(m_node->child3());
4812 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4813 vmCall(
4814 Void, m_out.operation(operationPutGetterSetter),
4815 m_callFrame, base, m_out.constIntPtr(uid), m_out.constInt32(m_node->accessorAttributes()), getter, setter);
4816
4817 }
4818
4819 void compilePutAccessorByVal()
4820 {
4821 LValue base = lowCell(m_node->child1());
4822 LValue subscript = lowJSValue(m_node->child2());
4823 LValue accessor = lowCell(m_node->child3());
4824 vmCall(
4825 Void,
4826 m_out.operation(m_node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal),
4827 m_callFrame, base, subscript, m_out.constInt32(m_node->accessorAttributes()), accessor);
4828 }
4829
4830 void compileDeleteById()
4831 {
4832 LValue base = lowJSValue(m_node->child1());
4833 auto uid = m_graph.identifiers()[m_node->identifierNumber()];
4834 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteById), m_callFrame, base, m_out.constIntPtr(uid))));
4835 }
4836
4837 void compileDeleteByVal()
4838 {
4839 LValue base = lowJSValue(m_node->child1());
4840 LValue subscript = lowJSValue(m_node->child2());
4841 setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationDeleteByVal), m_callFrame, base, subscript)));
4842 }
4843
    // Lowers ArrayPush. varArgChild(0) is the butterfly storage,
    // varArgChild(1) is the base array cell, and varArgChild(2...) are the
    // values to push. The node's result is the new array length, boxed as an
    // int32 JSValue.
    void compileArrayPush()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 1));
        LValue storage = lowStorage(m_graph.varArgChild(m_node, 0));
        unsigned elementOffset = 2; // Pushed elements start at child index 2.
        unsigned elementCount = m_node->numChildren() - elementOffset;

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous:
        case Array::Double: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            if (elementCount == 1) {
                // Single-element push: if publicLength < vectorLength we can
                // store in place and bump the length; otherwise we call the
                // runtime, which handles reallocation.
                LValue value;
                Output::StoreType storeType;

                Edge& element = m_graph.varArgChild(m_node, elementOffset);
                speculate(element);
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    // Double arrays store unboxed doubles.
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength)),
                    unsure(slowPath), unsure(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store(
                    value, m_out.baseIndex(heap, storage, m_out.zeroExtPtr(prevLength)), storeType);
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                LValue operation;
                if (m_node->arrayMode().type() != Array::Double)
                    operation = m_out.operation(operationArrayPush);
                else
                    operation = m_out.operation(operationArrayPushDouble);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, operation, m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element push. Run all speculations up front so any OSR
            // exit happens before we mutate the array.
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge element = m_graph.varArgChild(m_node, elementIndex + elementOffset);
                speculate(element);
            }

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.Butterfly_vectorLength));

            m_out.branch(beyondVectorLength, unsure(slowPath), unsure(fastPath));

            // Fast path: capacity suffices, so the destination buffer is the
            // butterfly itself, starting at the old length.
            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight));
            m_out.jump(setup);

            // Slow path: stage the elements in a VM scratch buffer and let the
            // runtime append them. Publishing the active length makes the
            // staged values visible across the runtime call.
            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            // Either store type below writes 8 bytes per element.
            static_assert(sizeof(EncodedJSValue) == sizeof(double), "");
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            // Common store loop: write every element into whichever buffer was
            // selected above (butterfly or scratch).
            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value;
                Output::StoreType storeType;
                if (m_node->arrayMode().type() != Array::Double) {
                    value = lowJSValue(element, ManualOperandSpeculation);
                    storeType = Output::Store64;
                } else {
                    value = lowDouble(element);
                    storeType = Output::StoreDouble;
                }

                m_out.store(value, m_out.baseIndex(heap, buffer, m_out.constInt32(elementIndex), jsNumber(elementIndex)), storeType);
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, unsure(slowCallPath), unsure(continuation));

            m_out.appendTo(slowCallPath, continuation);
            LValue operation;
            if (m_node->arrayMode().type() != Array::Double)
                operation = m_out.operation(operationArrayPushMultiple);
            else
                operation = m_out.operation(operationArrayPushDoubleMultiple);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            // Retire the scratch buffer now that the runtime consumed it.
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            // This ensures that the result of ArrayPush is Int32 in AI.
            int32_t largestPositiveInt32Length = 0x7fffffff - elementCount;

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);
            // Refuse to handle bizarre lengths.
            speculate(Uncountable, noValue(), nullptr, m_out.above(prevLength, m_out.constInt32(largestPositiveInt32Length)));

            if (elementCount == 1) {
                // Single-element ArrayStorage push; like the contiguous case
                // but also maintains numValuesInVector.
                Edge& element = m_graph.varArgChild(m_node, elementOffset);

                LValue value = lowJSValue(element);

                LBasicBlock fastPath = m_out.newBlock();
                LBasicBlock slowPath = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                m_out.branch(
                    m_out.aboveOrEqual(
                        prevLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowPath), usually(fastPath));

                LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
                m_out.store64(
                    value, m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(prevLength)));
                LValue newLength = m_out.add(prevLength, m_out.int32One);
                m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
                m_out.store32(
                    m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                    storage, m_heaps.ArrayStorage_numValuesInVector);

                ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));
                m_out.jump(continuation);

                m_out.appendTo(slowPath, continuation);
                ValueFromBlock slowResult = m_out.anchor(
                    vmCall(Int64, m_out.operation(operationArrayPush), m_callFrame, value, base));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(Int64, fastResult, slowResult));
                return;
            }

            // Multi-element ArrayStorage push; mirrors the contiguous
            // multi-element case above, but maintains numValuesInVector and
            // addresses the vector at ArrayStorage::vectorOffset().
            LValue newLength = m_out.add(prevLength, m_out.constInt32(elementCount));

            LBasicBlock fastPath = m_out.newBlock();
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock setup = m_out.newBlock();
            LBasicBlock slowCallPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue beyondVectorLength = m_out.above(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength));

            m_out.branch(beyondVectorLength, rarely(slowPath), usually(fastPath));

            LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store32(
                m_out.add(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.constInt32(elementCount)),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            ValueFromBlock fastBufferResult = m_out.anchor(m_out.baseIndex(storage, m_out.zeroExtPtr(prevLength), ScaleEight, ArrayStorage::vectorOffset()));
            m_out.jump(setup);

            // Slow path: stage into a GC-visible VM scratch buffer.
            m_out.appendTo(slowPath, setup);
            size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
            ASSERT(scratchSize);
            ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
            m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            ValueFromBlock slowBufferResult = m_out.anchor(m_out.constIntPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));
            m_out.jump(setup);

            // Common store loop into the selected buffer.
            m_out.appendTo(setup, slowCallPath);
            LValue buffer = m_out.phi(pointerType(), fastBufferResult, slowBufferResult);
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge& element = m_graph.varArgChild(m_node, elementIndex + elementOffset);

                LValue value = lowJSValue(element);
                m_out.store64(value, m_out.baseIndex(m_heaps.ArrayStorage_vector.atAnyIndex(), buffer, m_out.constIntPtr(elementIndex), ScaleEight));
            }
            ValueFromBlock fastResult = m_out.anchor(boxInt32(newLength));

            m_out.branch(beyondVectorLength, rarely(slowCallPath), usually(continuation));

            m_out.appendTo(slowCallPath, continuation);
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationArrayPushMultiple), m_callFrame, base, buffer, m_out.constInt32(elementCount)));
            // Retire the scratch buffer now that the runtime consumed it.
            m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5073
5074 std::pair<LValue, LValue> populateSliceRange(LValue start, LValue end, LValue length)
5075 {
5076 // end can be nullptr.
5077 ASSERT(start);
5078 ASSERT(length);
5079
5080 auto pickIndex = [&] (LValue index) {
5081 return m_out.select(m_out.greaterThanOrEqual(index, m_out.int32Zero),
5082 m_out.select(m_out.above(index, length), length, index),
5083 m_out.select(m_out.lessThan(m_out.add(length, index), m_out.int32Zero), m_out.int32Zero, m_out.add(length, index)));
5084 };
5085
5086 LValue endBoundary = length;
5087 if (end)
5088 endBoundary = pickIndex(end);
5089 LValue startIndex = pickIndex(start);
5090 return std::make_pair(startIndex, endBoundary);
5091 }
5092
    // Lowers ArraySlice for Int32/Contiguous/Double arrays: allocates a fresh
    // JSArray with a structure matching the source's indexing shape, then
    // copies the selected element range with an inline loop.
    void compileArraySlice()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        // The last vararg child is the source butterfly storage.
        LValue sourceStorage = lowStorage(m_graph.varArgChild(m_node, m_node->numChildren() - 1));
        LValue inputLength = m_out.load32(sourceStorage, m_heaps.Butterfly_publicLength);

        LValue startIndex = nullptr;
        LValue resultLength = nullptr;
        if (m_node->numChildren() == 2) {
            // No start/end arguments: slice the whole array.
            startIndex = m_out.constInt32(0);
            resultLength = inputLength;
        } else {
            LValue start = lowInt32(m_graph.varArgChild(m_node, 1));
            LValue end = nullptr;
            if (m_node->numChildren() != 3)
                end = lowInt32(m_graph.varArgChild(m_node, 2));

            // Clamp start/end per the slice specification (negatives count
            // from the end; both bounded by [0, inputLength]).
            auto range = populateSliceRange(start, end, inputLength);
            startIndex = range.first;
            LValue endBoundary = range.second;

            // An inverted range yields an empty result, never a negative one.
            resultLength = m_out.select(m_out.belowOrEqual(startIndex, endBoundary),
                m_out.sub(endBoundary, startIndex),
                m_out.constInt32(0));
        }

        ArrayValues arrayResult;
        {
            LValue indexingType = m_out.load8ZeroExt32(lowCell(m_graph.varArgChild(m_node, 0)), m_heaps.JSCell_indexingTypeAndMisc);
            // We can ignore the writability of the cell since we won't write to the source.
            indexingType = m_out.bitAnd(indexingType, m_out.constInt32(AllWritableArrayTypesAndHistory));
            // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
            // to ensure the incoming array is one of the original array structures
            // with one of the following indexing shapes: Int32, Contiguous, Double.
            LValue structure = m_out.select(
                m_out.equal(indexingType, m_out.constInt32(ArrayWithInt32)),
                weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))),
                m_out.select(m_out.equal(indexingType, m_out.constInt32(ArrayWithContiguous)),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))),
                    weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble)))));
            arrayResult = allocateJSArray(resultLength, resultLength, structure, indexingType, false, false);
        }

        // Copy loop: two phis walk the source (from startIndex) and the
        // destination (from 0) until resultLength elements are copied.
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        resultLength = m_out.zeroExtPtr(resultLength);
        ValueFromBlock startLoadIndex = m_out.anchor(m_out.zeroExtPtr(startIndex));
        ValueFromBlock startStoreIndex = m_out.anchor(m_out.constIntPtr(0));

        // Skip the loop entirely for an empty slice.
        m_out.branch(
            m_out.below(m_out.constIntPtr(0), resultLength), unsure(loop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(loop, continuation);
        LValue storeIndex = m_out.phi(pointerType(), startStoreIndex);
        LValue loadIndex = m_out.phi(pointerType(), startLoadIndex);
        // Raw 64-bit copy works for all three shapes (boxed or double bits).
        LValue value = m_out.load64(m_out.baseIndex(m_heaps.root, sourceStorage, loadIndex, ScaleEight));
        m_out.store64(value, m_out.baseIndex(m_heaps.root, arrayResult.butterfly, storeIndex, ScaleEight));
        LValue nextStoreIndex = m_out.add(storeIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(storeIndex, m_out.anchor(nextStoreIndex));
        m_out.addIncomingToPhi(loadIndex, m_out.anchor(m_out.add(loadIndex, m_out.constIntPtr(1))));
        m_out.branch(
            m_out.below(nextStoreIndex, resultLength), unsure(loop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);

        // Publish the initialized object before it becomes visible to the GC.
        mutatorFence();
        setJSValue(arrayResult.array);
    }
5163
    // Lowers ArrayIndexOf. Children: child 0 = array, child 1 = search
    // element, optional child 2 = start index, last child = storage. For
    // Int32/Object/Symbol/Other/DoubleRep search elements we emit an inline
    // comparison loop; String and Untyped searches go through runtime calls.
    // Result: the found index, or -1.
    void compileArrayIndexOf()
    {
        LValue storage = lowStorage(m_node->numChildren() == 3 ? m_graph.varArgChild(m_node, 2) : m_graph.varArgChild(m_node, 3));
        LValue length = m_out.load32(storage, m_heaps.Butterfly_publicLength);

        LValue startIndex;
        if (m_node->numChildren() == 4) {
            // Clamp the explicit start index: negatives count from the end,
            // and everything is bounded by [0, length].
            startIndex = lowInt32(m_graph.varArgChild(m_node, 2));
            startIndex = m_out.select(m_out.greaterThanOrEqual(startIndex, m_out.int32Zero),
                m_out.select(m_out.above(startIndex, length), length, startIndex),
                m_out.select(m_out.lessThan(m_out.add(length, startIndex), m_out.int32Zero), m_out.int32Zero, m_out.add(length, startIndex)));
        } else
            startIndex = m_out.int32Zero;

        Edge& searchElementEdge = m_graph.varArgChild(m_node, 1);
        switch (searchElementEdge.useKind()) {
        case Int32Use:
        case ObjectUse:
        case SymbolUse:
        case OtherUse:
        case DoubleRepUse: {
            // Inline search loop. Blocks: header tests the bound, body
            // compares one element, next advances the index.
            LBasicBlock loopHeader = m_out.newBlock();
            LBasicBlock loopBody = m_out.newBlock();
            LBasicBlock loopNext = m_out.newBlock();
            LBasicBlock notFound = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Lower the search element according to its proven use kind; the
            // asserts document which array shape each kind is paired with.
            LValue searchElement;
            switch (searchElementEdge.useKind()) {
            case Int32Use:
                ASSERT(m_node->arrayMode().type() == Array::Int32);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case ObjectUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowObject(searchElementEdge);
                break;
            case SymbolUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                searchElement = lowSymbol(searchElementEdge);
                break;
            case OtherUse:
                ASSERT(m_node->arrayMode().type() == Array::Contiguous);
                speculate(searchElementEdge);
                searchElement = lowJSValue(searchElementEdge, ManualOperandSpeculation);
                break;
            case DoubleRepUse:
                ASSERT(m_node->arrayMode().type() == Array::Double);
                searchElement = lowDouble(searchElementEdge);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            // The loop runs over pointer-width indices.
            startIndex = m_out.zeroExtPtr(startIndex);
            length = m_out.zeroExtPtr(length);

            ValueFromBlock initialStartIndex = m_out.anchor(startIndex);
            m_out.jump(loopHeader);

            LBasicBlock lastNext = m_out.appendTo(loopHeader, loopBody);
            LValue index = m_out.phi(pointerType(), initialStartIndex);
            m_out.branch(m_out.notEqual(index, length), unsure(loopBody), unsure(notFound));

            m_out.appendTo(loopBody, loopNext);
            ValueFromBlock foundResult = m_out.anchor(index);
            switch (searchElementEdge.useKind()) {
            case Int32Use: {
                // Empty value is ignored because of TagTypeNumber.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedInt32Properties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case ObjectUse:
            case SymbolUse:
            case OtherUse: {
                // Empty value never matches against non-empty JS values.
                LValue value = m_out.load64(m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, index));
                m_out.branch(m_out.equal(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            case DoubleRepUse: {
                // Empty value is ignored because of NaN.
                LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, storage, index));
                m_out.branch(m_out.doubleEqual(value, searchElement), unsure(continuation), unsure(loopNext));
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            m_out.appendTo(loopNext, notFound);
            LValue nextIndex = m_out.add(index, m_out.intPtrOne);
            m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
            m_out.jump(loopHeader);

            m_out.appendTo(notFound, continuation);
            ValueFromBlock notFoundResult = m_out.anchor(m_out.constIntPtr(-1));
            m_out.jump(continuation);

            // Result is either the found index or -1, truncated to int32.
            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.castToInt32(m_out.phi(pointerType(), notFoundResult, foundResult)));
            break;
        }

        case StringUse:
            // String comparisons need runtime support (rope resolution etc.).
            ASSERT(m_node->arrayMode().type() == Array::Contiguous);
            setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfString), m_callFrame, storage, lowString(searchElementEdge), startIndex));
            break;

        case UntypedUse:
            // Fully generic search: pick the runtime helper matching the
            // array's storage shape.
            switch (m_node->arrayMode().type()) {
            case Array::Double:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueDouble), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            case Array::Int32:
            case Array::Contiguous:
                setInt32(vmCall(Int32, m_out.operation(operationArrayIndexOfValueInt32OrContiguous), m_callFrame, storage, lowJSValue(searchElementEdge), startIndex));
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5297
5298
    // Lowers ArrayPop. Fast path pops the last element in place; the runtime
    // is called for holes (empty slots) and length/vector mismatches. An
    // empty array yields undefined without any store.
    void compileArrayPop()
    {
        LValue base = lowCell(m_node->child1());
        LValue storage = lowStorage(m_node->child2());

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            IndexedAbstractHeap& heap = m_heaps.forArrayType(m_node->arrayMode().type());

            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.Butterfly_publicLength);

            // Up to three results flow into the continuation: undefined
            // (empty array), the fast-popped value, or the slow-call result.
            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(fastCase));

            LBasicBlock lastNext = m_out.appendTo(fastCase, slowCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.store32(newLength, storage, m_heaps.Butterfly_publicLength);
            TypedPointer pointer = m_out.baseIndex(heap, storage, m_out.zeroExtPtr(newLength));
            if (m_node->arrayMode().type() != Array::Double) {
                LValue result = m_out.load64(pointer);
                // Clear the slot; a zero (empty) load means we popped a hole
                // and must fall through to the runtime.
                m_out.store64(m_out.int64Zero, pointer);
                results.append(m_out.anchor(result));
                m_out.branch(
                    m_out.notZero64(result), usually(continuation), rarely(slowCase));
            } else {
                LValue result = m_out.loadDouble(pointer);
                // PNaN marks an empty double slot; a self-inequality check
                // (NaN != NaN) detects that we popped a hole.
                m_out.store64(m_out.constInt64(bitwise_cast<int64_t>(PNaN)), pointer);
                results.append(m_out.anchor(boxDouble(result)));
                m_out.branch(
                    m_out.doubleEqual(result, result),
                    usually(continuation), rarely(slowCase));
            }

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPopAndRecoverLength), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        case Array::ArrayStorage: {
            LBasicBlock vectorLengthCheckCase = m_out.newBlock();
            LBasicBlock popCheckCase = m_out.newBlock();
            LBasicBlock fastCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue prevLength = m_out.load32(storage, m_heaps.ArrayStorage_publicLength);

            Vector<ValueFromBlock, 3> results;
            results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
            m_out.branch(
                m_out.isZero32(prevLength), rarely(continuation), usually(vectorLengthCheckCase));

            // The last element is only in the vector if newLength is within
            // vectorLength; otherwise defer to the runtime.
            LBasicBlock lastNext = m_out.appendTo(vectorLengthCheckCase, popCheckCase);
            LValue newLength = m_out.sub(prevLength, m_out.int32One);
            m_out.branch(
                m_out.aboveOrEqual(newLength, m_out.load32(storage, m_heaps.ArrayStorage_vectorLength)), rarely(slowCase), usually(popCheckCase));

            // A zero (empty) slot is a hole; the runtime handles that too.
            m_out.appendTo(popCheckCase, fastCase);
            TypedPointer pointer = m_out.baseIndex(m_heaps.ArrayStorage_vector, storage, m_out.zeroExtPtr(newLength));
            LValue result = m_out.load64(pointer);
            m_out.branch(m_out.notZero64(result), usually(fastCase), rarely(slowCase));

            // Fast pop: shrink the length, clear the slot, and decrement the
            // count of values in the vector.
            m_out.appendTo(fastCase, slowCase);
            m_out.store32(newLength, storage, m_heaps.ArrayStorage_publicLength);
            m_out.store64(m_out.int64Zero, pointer);
            m_out.store32(
                m_out.sub(m_out.load32(storage, m_heaps.ArrayStorage_numValuesInVector), m_out.int32One),
                storage, m_heaps.ArrayStorage_numValuesInVector);
            results.append(m_out.anchor(result));
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationArrayPop), m_callFrame, base)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, results));
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad array type");
            return;
        }
    }
5398
5399 void compilePushWithScope()
5400 {
5401 LValue parentScope = lowCell(m_node->child1());
5402 auto objectEdge = m_node->child2();
5403 if (objectEdge.useKind() == ObjectUse) {
5404 LValue object = lowNonNullObject(objectEdge);
5405 LValue result = vmCall(Int64, m_out.operation(operationPushWithScopeObject), m_callFrame, parentScope, object);
5406 setJSValue(result);
5407 } else {
5408 ASSERT(objectEdge.useKind() == UntypedUse);
5409 LValue object = lowJSValue(m_node->child2());
5410 LValue result = vmCall(Int64, m_out.operation(operationPushWithScope), m_callFrame, parentScope, object);
5411 setJSValue(result);
5412 }
5413 }
5414
    // Lowers CreateActivation: materializes a JSLexicalEnvironment for the
    // node's symbol table. If the singleton scope watchpoint is still valid
    // we must always call the runtime (so the watchpoint can fire);
    // otherwise we inline-allocate and initialize, with a lazy slow path for
    // allocation failure.
    void compileCreateActivation()
    {
        LValue scope = lowCell(m_node->child1());
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());
        JSValue initializationValue = m_node->initializationValueForActivation();
        ASSERT(initializationValue.isUndefined() || initializationValue == jsTDZValue());
        if (table->singletonScope()->isStillValid()) {
            // Singleton still valid: take the runtime path unconditionally.
            LValue callResult = vmCall(
                Int64,
                m_out.operation(operationCreateActivationDirect), m_callFrame, weakStructure(structure),
                scope, weakPointer(table), m_out.constInt64(JSValue::encode(initializationValue)));
            setJSValue(callResult);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        // We don't need memory barriers since we just fast-created the activation, so the
        // activation must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);

        // Fill every variable slot with undefined or the TDZ value.
        for (unsigned i = 0; i < table->scopeSize(); ++i) {
            m_out.store64(
                m_out.constInt64(JSValue::encode(initializationValue)),
                fastObject, m_heaps.JSLexicalEnvironment_variables[i]);
        }

        // Publish the initialized object before it becomes visible to the GC.
        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: lazily-generated runtime call, emitted only if this
        // patchpoint's allocation ever fails.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(initializationValue)));
            },
            scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5472
    // Lowers NewFunction and its generator/async/async-generator variants:
    // materializes a JSFunction (or subclass) cell that closes over the scope
    // given by child1, with the FunctionExecutable baked in as a node operand.
    void compileNewFunction()
    {
        ASSERT(m_node->op() == NewFunction || m_node->op() == NewGeneratorFunction || m_node->op() == NewAsyncGeneratorFunction || m_node->op() == NewAsyncFunction);
        bool isGeneratorFunction = m_node->op() == NewGeneratorFunction;
        bool isAsyncFunction = m_node->op() == NewAsyncFunction;
        bool isAsyncGeneratorFunction = m_node->op() == NewAsyncGeneratorFunction;

        LValue scope = lowCell(m_node->child1());

        FunctionExecutable* executable = m_node->castOperand<FunctionExecutable*>();
        // While the executable's singleton-function watchpoint is still valid we
        // cannot inline-allocate: only the full VM operation knows how to handle
        // (and invalidate) that state. Pick the operation matching the node op.
        if (executable->singletonFunction()->isStillValid()) {
            LValue callResult =
                isGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncFunction ? vmCall(Int64, m_out.operation(operationNewAsyncFunction), m_callFrame, scope, weakPointer(executable)) :
                isAsyncGeneratorFunction ? vmCall(Int64, m_out.operation(operationNewAsyncGeneratorFunction), m_callFrame, scope, weakPointer(executable)) :
                vmCall(Int64, m_out.operation(operationNewFunction), m_callFrame, scope, weakPointer(executable));
            setJSValue(callResult);
            return;
        }

        // Select the structure for the function cell based on the node op.
        RegisteredStructure structure = m_graph.registerStructure(
            [&] () {
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                switch (m_node->op()) {
                case NewGeneratorFunction:
                    return globalObject->generatorFunctionStructure();
                case NewAsyncFunction:
                    return globalObject->asyncFunctionStructure();
                case NewAsyncGeneratorFunction:
                    return globalObject->asyncGeneratorFunctionStructure();
                case NewFunction:
                    return JSFunction::selectStructureForNewFuncExp(globalObject, m_node->castOperand<FunctionExecutable*>());
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            }());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline-allocate the appropriate JSFunction subclass cell,
        // bailing to slowPath if the allocator cannot satisfy the request.
        LValue fastObject =
            isGeneratorFunction ? allocateObject<JSGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncFunction ? allocateObject<JSAsyncFunction>(structure, m_out.intPtrZero, slowPath) :
            isAsyncGeneratorFunction ? allocateObject<JSAsyncGeneratorFunction>(structure, m_out.intPtrZero, slowPath) :
            allocateObject<JSFunction>(structure, m_out.intPtrZero, slowPath);


        // We don't need memory barriers since we just fast-created the function, so it
        // must be young.
        m_out.storePtr(scope, fastObject, m_heaps.JSFunction_scope);
        m_out.storePtr(weakPointer(executable), fastObject, m_heaps.JSFunction_executable);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.JSFunction_rareData);

        mutatorFence();

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Slow path: lazily-generated call to the matching
        // ...WithInvalidatedReallocationWatchpoint operation, passing the scope
        // in a register and the executable as an immediate.
        Vector<LValue> slowPathArguments;
        slowPathArguments.append(scope);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                auto* operation = operationNewFunctionWithInvalidatedReallocationWatchpoint;
                if (isGeneratorFunction)
                    operation = operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncFunction)
                    operation = operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint;
                else if (isAsyncGeneratorFunction)
                    operation = operationNewAsyncGeneratorFunctionWithInvalidatedReallocationWatchpoint;

                return createLazyCallGenerator(vm, operation,
                    locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(executable));
            },
            slowPathArguments);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5559
    // Lowers CreateDirectArguments: allocates a DirectArguments object sized to
    // max(actual argument count, declared parameter count) and copies the
    // arguments from the stack into it.
    void compileCreateDirectArguments()
    {
        // FIXME: A more effective way of dealing with the argument count and callee is to have
        // them be explicit arguments to this node.
        // https://bugs.webkit.org/show_bug.cgi?id=142207

        RegisteredStructure structure =
            m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->directArgumentsStructure());

        // Declared parameters, excluding |this|.
        unsigned minCapacity = m_graph.baselineCodeBlockFor(m_node->origin.semantic)->numParameters() - 1;

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        ArgumentsLength length = getArgumentsLength();

        LValue fastObject;
        if (length.isKnown) {
            // Statically-known length: fixed-size inline allocation.
            fastObject = allocateObject<DirectArguments>(
                DirectArguments::allocationSize(std::max(length.known, minCapacity)), structure,
                m_out.intPtrZero, slowPath);
        } else {
            // Dynamic length: size = length * sizeof(JSValue) + header, clamped
            // from below by the minCapacity allocation size.
            LValue size = m_out.add(
                m_out.shl(length.value, m_out.constInt32(3)),
                m_out.constInt32(DirectArguments::storageOffset()));

            size = m_out.select(
                m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                size, m_out.constInt32(DirectArguments::allocationSize(minCapacity)));

            fastObject = allocateVariableSizedObject<DirectArguments>(
                m_out.zeroExtPtr(size), structure, m_out.intPtrZero, slowPath);
        }

        // Initialize the header fields of the freshly allocated object.
        m_out.store32(length.value, fastObject, m_heaps.DirectArguments_length);
        m_out.store32(m_out.constInt32(minCapacity), fastObject, m_heaps.DirectArguments_minCapacity);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_mappedArguments);
        m_out.storePtr(m_out.intPtrZero, fastObject, m_heaps.DirectArguments_modifiedArgumentsDescriptor);

        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        // Slow path: call operationCreateDirectArguments with the structure and
        // minCapacity as immediates and the length in a register.
        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateDirectArguments, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImm32(minCapacity));
            }, length.value);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        m_out.storePtr(getCurrentCallee(), result, m_heaps.DirectArguments_callee);

        if (length.isKnown) {
            // Known length: emit an unrolled copy of each argument slot.
            VirtualRegister start = AssemblyHelpers::argumentsStart(m_node->origin.semantic);
            for (unsigned i = 0; i < std::max(length.known, minCapacity); ++i) {
                m_out.store64(
                    m_out.load64(addressFor(start + i)),
                    result, m_heaps.DirectArguments_storage[i]);
            }
        } else {
            // Dynamic length: copy with a loop that counts down from
            // max(length, minCapacity) - 1 to 0, skipping the loop entirely when
            // the count is zero (only possible when minCapacity is zero).
            LValue stackBase = getArgumentsStart();

            LBasicBlock loop = m_out.newBlock();
            LBasicBlock end = m_out.newBlock();

            ValueFromBlock originalLength;
            if (minCapacity) {
                LValue capacity = m_out.select(
                    m_out.aboveOrEqual(length.value, m_out.constInt32(minCapacity)),
                    length.value,
                    m_out.constInt32(minCapacity));
                LValue originalLengthValue = m_out.zeroExtPtr(capacity);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.jump(loop);
            } else {
                LValue originalLengthValue = m_out.zeroExtPtr(length.value);
                originalLength = m_out.anchor(originalLengthValue);
                m_out.branch(m_out.isNull(originalLengthValue), unsure(end), unsure(loop));
            }

            lastNext = m_out.appendTo(loop, end);
            LValue previousIndex = m_out.phi(pointerType(), originalLength);
            LValue index = m_out.sub(previousIndex, m_out.intPtrOne);
            m_out.store64(
                m_out.load64(m_out.baseIndex(m_heaps.variables, stackBase, index)),
                m_out.baseIndex(m_heaps.DirectArguments_storage, result, index));
            ValueFromBlock nextIndex = m_out.anchor(index);
            m_out.addIncomingToPhi(previousIndex, nextIndex);
            m_out.branch(m_out.isNull(index), unsure(end), unsure(loop));

            m_out.appendTo(end, lastNext);
        }

        mutatorFence();

        setJSValue(result);
    }
5666
5667 void compileCreateScopedArguments()
5668 {
5669 LValue scope = lowCell(m_node->child1());
5670
5671 LValue result = vmCall(
5672 Int64, m_out.operation(operationCreateScopedArguments), m_callFrame,
5673 weakPointer(
5674 m_graph.globalObjectFor(m_node->origin.semantic)->scopedArgumentsStructure()),
5675 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee(), scope);
5676
5677 setJSValue(result);
5678 }
5679
5680 void compileCreateClonedArguments()
5681 {
5682 LValue result = vmCall(
5683 Int64, m_out.operation(operationCreateClonedArguments), m_callFrame,
5684 weakPointer(
5685 m_graph.globalObjectFor(m_node->origin.semantic)->clonedArgumentsStructure()),
5686 getArgumentsStart(), getArgumentsLength().value, getCurrentCallee());
5687
5688 setJSValue(result);
5689 }
5690
    // Lowers CreateRest: builds the rest-parameter array from the stack
    // arguments past numberOfArgumentsToSkip. child1 is the (already clamped)
    // rest length.
    void compileCreateRest()
    {
        // Fast path, valid only while the global object is not "having a bad
        // time": allocate an uninitialized contiguous JSArray and fill it by
        // copying arguments with a countdown loop.
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            LBasicBlock continuation = m_out.newBlock();
            LValue arrayLength = lowInt32(m_node->child1());
            LBasicBlock loopStart = m_out.newBlock();
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
            RegisteredStructure structure = m_graph.registerStructure(globalObject->originalRestParameterStructure());
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(arrayLength, structure);
            LValue array = arrayValues.array;
            LValue butterfly = arrayValues.butterfly;
            ValueFromBlock startLength = m_out.anchor(arrayLength);
            // First stack slot to copy: argumentsStart + numberOfArgumentsToSkip.
            LValue argumentRegion = m_out.add(getArgumentsStart(), m_out.constInt64(sizeof(Register) * m_node->numberOfArgumentsToSkip()));
            // Zero-length rest arrays skip the copy loop entirely.
            m_out.branch(m_out.equal(arrayLength, m_out.constInt32(0)),
                unsure(continuation), unsure(loopStart));

            // Loop: copy slot (offset - 1) each iteration, counting down to 0.
            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);
            LValue phiOffset = m_out.phi(Int32, startLength);
            LValue currentOffset = m_out.sub(phiOffset, m_out.int32One);
            m_out.addIncomingToPhi(phiOffset, m_out.anchor(currentOffset));
            LValue loadedValue = m_out.load64(m_out.baseIndex(m_heaps.variables, argumentRegion, m_out.zeroExtPtr(currentOffset)));
            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
            m_out.store64(loadedValue, m_out.baseIndex(heap, butterfly, m_out.zeroExtPtr(currentOffset)));
            m_out.branch(m_out.equal(currentOffset, m_out.constInt32(0)), unsure(continuation), unsure(loopStart));

            m_out.appendTo(continuation, lastNext);
            mutatorFence();
            setJSValue(array);
            return;
        }

        // Generic path: let the VM build the rest array.
        LValue arrayLength = lowInt32(m_node->child1());
        LValue argumentStart = getArgumentsStart();
        LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        setJSValue(vmCall(
            Int64, m_out.operation(operationCreateRest), m_callFrame, argumentStart, numberOfArgumentsToSkip, arrayLength));
    }
5728
    // Lowers GetRestLength: computes
    // max(0, argumentsLength - numberOfArgumentsToSkip) without a call.
    void compileGetRestLength()
    {
        LBasicBlock nonZeroLength = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Default result when all arguments are skipped.
        ValueFromBlock zeroLengthResult = m_out.anchor(m_out.constInt32(0));

        LValue numberOfArgumentsToSkip = m_out.constInt32(m_node->numberOfArgumentsToSkip());
        LValue argumentsLength = getArgumentsLength().value;
        // Only subtract when the difference is strictly positive; otherwise the
        // zero anchored above flows into the phi.
        m_out.branch(m_out.above(argumentsLength, numberOfArgumentsToSkip),
            unsure(nonZeroLength), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(nonZeroLength, continuation);
        ValueFromBlock nonZeroLengthResult = m_out.anchor(m_out.sub(argumentsLength, numberOfArgumentsToSkip));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, zeroLengthResult, nonZeroLengthResult));
    }
5748
    // Lowers ObjectKeys (Object.keys()). For proven objects, while the global
    // object is not "having a bad time", tries to reuse the own-keys cache
    // stored in the structure's rare data; otherwise calls into the VM.
    void compileObjectKeys()
    {
        switch (m_node->child1().useKind()) {
        case ObjectUse: {
            if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
                LBasicBlock notNullCase = m_out.newBlock();
                LBasicBlock rareDataCase = m_out.newBlock();
                LBasicBlock useCacheCase = m_out.newBlock();
                LBasicBlock slowButArrayBufferCase = m_out.newBlock();
                LBasicBlock slowCase = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                LValue object = lowObject(m_node->child1());
                LValue structure = loadStructure(object);
                // Structure::previousOrRareData holds either the previous
                // Structure or a StructureRareData; null means neither.
                LValue previousOrRareData = m_out.loadPtr(structure, m_heaps.Structure_previousOrRareData);
                m_out.branch(m_out.notNull(previousOrRareData), unsure(notNullCase), unsure(slowCase));

                // Disambiguate: if the cell's structureID is not the Structure
                // meta-structure's ID, the pointer is rare data, not a
                // previous Structure.
                LBasicBlock lastNext = m_out.appendTo(notNullCase, rareDataCase);
                m_out.branch(
                    m_out.notEqual(m_out.load32(previousOrRareData, m_heaps.JSCell_structureID), m_out.constInt32(m_graph.m_vm.structureStructure->structureID())),
                    unsure(rareDataCase), unsure(slowCase));

                // The cached own-keys slot is usable only if it holds a real
                // butterfly pointer, i.e. is above the sentinel (1) and null.
                m_out.appendTo(rareDataCase, useCacheCase);
                ASSERT(bitwise_cast<uintptr_t>(StructureRareData::cachedOwnKeysSentinel()) == 1);
                LValue cachedOwnKeys = m_out.loadPtr(previousOrRareData, m_heaps.StructureRareData_cachedOwnKeys);
                m_out.branch(m_out.belowOrEqual(cachedOwnKeys, m_out.constIntPtr(bitwise_cast<void*>(StructureRareData::cachedOwnKeysSentinel()))), unsure(slowCase), unsure(useCacheCase));

                // Cache hit: wrap the shared immutable butterfly in a freshly
                // allocated copy-on-write JSArray.
                m_out.appendTo(useCacheCase, slowButArrayBufferCase);
                JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
                RegisteredStructure arrayStructure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));
                LValue fastArray = allocateObject<JSArray>(arrayStructure, m_out.addPtr(cachedOwnKeys, JSImmutableButterfly::offsetOfData()), slowButArrayBufferCase);
                ValueFromBlock fastResult = m_out.anchor(fastArray);
                m_out.jump(continuation);

                // Allocation failed but the cache hit stands: have the VM make
                // the array from the cached butterfly.
                m_out.appendTo(slowButArrayBufferCase, slowCase);
                LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(arrayStructure), cachedOwnKeys);
                ValueFromBlock slowButArrayBufferResult = m_out.anchor(slowArray);
                m_out.jump(continuation);

                // Full slow path: compute the keys from scratch in the VM.
                m_out.appendTo(slowCase, continuation);
                VM& vm = this->vm();
                LValue slowResultValue = lazySlowPath(
                    [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                        return createLazyCallGenerator(vm,
                            operationObjectKeysObject, locations[0].directGPR(), locations[1].directGPR());
                    },
                    object);
                ValueFromBlock slowResult = m_out.anchor(slowResultValue);
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                setJSValue(m_out.phi(pointerType(), fastResult, slowButArrayBufferResult, slowResult));
                break;
            }
            // Not watching the watchpoint: always call into the VM.
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeysObject), m_callFrame, lowObject(m_node->child1())));
            break;
        }
        case UntypedUse:
            setJSValue(vmCall(Int64, m_out.operation(operationObjectKeys), m_callFrame, lowJSValue(m_node->child1())));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
5814
5815 void compileObjectCreate()
5816 {
5817 switch (m_node->child1().useKind()) {
5818 case ObjectUse:
5819 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreateObject), m_callFrame, lowObject(m_node->child1())));
5820 break;
5821 case UntypedUse:
5822 setJSValue(vmCall(Int64, m_out.operation(operationObjectCreate), m_callFrame, lowJSValue(m_node->child1())));
5823 break;
5824 default:
5825 RELEASE_ASSERT_NOT_REACHED();
5826 break;
5827 }
5828 }
5829
5830 void compileNewObject()
5831 {
5832 setJSValue(allocateObject(m_node->structure()));
5833 mutatorFence();
5834 }
5835
    // Lowers NewStringObject: allocates a StringObject wrapping the string in
    // child1, falling back to operationNewStringObject on allocation failure.
    void compileNewStringObject()
    {
        RegisteredStructure structure = m_node->structure();
        LValue string = lowString(m_node->child1());

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        // Fast path: inline allocation, then initialize the class-info pointer
        // and the wrapped string value.
        LValue fastResultValue = allocateObject<StringObject>(structure, m_out.intPtrZero, slowCase);
        m_out.storePtr(m_out.constIntPtr(StringObject::info()), fastResultValue, m_heaps.JSDestructibleObject_classInfo);
        m_out.store64(string, fastResultValue, m_heaps.JSWrapperObject_internalValue);
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        // Slow path: lazily-generated call with the string in a register and
        // the structure as an immediate.
        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewStringObject, locations[0].directGPR(), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            },
            string);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
5868
5869 void compileNewSymbol()
5870 {
5871 if (!m_node->child1()) {
5872 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbol), m_callFrame));
5873 return;
5874 }
5875 ASSERT(m_node->child1().useKind() == KnownStringUse);
5876 setJSValue(vmCall(pointerType(), m_out.operation(operationNewSymbolWithDescription), m_callFrame, lowString(m_node->child1())));
5877 }
5878
    // Lowers NewArray (array literal with numChildren elements). Fast path
    // inline-allocates and stores each element; otherwise elements are spilled
    // to a scratch buffer and operationNewArray is called.
    void compileNewArray()
    {
        // First speculate appropriately on all of the children. Do this unconditionally up here
        // because some of the slow paths may otherwise forget to do it. It's sort of arguable
        // that doing the speculations up here might be unprofitable for RA - so we can consider
        // sinking this to below the allocation fast path if we find that this has a lot of
        // register pressure.
        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex)
            speculate(m_graph.varArgChild(m_node, operandIndex));

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        // Fast path: contiguous-ish indexing types while the global object is
        // healthy. Allocate uninitialized storage (at least vectorLengthHint
        // slots) and store each element with the store matching the indexing
        // type.
        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            unsigned numElements = m_node->numChildren();
            unsigned vectorLengthHint = m_node->vectorLengthHint();
            ASSERT(vectorLengthHint >= numElements);

            ArrayValues arrayValues =
                allocateUninitializedContiguousJSArray(numElements, vectorLengthHint, structure);

            for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
                Edge edge = m_graph.varArgChild(m_node, operandIndex);

                switch (m_node->indexingType()) {
                case ALL_BLANK_INDEXING_TYPES:
                case ALL_UNDECIDED_INDEXING_TYPES:
                    DFG_CRASH(m_graph, m_node, "Bad indexing type");
                    break;

                case ALL_DOUBLE_INDEXING_TYPES:
                    m_out.storeDouble(
                        lowDouble(edge),
                        arrayValues.butterfly, m_heaps.indexedDoubleProperties[operandIndex]);
                    break;

                case ALL_INT32_INDEXING_TYPES:
                case ALL_CONTIGUOUS_INDEXING_TYPES:
                    // Speculation already happened above, hence
                    // ManualOperandSpeculation here.
                    m_out.store64(
                        lowJSValue(edge, ManualOperandSpeculation),
                        arrayValues.butterfly,
                        m_heaps.forIndexingType(m_node->indexingType())->at(operandIndex));
                    break;

                default:
                    DFG_CRASH(m_graph, m_node, "Corrupt indexing type");
                    break;
                }
            }

            setJSValue(arrayValues.array);
            mutatorFence();
            return;
        }

        // Empty literal on the slow path: dedicated operation, no buffer needed.
        if (!m_node->numChildren()) {
            setJSValue(vmCall(
                Int64, m_out.operation(operationNewEmptyArray), m_callFrame,
                weakStructure(structure)));
            return;
        }

        // Non-empty slow path: spill the boxed elements into a VM scratch
        // buffer, then hand the buffer to operationNewArray.
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ASSERT(scratchSize);
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

        for (unsigned operandIndex = 0; operandIndex < m_node->numChildren(); ++operandIndex) {
            Edge edge = m_graph.varArgChild(m_node, operandIndex);
            LValue valueToStore;
            switch (m_node->indexingType()) {
            case ALL_DOUBLE_INDEXING_TYPES:
                // The buffer holds EncodedJSValues, so doubles must be boxed.
                valueToStore = boxDouble(lowDouble(edge));
                break;
            default:
                valueToStore = lowJSValue(edge, ManualOperandSpeculation);
                break;
            }
            m_out.store64(valueToStore, m_out.absolute(buffer + operandIndex));
        }

        // Mark the scratch buffer live across the call so GC scans it, then
        // clear the active length once the array owns the values.
        m_out.storePtr(
            m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        LValue result = vmCall(
            Int64, m_out.operation(operationNewArray), m_callFrame,
            weakStructure(structure), m_out.constIntPtr(buffer),
            m_out.constIntPtr(m_node->numChildren()));

        m_out.storePtr(m_out.intPtrZero, m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
5973
    // Lowers NewArrayWithSpread: builds a contiguous array from a mix of plain
    // elements and spread elements (marked in the node's bit vector). The fast
    // path computes the total length with overflow checks, allocates once, and
    // copies each piece in order; the generic path spills to a scratch buffer
    // and calls operationNewArrayWithSpreadSlow.
    void compileNewArrayWithSpread()
    {
        if (m_graph.isWatchingHavingABadTimeWatchpoint(m_node)) {
            CheckedInt32 startLength = 0;
            BitVector* bitVector = m_node->bitVector();
            // Spread lengths for PhantomCreateRest are loaded once per inline
            // call frame and reused in both the length pass and the copy pass.
            HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

            // Pass 1a: accumulate the statically-known portion of the length
            // (plain elements, and spreads of compile-time array buffers).
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (!bitVector->get(i))
                    ++startLength;
                else {
                    Edge& child = m_graph.varArgChild(m_node, i);
                    if (child->op() == PhantomSpread && child->child1()->op() == PhantomNewArrayBuffer)
                        startLength += child->child1()->castOperand<JSImmutableButterfly*>()->length();
                }
            }

            if (startLength.hasOverflowed()) {
                terminate(Overflow);
                return;
            }

            LValue length = m_out.constInt32(startLength.unsafeGet());

            // Pass 1b: add the dynamic spread lengths with speculative
            // (OSR-exiting) overflow-checked adds.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                if (bitVector->get(i)) {
                    Edge use = m_graph.varArgChild(m_node, i);
                    CheckValue* lengthCheck = nullptr;
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomCreateRest) {
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();
                            LValue spreadLength = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                                return getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
                            }).iterator->value;
                            lengthCheck = m_out.speculateAdd(length, spreadLength);
                        }
                    } else {
                        LValue fixedArray = lowCell(use);
                        lengthCheck = m_out.speculateAdd(length, m_out.load32(fixedArray, m_heaps.JSFixedArray_size));
                    }

                    if (lengthCheck) {
                        blessSpeculation(lengthCheck, Overflow, noValue(), nullptr, m_origin);
                        length = lengthCheck;
                    }
                }
            }

            // Lengths at or above the array-storage threshold take the OSR
            // exit rather than attempting a contiguous allocation.
            LValue exceedsMaxAllowedLength = m_out.aboveOrEqual(length, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
            blessSpeculation(m_out.speculate(exceedsMaxAllowedLength), Overflow, noValue(), nullptr, m_origin);

            RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->originalArrayStructureForIndexingType(ArrayWithContiguous));
            ArrayValues arrayValues = allocateUninitializedContiguousJSArray(length, structure);
            LValue result = arrayValues.array;
            LValue storage = arrayValues.butterfly;
            // Running write cursor into the new array's butterfly.
            LValue index = m_out.constIntPtr(0);

            // Pass 2: copy each piece into place, advancing the cursor.
            for (unsigned i = 0; i < m_node->numChildren(); ++i) {
                Edge use = m_graph.varArgChild(m_node, i);
                if (bitVector->get(i)) {
                    if (use->op() == PhantomSpread) {
                        if (use->child1()->op() == PhantomNewArrayBuffer) {
                            // Compile-time constant spread: emit unrolled stores.
                            IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                            auto* array = use->child1()->castOperand<JSImmutableButterfly*>();
                            for (unsigned i = 0; i < array->length(); ++i) {
                                // Because resulted array from NewArrayWithSpread is always contiguous, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                m_out.store64(m_out.constInt64(value), m_out.baseIndex(heap, storage, index, JSValue(), (Checked<int32_t>(sizeof(JSValue)) * i).unsafeGet()));
                            }
                            index = m_out.add(index, m_out.constIntPtr(array->length()));
                        } else {
                            // Spread of a rest parameter: copy directly from the
                            // inlined frame's argument slots with a loop.
                            RELEASE_ASSERT(use->child1()->op() == PhantomCreateRest);
                            InlineCallFrame* inlineCallFrame = use->child1()->origin.semantic.inlineCallFrame();
                            unsigned numberOfArgumentsToSkip = use->child1()->numberOfArgumentsToSkip();

                            LValue length = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));
                            LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);

                            LBasicBlock loopStart = m_out.newBlock();
                            LBasicBlock continuation = m_out.newBlock();

                            ValueFromBlock loadIndexStart = m_out.anchor(m_out.constIntPtr(0));
                            ValueFromBlock arrayIndexStart = m_out.anchor(index);
                            ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                            // Empty spreads skip the loop.
                            m_out.branch(
                                m_out.isZero64(length),
                                unsure(continuation), unsure(loopStart));

                            LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                            LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                            LValue loadIndex = m_out.phi(pointerType(), loadIndexStart);

                            LValue item = m_out.load64(m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
                            m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                            LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                            LValue nextLoadIndex = m_out.add(loadIndex, m_out.constIntPtr(1));
                            ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
                            m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                            m_out.branch(
                                m_out.below(nextLoadIndex, length),
                                unsure(loopStart), unsure(continuation));

                            m_out.appendTo(continuation, lastNext);
                            // New cursor: either the untouched start (empty
                            // spread) or the post-loop position.
                            index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                        }
                    } else {
                        // Materialized spread: copy out of a JSFixedArray with
                        // a loop of the same shape as above.
                        LBasicBlock loopStart = m_out.newBlock();
                        LBasicBlock continuation = m_out.newBlock();

                        LValue fixedArray = lowCell(use);

                        ValueFromBlock fixedIndexStart = m_out.anchor(m_out.constIntPtr(0));
                        ValueFromBlock arrayIndexStart = m_out.anchor(index);
                        ValueFromBlock arrayIndexStartForFinish = m_out.anchor(index);

                        LValue fixedArraySize = m_out.zeroExtPtr(m_out.load32(fixedArray, m_heaps.JSFixedArray_size));

                        m_out.branch(
                            m_out.isZero64(fixedArraySize),
                            unsure(continuation), unsure(loopStart));

                        LBasicBlock lastNext = m_out.appendTo(loopStart, continuation);

                        LValue arrayIndex = m_out.phi(pointerType(), arrayIndexStart);
                        LValue fixedArrayIndex = m_out.phi(pointerType(), fixedIndexStart);

                        LValue item = m_out.load64(m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, fixedArrayIndex));
                        m_out.store64(item, m_out.baseIndex(m_heaps.indexedContiguousProperties, storage, arrayIndex));

                        LValue nextArrayIndex = m_out.add(arrayIndex, m_out.constIntPtr(1));
                        LValue nextFixedArrayIndex = m_out.add(fixedArrayIndex, m_out.constIntPtr(1));
                        ValueFromBlock arrayIndexLoopForFinish = m_out.anchor(nextArrayIndex);

                        m_out.addIncomingToPhi(fixedArrayIndex, m_out.anchor(nextFixedArrayIndex));
                        m_out.addIncomingToPhi(arrayIndex, m_out.anchor(nextArrayIndex));

                        m_out.branch(
                            m_out.below(nextFixedArrayIndex, fixedArraySize),
                            unsure(loopStart), unsure(continuation));

                        m_out.appendTo(continuation, lastNext);
                        index = m_out.phi(pointerType(), arrayIndexStartForFinish, arrayIndexLoopForFinish);
                    }
                } else {
                    // Plain (non-spread) element: single store, cursor += 1.
                    IndexedAbstractHeap& heap = m_heaps.indexedContiguousProperties;
                    LValue item = lowJSValue(use);
                    m_out.store64(item, m_out.baseIndex(heap, storage, index));
                    index = m_out.add(index, m_out.constIntPtr(1));
                }
            }

            mutatorFence();
            setJSValue(result);
            return;
        }

        // Generic path: spill every child (cells for spreads, values for plain
        // elements) into a GC-visible scratch buffer and let the VM build the
        // array.
        ASSERT(m_node->numChildren());
        size_t scratchSize = sizeof(EncodedJSValue) * m_node->numChildren();
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
        BitVector* bitVector = m_node->bitVector();
        for (unsigned i = 0; i < m_node->numChildren(); ++i) {
            Edge use = m_graph.m_varArgChildren[m_node->firstChild() + i];
            LValue value;
            if (bitVector->get(i))
                value = lowCell(use);
            else
                value = lowJSValue(use);
            m_out.store64(value, m_out.absolute(&buffer[i]));
        }

        // Keep the buffer alive across the call, then release it.
        m_out.storePtr(m_out.constIntPtr(scratchSize), m_out.absolute(scratchBuffer->addressOfActiveLength()));
        LValue result = vmCall(Int64, m_out.operation(operationNewArrayWithSpreadSlow), m_callFrame, m_out.constIntPtr(buffer), m_out.constInt32(m_node->numChildren()));
        m_out.storePtr(m_out.constIntPtr(0), m_out.absolute(scratchBuffer->addressOfActiveLength()));

        setJSValue(result);
    }
6159
    // Lowers CreateThis: allocates the |this| object for a construct call.
    // Fast path requires the callee to be a JSFunction whose rare data already
    // caches an allocator and structure; anything else goes to the VM.
    void compileCreateThis()
    {
        LValue callee = lowCell(m_node->child1());

        LBasicBlock isFunctionBlock = m_out.newBlock();
        LBasicBlock hasRareData = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-function callees (per the proven type) take the slow path.
        m_out.branch(isFunction(callee, provenType(m_node->child1())), usually(isFunctionBlock), rarely(slowPath));

        // No rare data yet means no cached allocator/structure: slow path.
        LBasicBlock lastNext = m_out.appendTo(isFunctionBlock, hasRareData);
        LValue rareData = m_out.loadPtr(callee, m_heaps.JSFunction_rareData);
        m_out.branch(m_out.isZero64(rareData), rarely(slowPath), usually(hasRareData));

        // Fast path: allocate with the cached allocator and structure; the
        // allocator itself may still bail to slowPath.
        m_out.appendTo(hasRareData, slowPath);
        LValue allocator = m_out.loadPtr(rareData, m_heaps.FunctionRareData_allocator);
        LValue structure = m_out.loadPtr(rareData, m_heaps.FunctionRareData_structure);
        LValue butterfly = m_out.constIntPtr(0);
        ValueFromBlock fastResult = m_out.anchor(allocateObject(allocator, structure, butterfly, slowPath));
        m_out.jump(continuation);

        // Slow path: operationCreateThis handles every other case.
        m_out.appendTo(slowPath, continuation);
        ValueFromBlock slowResult = m_out.anchor(vmCall(
            Int64, m_out.operation(operationCreateThis), m_callFrame, callee, m_out.constInt32(m_node->inlineCapacity())));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(Int64, fastResult, slowResult);

        mutatorFence();
        setJSValue(result);
    }
6193
6194 void compileSpread()
6195 {
6196 if (m_node->child1()->op() == PhantomNewArrayBuffer) {
6197 LBasicBlock slowAllocation = m_out.newBlock();
6198 LBasicBlock continuation = m_out.newBlock();
6199
6200 auto* immutableButterfly = m_node->child1()->castOperand<JSImmutableButterfly*>();
6201
6202 LValue fastFixedArrayValue = allocateVariableSizedCell<JSFixedArray>(
6203 m_out.constIntPtr(JSFixedArray::allocationSize(immutableButterfly->length()).unsafeGet()),
6204 m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
6205 m_out.store32(m_out.constInt32(immutableButterfly->length()), fastFixedArrayValue, m_heaps.JSFixedArray_size);
6206 ValueFromBlock fastFixedArray = m_out.anchor(fastFixedArrayValue);
6207 m_out.jump(continuation);
6208
6209 LBasicBlock lastNext = m_out.appendTo(slowAllocation, continuation);
6210 ValueFromBlock slowFixedArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, m_out.constInt32(immutableButterfly->length())));
6211 m_out.jump(continuation);
6212
6213 m_out.appendTo(continuation, lastNext);
6214 LValue fixedArray = m_out.phi(pointerType(), fastFixedArray, slowFixedArray);
6215 for (unsigned i = 0; i < immutableButterfly->length(); i++) {
6216 // Because forwarded values are drained as JSValue, we should not generate value
6217 // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
6218 int64_t value = JSValue::encode(immutableButterfly->get(i));
6219 m_out.store64(
6220 m_out.constInt64(value),
6221 m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, m_out.constIntPtr(i), jsNumber(i)));
6222 }
6223 mutatorFence();
6224 setJSValue(fixedArray);
6225 return;
6226 }
6227
6228 if (m_node->child1()->op() == PhantomCreateRest) {
6229 // This IR is rare to generate since it requires escaping the Spread
6230 // but not the CreateRest. In bytecode, we have only few operations that
6231 // accept Spread's result as input. This usually leads to the Spread node not
6232 // escaping. However, this can happen if for example we generate a PutStack on
6233 // the Spread but nothing escapes the CreateRest.
6234 LBasicBlock loopHeader = m_out.newBlock();
6235 LBasicBlock loopBody = m_out.newBlock();
6236 LBasicBlock slowAllocation = m_out.newBlock();
6237 LBasicBlock continuation = m_out.newBlock();
6238 LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopHeader);
6239
6240 InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
6241 unsigned numberOfArgumentsToSkip = m_node->child1()->numberOfArgumentsToSkip();
6242 LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
6243 LValue length = getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
6244 static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
6245 LValue size = m_out.add(
6246 m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
6247 m_out.constIntPtr(JSFixedArray::offsetOfData()));
6248
6249 LValue fastArrayValue = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowAllocation);
6250 m_out.store32(length, fastArrayValue, m_heaps.JSFixedArray_size);
6251 ValueFromBlock fastArray = m_out.anchor(fastArrayValue);
6252 m_out.jump(loopHeader);
6253
6254 m_out.appendTo(slowAllocation, loopHeader);
6255 ValueFromBlock slowArray = m_out.anchor(vmCall(pointerType(), m_out.operation(operationCreateFixedArray), m_callFrame, length));
6256 m_out.jump(loopHeader);
6257
6258 m_out.appendTo(loopHeader, loopBody);
6259 LValue fixedArray = m_out.phi(pointerType(), fastArray, slowArray);
6260 ValueFromBlock startIndex = m_out.anchor(m_out.constIntPtr(0));
6261 m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopBody));
6262
6263 m_out.appendTo(loopBody, continuation);
6264 LValue index = m_out.phi(pointerType(), startIndex);
6265 LValue value = m_out.load64(
6266 m_out.baseIndex(m_heaps.variables, sourceStart, index));
6267 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fixedArray, index));
6268 LValue nextIndex = m_out.add(m_out.constIntPtr(1), index);
6269 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6270 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)), unsure(loopBody), unsure(continuation));
6271
6272 m_out.appendTo(continuation, lastNext);
6273 mutatorFence();
6274 setJSValue(fixedArray);
6275 return;
6276 }
6277
6278 LValue argument = lowCell(m_node->child1());
6279
6280 LValue result;
6281
6282 if (m_node->child1().useKind() == ArrayUse)
6283 speculateArray(m_node->child1());
6284
6285 if (m_graph.canDoFastSpread(m_node, m_state.forNode(m_node->child1()))) {
6286 LBasicBlock preLoop = m_out.newBlock();
6287 LBasicBlock loopSelection = m_out.newBlock();
6288 LBasicBlock contiguousLoopStart = m_out.newBlock();
6289 LBasicBlock doubleLoopStart = m_out.newBlock();
6290 LBasicBlock slowPath = m_out.newBlock();
6291 LBasicBlock continuation = m_out.newBlock();
6292
6293 LValue indexingShape = m_out.load8ZeroExt32(argument, m_heaps.JSCell_indexingTypeAndMisc);
6294 indexingShape = m_out.bitAnd(indexingShape, m_out.constInt32(IndexingShapeMask));
6295 LValue isOKIndexingType = m_out.belowOrEqual(
6296 m_out.sub(indexingShape, m_out.constInt32(Int32Shape)),
6297 m_out.constInt32(ContiguousShape - Int32Shape));
6298
6299 m_out.branch(isOKIndexingType, unsure(preLoop), unsure(slowPath));
6300 LBasicBlock lastNext = m_out.appendTo(preLoop, loopSelection);
6301
6302 LValue butterfly = m_out.loadPtr(argument, m_heaps.JSObject_butterfly);
6303 LValue length = m_out.load32NonNegative(butterfly, m_heaps.Butterfly_publicLength);
6304 static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "Assumed in the code below.");
6305 LValue size = m_out.add(
6306 m_out.shl(m_out.zeroExtPtr(length), m_out.constInt32(3)),
6307 m_out.constIntPtr(JSFixedArray::offsetOfData()));
6308
6309 LValue fastAllocation = allocateVariableSizedCell<JSFixedArray>(size, m_graph.m_vm.fixedArrayStructure.get(), slowPath);
6310 ValueFromBlock fastResult = m_out.anchor(fastAllocation);
6311 m_out.store32(length, fastAllocation, m_heaps.JSFixedArray_size);
6312
6313 ValueFromBlock startIndexForContiguous = m_out.anchor(m_out.constIntPtr(0));
6314 ValueFromBlock startIndexForDouble = m_out.anchor(m_out.constIntPtr(0));
6315
6316 m_out.branch(m_out.isZero32(length), unsure(continuation), unsure(loopSelection));
6317
6318 m_out.appendTo(loopSelection, contiguousLoopStart);
6319 m_out.branch(m_out.equal(indexingShape, m_out.constInt32(DoubleShape)),
6320 unsure(doubleLoopStart), unsure(contiguousLoopStart));
6321
6322 {
6323 m_out.appendTo(contiguousLoopStart, doubleLoopStart);
6324 LValue index = m_out.phi(pointerType(), startIndexForContiguous);
6325
6326 TypedPointer loadSite = m_out.baseIndex(m_heaps.root, butterfly, index, ScaleEight); // We read TOP here since we can be reading either int32 or contiguous properties.
6327 LValue value = m_out.load64(loadSite);
6328 value = m_out.select(m_out.isZero64(value), m_out.constInt64(JSValue::encode(jsUndefined())), value);
6329 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
6330
6331 LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
6332 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6333
6334 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
6335 unsure(contiguousLoopStart), unsure(continuation));
6336 }
6337
6338 {
6339 m_out.appendTo(doubleLoopStart, slowPath);
6340 LValue index = m_out.phi(pointerType(), startIndexForDouble);
6341
6342 LValue value = m_out.loadDouble(m_out.baseIndex(m_heaps.indexedDoubleProperties, butterfly, index));
6343 LValue isNaN = m_out.doubleNotEqualOrUnordered(value, value);
6344 LValue holeResult = m_out.constInt64(JSValue::encode(jsUndefined()));
6345 LValue normalResult = boxDouble(value);
6346 value = m_out.select(isNaN, holeResult, normalResult);
6347 m_out.store64(value, m_out.baseIndex(m_heaps.JSFixedArray_buffer, fastAllocation, index));
6348
6349 LValue nextIndex = m_out.add(index, m_out.constIntPtr(1));
6350 m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
6351
6352 m_out.branch(m_out.below(nextIndex, m_out.zeroExtPtr(length)),
6353 unsure(doubleLoopStart), unsure(continuation));
6354 }
6355
6356 m_out.appendTo(slowPath, continuation);
6357 ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationSpreadFastArray), m_callFrame, argument));
6358 m_out.jump(continuation);
6359
6360 m_out.appendTo(continuation, lastNext);
6361 result = m_out.phi(pointerType(), fastResult, slowResult);
6362 mutatorFence();
6363 } else
6364 result = vmCall(pointerType(), m_out.operation(operationSpreadGeneric), m_callFrame, argument);
6365
6366 setJSValue(result);
6367 }
6368
    // Lowers NewArrayBuffer: produces a JSArray backed by the node's constant
    // JSImmutableButterfly cell operand.
    void compileNewArrayBuffer()
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingMode()));
        auto* immutableButterfly = m_node->castOperand<JSImmutableButterfly*>();

        // Fast path is only sound while the global object's array structures
        // are intact (not "having a bad time") and the indexing mode does not
        // require ArrayStorage.
        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingMode())) {
            LBasicBlock slowPath = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Inline-allocate the JSArray cell, pointing its butterfly straight
            // at the shared immutable butterfly; branch to slowPath if the
            // inline allocation fails.
            LValue fastArray = allocateObject<JSArray>(structure, m_out.constIntPtr(immutableButterfly->toButterfly()), slowPath);
            ValueFromBlock fastResult = m_out.anchor(fastArray);
            m_out.jump(continuation);

            m_out.appendTo(slowPath, continuation);
            LValue slowArray = vmCall(Int64, m_out.operation(operationNewArrayBuffer), m_callFrame, weakStructure(structure), m_out.weakPointer(m_node->cellOperand()));
            ValueFromBlock slowResult = m_out.anchor(slowArray);
            m_out.jump(continuation);

            m_out.appendTo(continuation);

            // Fence so the initializing stores are complete before the new
            // cell is published.
            mutatorFence();
            setJSValue(m_out.phi(pointerType(), slowResult, fastResult));
            return;
        }

        // General case: let the runtime allocate the array.
        setJSValue(vmCall(
            Int64, m_out.operation(operationNewArrayBuffer), m_callFrame,
            weakStructure(structure), m_out.weakPointer(m_node->cellOperand())));
    }
6400
    // Lowers NewArrayWithSize: allocates an array whose public length is the
    // Int32 value of child1.
    void compileNewArrayWithSize()
    {
        LValue publicLength = lowInt32(m_node->child1());

        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        RegisteredStructure structure = m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(
            m_node->indexingType()));

        // Fast path: inline-allocate the JSArray and its butterfly for
        // non-ArrayStorage indexing types while the array structures are intact.
        if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(m_node->indexingType())) {
            IndexingType indexingType = m_node->indexingType();
            setJSValue(
                allocateJSArray(
                    publicLength, publicLength, weakPointer(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType)), m_out.constInt32(indexingType)).array);
            mutatorFence();
            return;
        }

        // Runtime path. Lengths at or above MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH
        // get the ArrayWithArrayStorage structure up front. Note the compare is
        // unsigned (aboveOrEqual), so a negative int32 length also selects the
        // ArrayStorage structure — presumably the operation range-checks it.
        LValue structureValue = m_out.select(
            m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)),
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))),
            weakStructure(structure));
        setJSValue(vmCall(Int64, m_out.operation(operationNewArrayWithSize), m_callFrame, structureValue, publicLength, m_out.intPtrZero));
    }
6424
    // Lowers NewTypedArray for the node's typedArrayType, dispatching on the
    // use kind of the length/argument child.
    void compileNewTypedArray()
    {
        TypedArrayType typedArrayType = m_node->typedArrayType();
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        switch (m_node->child1().useKind()) {
        case Int32Use: {
            // new TypedArray(int32 length): inline-allocate the backing store
            // and the view cell; bail to the runtime for large sizes or failed
            // allocations.
            RegisteredStructure structure = m_graph.registerStructure(globalObject->typedArrayStructureConcurrently(typedArrayType));

            LValue size = lowInt32(m_node->child1());

            LBasicBlock smallEnoughCase = m_out.newBlock();
            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Entering slowCase directly from here means "no storage was
            // allocated yet"; the phi in slowCase needs this null input.
            ValueFromBlock noStorage = m_out.anchor(m_out.intPtrZero);

            m_out.branch(
                m_out.above(size, m_out.constInt32(JSArrayBufferView::fastSizeLimit)),
                rarely(slowCase), usually(smallEnoughCase));

            LBasicBlock lastNext = m_out.appendTo(smallEnoughCase, slowCase);

            // byteSize = size << logElementSize, rounded up to a multiple of 8
            // so the word-granular zero-fill below covers the whole allocation.
            LValue byteSize =
                m_out.shl(m_out.zeroExtPtr(size), m_out.constInt32(logElementSize(typedArrayType)));
            if (elementSize(typedArrayType) < 8) {
                byteSize = m_out.bitAnd(
                    m_out.add(byteSize, m_out.constIntPtr(7)),
                    m_out.constIntPtr(~static_cast<intptr_t>(7)));
            }

            LValue allocator = allocatorForSize(vm().primitiveGigacageAuxiliarySpace, byteSize, slowCase);
            LValue storage = allocateHeapCell(allocator, slowCase);

            // Zero the backing store, one 64-bit word at a time (byteSize is
            // 8-byte aligned by the rounding above).
            splatWords(
                storage,
                m_out.int32Zero,
                m_out.castToInt32(m_out.lShr(byteSize, m_out.constIntPtr(3))),
                m_out.int64Zero,
                m_heaps.typedArrayProperties);

#if !GIGACAGE_ENABLED && CPU(ARM64E)
            {
                // Without the Gigacage on ARM64E, the vector pointer is tagged
                // (pointer authentication) with the view's length; emit a
                // patchpoint that runs tagArrayPtr on the storage pointer.
                LValue sizePtr = m_out.zeroExtPtr(size);
                PatchpointValue* authenticate = m_out.patchpoint(pointerType());
                authenticate->appendSomeRegister(storage);
                authenticate->append(sizePtr, B3::ValueRep(B3::ValueRep::SomeLateRegister));
                authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    jit.move(params[1].gpr(), params[0].gpr());
                    jit.tagArrayPtr(params[2].gpr(), params[0].gpr());
                });
                storage = authenticate;
            }
#endif

            ValueFromBlock haveStorage = m_out.anchor(storage);

            LValue fastResultValue =
                allocateObject<JSArrayBufferView>(structure, m_out.intPtrZero, slowCase);

            m_out.storePtr(storage, fastResultValue, m_heaps.JSArrayBufferView_vector);
            m_out.store32(size, fastResultValue, m_heaps.JSArrayBufferView_length);
            m_out.store32(m_out.constInt32(FastTypedArray), fastResultValue, m_heaps.JSArrayBufferView_mode);

            // Fence before the freshly initialized view escapes.
            mutatorFence();
            ValueFromBlock fastResult = m_out.anchor(fastResultValue);
            m_out.jump(continuation);

            m_out.appendTo(slowCase, continuation);
            // storageValue is null when we bailed before allocating storage;
            // presumably the operation allocates its own storage in that case.
            LValue storageValue = m_out.phi(pointerType(), noStorage, haveStorage);

            VM& vm = this->vm();
            LValue slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewTypedArrayWithSizeForType(typedArrayType), locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                        locations[2].directGPR());
                },
                size, storageValue);
            ValueFromBlock slowResult = m_out.anchor(slowResultValue);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
            return;
        }

        case UntypedUse: {
            // new TypedArray(arbitrary value): fully handled by the runtime.
            LValue argument = lowJSValue(m_node->child1());

            LValue result = vmCall(
                pointerType(), m_out.operation(operationNewTypedArrayWithOneArgumentForType(typedArrayType)),
                m_callFrame, weakPointer(globalObject->typedArrayStructureConcurrently(typedArrayType)), argument);

            setJSValue(result);
            return;
        }

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return;
        }
    }
6529
6530 void compileAllocatePropertyStorage()
6531 {
6532 LValue object = lowCell(m_node->child1());
6533 setStorage(allocatePropertyStorage(object, m_node->transition()->previous.get()));
6534 }
6535
6536 void compileReallocatePropertyStorage()
6537 {
6538 Transition* transition = m_node->transition();
6539 LValue object = lowCell(m_node->child1());
6540 LValue oldStorage = lowStorage(m_node->child2());
6541
6542 setStorage(
6543 reallocatePropertyStorage(
6544 object, oldStorage, transition->previous.get(), transition->next.get()));
6545 }
6546
    // Lowers NukeStructureAndSetButterfly: nukes the structure of the object
    // (child1) and installs the storage lowered from child2 as its butterfly,
    // via the nukeStructureAndSetButterfly() helper.
    // NOTE(review): the two low*() calls below are arguments to one call, so
    // the order in which they emit IR is unspecified by C++; naming locals
    // would pin a deterministic emission order.
    void compileNukeStructureAndSetButterfly()
    {
        nukeStructureAndSetButterfly(lowStorage(m_node->child2()), lowCell(m_node->child1()));
    }
6551
    // Lowers ToNumber. If the abstract value proves the input is never a
    // number, call the runtime unconditionally; otherwise check inline and
    // only call out for non-number inputs.
    void compileToNumber()
    {
        LValue value = lowJSValue(m_node->child1());

        if (!(abstractValue(m_node->child1()).m_type & SpecBytecodeNumber))
            setJSValue(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
        else {
            LBasicBlock notNumber = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Numbers pass through unchanged.
            ValueFromBlock fastResult = m_out.anchor(value);
            m_out.branch(isNumber(value, provenType(m_node->child1())), unsure(continuation), unsure(notNumber));

            // notNumber case.
            LBasicBlock lastNext = m_out.appendTo(notNumber, continuation);
            // We have several attempts to remove ToNumber. But ToNumber still exists.
            // It means that converting non-numbers to numbers by this ToNumber is not rare.
            // Instead of the lazy slow path generator, we call the operation here.
            ValueFromBlock slowResult = m_out.anchor(vmCall(Int64, m_out.operation(operationToNumber), m_callFrame, value));
            m_out.jump(continuation);

            // continuation case.
            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, fastResult, slowResult));
        }
    }
6578
    // Lowers ToString, CallStringConstructor, and StringValueOf, dispatching
    // on child1's use kind.
    void compileToStringOrCallStringConstructorOrStringValueOf()
    {
        ASSERT(m_node->op() != StringValueOf || m_node->child1().useKind() == UntypedUse);
        switch (m_node->child1().useKind()) {
        case StringObjectUse: {
            // Proven StringObject: just unwrap its internal string value.
            LValue cell = lowCell(m_node->child1());
            speculateStringObjectForCell(m_node->child1(), cell);
            setJSValue(m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            return;
        }

        case StringOrStringObjectUse: {
            // Strings pass through; StringObjects are unwrapped; any other
            // type fails speculation (BadType).
            LValue cell = lowCell(m_node->child1());
            LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);

            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            ValueFromBlock simpleResult = m_out.anchor(cell);
            m_out.branch(
                m_out.equal(type, m_out.constInt32(StringType)),
                unsure(continuation), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(notString, continuation);
            speculate(
                BadType, jsValueValue(cell), m_node->child1().node(),
                m_out.notEqual(type, m_out.constInt32(StringObjectType)));
            ValueFromBlock unboxedResult = m_out.anchor(
                m_out.loadPtr(cell, m_heaps.JSWrapperObject_internalValue));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, unboxedResult));

            // Tell the abstract interpreter what the checks above proved.
            m_interpreter.filter(m_node->child1(), SpecString | SpecStringObject);
            return;
        }

        case CellUse:
        case NotCellUse:
        case UntypedUse: {
            // General case: strings pass through unchanged; everything else
            // goes to the appropriate runtime conversion.
            LValue value;
            if (m_node->child1().useKind() == CellUse)
                value = lowCell(m_node->child1());
            else if (m_node->child1().useKind() == NotCellUse)
                value = lowNotCell(m_node->child1());
            else
                value = lowJSValue(m_node->child1());

            LBasicBlock isCell = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // The cell test is decided statically when the use kind already
            // settles it.
            LValue isCellPredicate;
            if (m_node->child1().useKind() == CellUse)
                isCellPredicate = m_out.booleanTrue;
            else if (m_node->child1().useKind() == NotCellUse)
                isCellPredicate = m_out.booleanFalse;
            else
                isCellPredicate = this->isCell(value, provenType(m_node->child1()));
            m_out.branch(isCellPredicate, unsure(isCell), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isCell, notString);
            ValueFromBlock simpleResult = m_out.anchor(value);
            // Only emit the inline string check if the prediction says strings
            // actually occur here.
            LValue isStringPredicate;
            if (m_node->child1()->prediction() & SpecString) {
                isStringPredicate = isString(value, provenType(m_node->child1()));
            } else
                isStringPredicate = m_out.booleanFalse;
            m_out.branch(isStringPredicate, unsure(continuation), unsure(notString));

            m_out.appendTo(notString, continuation);
            // Pick the runtime operation matching the node op and whether the
            // input is known to be a cell.
            LValue operation;
            if (m_node->child1().useKind() == CellUse) {
                ASSERT(m_node->op() != StringValueOf);
                operation = m_out.operation(m_node->op() == ToString ? operationToStringOnCell : operationCallStringConstructorOnCell);
            } else {
                operation = m_out.operation(m_node->op() == ToString
                    ? operationToString : m_node->op() == StringValueOf
                    ? operationStringValueOf : operationCallStringConstructor);
            }
            ValueFromBlock convertedResult = m_out.anchor(vmCall(Int64, operation, m_callFrame, value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setJSValue(m_out.phi(Int64, simpleResult, convertedResult));
            return;
        }

        case Int32Use:
            // Numeric use kinds convert directly through the fixed-radix-10
            // string conversion operations.
            setJSValue(vmCall(Int64, m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(10)));
            return;

        case Int52RepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(10)));
            return;

        case DoubleRepUse:
            setJSValue(vmCall(Int64, m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(10)));
            return;

        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            break;
        }
    }
6685
    // Lowers ToPrimitive: non-cells and non-object cells are already
    // primitive and flow through unchanged; only objects call the runtime.
    void compileToPrimitive()
    {
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isObjectCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock, 3> results;

        // Non-cell: already primitive.
        results.append(m_out.anchor(value));
        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));

        // Cell but not object: also primitive.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, isObjectCase);
        results.append(m_out.anchor(value));
        m_out.branch(
            isObject(value, provenType(m_node->child1())),
            unsure(isObjectCase), unsure(continuation));

        // Object: the runtime performs the full ToPrimitive conversion.
        m_out.appendTo(isObjectCase, continuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationToPrimitive), m_callFrame, value)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
6714
    // Lowers MakeRope: inline-allocates a JSRopeString over two or three
    // string children ("fibers"), calling operationMakeRope2/3 when the cell
    // allocation fails, and substituting the shared empty string when the
    // combined length is zero.
    void compileMakeRope()
    {
        struct FlagsAndLength {
            LValue flags;
            LValue length;
        };

        Edge edges[3] = {
            m_node->child1(),
            m_node->child2(),
            m_node->child3(),
        };
        LValue kids[3];
        unsigned numKids;
        kids[0] = lowCell(edges[0]);
        kids[1] = lowCell(edges[1]);
        if (edges[2]) {
            kids[2] = lowCell(edges[2]);
            numKids = 3;
        } else {
            kids[2] = 0;
            numKids = 2;
        }

        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        Allocator allocator = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);

        LValue result = allocateCell(
            m_out.constIntPtr(allocator.localAllocator()), vm().stringStructure.get(), slowPath);

        // This puts nullptr for the first fiber. It makes visitChildren safe even if this JSRopeString is discarded due to the speculation failure in the following path.
        m_out.storePtr(m_out.constIntPtr(JSString::isRopeInPointer), result, m_heaps.JSRopeString_fiber0);

        // Produces (flags, length) for one child: compile-time constants when
        // the child is a known string; otherwise loaded from the rope header
        // or from the StringImpl, depending on whether the child is a rope.
        auto getFlagsAndLength = [&] (Edge& edge, LValue child) {
            if (JSString* string = edge->dynamicCastConstant<JSString*>(vm())) {
                return FlagsAndLength {
                    m_out.constInt32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0),
                    m_out.constInt32(string->length())
                };
            }

            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock ropeCase = m_out.newBlock();
            LBasicBlock notRopeCase = m_out.newBlock();

            m_out.branch(isRopeString(child, edge), unsure(ropeCase), unsure(notRopeCase));

            LBasicBlock lastNext = m_out.appendTo(ropeCase, notRopeCase);
            ValueFromBlock flagsForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_flags));
            ValueFromBlock lengthForRope = m_out.anchor(m_out.load32NonNegative(child, m_heaps.JSRopeString_length));
            m_out.jump(continuation);

            m_out.appendTo(notRopeCase, continuation);
            LValue stringImpl = m_out.loadPtr(child, m_heaps.JSString_value);
            ValueFromBlock flagsForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_hashAndFlags));
            ValueFromBlock lengthForNonRope = m_out.anchor(m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return FlagsAndLength {
                m_out.phi(Int32, flagsForRope, flagsForNonRope),
                m_out.phi(Int32, lengthForRope, lengthForNonRope)
            };
        };

        FlagsAndLength flagsAndLength = getFlagsAndLength(edges[0], kids[0]);
        for (unsigned i = 1; i < numKids; ++i) {
            // Combine children: flags are intersected (the is-8-bit bit
            // survives only if every child has it), and lengths are added with
            // an overflow check that OSR-exits on overflow.
            auto mergeFlagsAndLength = [&] (Edge& edge, LValue child, FlagsAndLength previousFlagsAndLength) {
                FlagsAndLength flagsAndLength = getFlagsAndLength(edge, child);
                LValue flags = m_out.bitAnd(previousFlagsAndLength.flags, flagsAndLength.flags);
                CheckValue* lengthCheck = m_out.speculateAdd(previousFlagsAndLength.length, flagsAndLength.length);
                blessSpeculation(lengthCheck, Uncountable, noValue(), nullptr, m_origin);
                return FlagsAndLength {
                    flags,
                    lengthCheck
                };
            };
            flagsAndLength = mergeFlagsAndLength(edges[i], kids[i], flagsAndLength);
        }

        // Pack the fibers:
        // fiber0 = kids[0] | rope flag | (is-8-bit bit from the merged flags);
        // fiber1 = total length in the low 32 bits | kids[1] << 32;
        // fiber2 = kids[1] >> 32, with kids[2] << 16 merged in when there are
        // three kids.
        m_out.storePtr(
            m_out.bitOr(
                m_out.bitOr(kids[0], m_out.constIntPtr(JSString::isRopeInPointer)),
                m_out.bitAnd(m_out.constIntPtr(JSRopeString::is8BitInPointer), m_out.zeroExtPtr(flagsAndLength.flags))),
            result, m_heaps.JSRopeString_fiber0);
        m_out.storePtr(
            m_out.bitOr(m_out.zeroExtPtr(flagsAndLength.length), m_out.shl(kids[1], m_out.constInt32(32))),
            result, m_heaps.JSRopeString_fiber1);
        if (numKids == 2)
            m_out.storePtr(m_out.lShr(kids[1], m_out.constInt32(32)), result, m_heaps.JSRopeString_fiber2);
        else
            m_out.storePtr(m_out.bitOr(m_out.lShr(kids[1], m_out.constInt32(32)), m_out.shl(kids[2], m_out.constInt32(16))), result, m_heaps.JSRopeString_fiber2);

        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(result);
        // Zero total length: use the shared empty string instead; the freshly
        // allocated rope cell is simply left unreferenced.
        m_out.branch(m_out.isZero32(flagsAndLength.length), rarely(emptyCase), usually(continuation));

        LBasicBlock lastNext = m_out.appendTo(emptyCase, slowPath);
        ValueFromBlock emptyResult = m_out.anchor(weakPointer(jsEmptyString(&m_graph.m_vm)));
        m_out.jump(continuation);

        // Allocation-failure path: build the rope in the runtime.
        m_out.appendTo(slowPath, continuation);
        LValue slowResultValue;
        VM& vm = this->vm();
        switch (numKids) {
        case 2:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope2, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR());
                }, kids[0], kids[1]);
            break;
        case 3:
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationMakeRope3, locations[0].directGPR(), locations[1].directGPR(),
                        locations[2].directGPR(), locations[3].directGPR());
                }, kids[0], kids[1], kids[2]);
            break;
        default:
            DFG_CRASH(m_graph, m_node, "Bad number of children");
            break;
        }
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, emptyResult, slowResult));
    }
6849
    // Lowers StringCharAt: loads one character from a resolved string and
    // returns it as a JSString, using the VM's single-character string table
    // for the common case. Out-of-bounds behavior depends on the array mode
    // and on whether the String prototype chain is intact.
    void compileStringCharAt()
    {
        LValue base = lowString(m_graph.child(m_node, 0));
        LValue index = lowInt32(m_graph.child(m_node, 1));
        LValue storage = lowStorage(m_graph.child(m_node, 2));

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bounds check against the StringImpl's length (unsigned compare, so
        // negative indices also go to the slow path).
        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);
        m_out.branch(
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)),
            rarely(slowPath), usually(fastPath));

        LBasicBlock lastNext = m_out.appendTo(fastPath, slowPath);

        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();

        // Dispatch on the StringImpl's is-8-bit flag to pick the load width.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);

        // FIXME: Need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1)))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);

        // 16-bit characters above maxSingleCharacterString are not in the
        // single-character table and need a runtime-created string.
        LValue char16BitValue = m_out.load16ZeroExt32(
            m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_graph.child(m_node, 1))));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);

        Vector<ValueFromBlock, 4> results;
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowPath);

        // Small character: index into the VM's single-character string table.
        LValue character = m_out.phi(Int32, char8Bit, char16Bit);

        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());

        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        if (m_node->arrayMode().isInBounds()) {
            // In-bounds mode: going out of bounds unconditionally OSR-exits;
            // the anchored zero only exists to keep the phi well-formed.
            speculate(OutOfBounds, noValue(), 0, m_out.booleanTrue);
            results.append(m_out.anchor(m_out.intPtrZero));
        } else {
            JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

            bool prototypeChainIsSane = false;
            if (globalObject->stringPrototypeChainIsSane()) {
                // FIXME: This could be captured using a Speculation mode that means
                // "out-of-bounds loads return a trivial value", something like
                // SaneChainOutOfBounds.
                // https://bugs.webkit.org/show_bug.cgi?id=144668

                m_graph.registerAndWatchStructureTransition(globalObject->stringPrototype()->structure(vm()));
                m_graph.registerAndWatchStructureTransition(globalObject->objectPrototype()->structure(vm()));

                prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
            }
            if (prototypeChainIsSane) {
                // With a sane prototype chain, an out-of-bounds non-negative
                // index simply yields undefined; only negative indices need
                // the generic runtime lookup.
                LBasicBlock negativeIndex = m_out.newBlock();

                results.append(m_out.anchor(m_out.constInt64(JSValue::encode(jsUndefined()))));
                m_out.branch(
                    m_out.lessThan(index, m_out.int32Zero),
                    rarely(negativeIndex), usually(continuation));

                m_out.appendTo(negativeIndex, continuation);
            }

            results.append(m_out.anchor(vmCall(
                Int64, m_out.operation(operationGetByValStringInt), m_callFrame, base, index)));
        }

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
6958
    // Lowers StringCharCodeAt: returns the int32 code unit at child2's index
    // in the resolved string child1, OSR-exiting when the index is out of
    // bounds (unsigned compare, so negative indices exit too).
    void compileStringCharCodeAt()
    {
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue base = lowString(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue storage = lowStorage(m_node->child3());

        LValue stringImpl = m_out.loadPtr(base, m_heaps.JSString_value);

        // Bounds check against the StringImpl's length.
        speculate(
            Uncountable, noValue(), 0,
            m_out.aboveOrEqual(
                index, m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length)));

        // Dispatch on the is-8-bit flag to pick the load width.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit);

        // FIXME: need to cage strings!
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        ValueFromBlock char8Bit = m_out.anchor(
            m_out.load8ZeroExt32(m_out.baseIndex(
                m_heaps.characters8, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(is16Bit, continuation);

        ValueFromBlock char16Bit = m_out.anchor(
            m_out.load16ZeroExt32(m_out.baseIndex(
                m_heaps.characters16, storage, m_out.zeroExtPtr(index),
                provenValue(m_node->child2()))));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setInt32(m_out.phi(Int32, char8Bit, char16Bit));
    }
7004
    // Lowers StringFromCharCode. Int32 char codes up to
    // maxSingleCharacterString are served from the VM's single-character
    // string table; larger codes (and Untyped inputs) call the runtime.
    void compileStringFromCharCode()
    {
        Edge childEdge = m_node->child1();

        if (childEdge.useKind() == UntypedUse) {
            LValue result = vmCall(
                Int64, m_out.operation(operationStringFromCharCodeUntyped), m_callFrame,
                lowJSValue(childEdge));
            setJSValue(result);
            return;
        }

        DFG_ASSERT(m_graph, m_node, childEdge.useKind() == Int32Use, childEdge.useKind());

        LValue value = lowInt32(childEdge);

        LBasicBlock smallIntCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unsigned compare: negative int32 codes also take the slow case.
        m_out.branch(
            m_out.above(value, m_out.constInt32(maxSingleCharacterString)),
            rarely(slowCase), usually(smallIntCase));

        LBasicBlock lastNext = m_out.appendTo(smallIntCase, slowCase);

        // Fast case: index into the table of pre-made one-character strings.
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        LValue fastResultValue = m_out.loadPtr(
            m_out.baseIndex(m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(value)));
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            pointerType(), m_out.operation(operationStringFromCharCode), m_callFrame, value);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        setJSValue(m_out.phi(Int64, fastResult, slowResult));
    }
7048
7049 void compileGetByOffset()
7050 {
7051 StorageAccessData& data = m_node->storageAccessData();
7052
7053 setJSValue(loadProperty(
7054 lowStorage(m_node->child1()), data.identifierNumber, data.offset));
7055 }
7056
7057 void compileGetGetter()
7058 {
7059 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_getter));
7060 }
7061
7062 void compileGetSetter()
7063 {
7064 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.GetterSetter_setter));
7065 }
7066
    void compileMultiGetByOffset()
    {
        // Lower MultiGetByOffset: switch on the base's structure ID and, per
        // matching case, either materialize a constant or load the property
        // from the appropriate storage (inline or out-of-line butterfly,
        // possibly on a prototype). All cases merge into one phi.
        LValue base = lowCell(m_node->child1());
        
        MultiGetByOffsetData& data = m_node->multiGetByOffsetData();
        
        Vector<LBasicBlock, 2> blocks(data.cases.size());
        for (unsigned i = data.cases.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        // Several structures may map to the same case block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            for (unsigned j = getCase.set().size(); j--;) {
                RegisteredStructure structure = getCase.set()[j];
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If abstract interpretation already proved the structure is in our set,
        // the exit block is unreachable and no speculation check is needed there.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);
        
        LBasicBlock lastNext = m_out.m_nextBlock;
        
        Vector<ValueFromBlock, 2> results;
        for (unsigned i = data.cases.size(); i--;) {
            MultiGetByOffsetCase getCase = data.cases[i];
            GetByOffsetMethod method = getCase.method();
            
            m_out.appendTo(blocks[i], i + 1 < data.cases.size() ? blocks[i + 1] : exit);
            
            LValue result;
            
            switch (method.kind()) {
            case GetByOffsetMethod::Invalid:
                RELEASE_ASSERT_NOT_REACHED();
                break;
                
            case GetByOffsetMethod::Constant:
                // The property value is statically known for this structure.
                result = m_out.constInt64(JSValue::encode(method.constant()->value()));
                break;
                
            case GetByOffsetMethod::Load:
            case GetByOffsetMethod::LoadFromPrototype: {
                // Load either from the base itself or from a known prototype.
                LValue propertyBase;
                if (method.kind() == GetByOffsetMethod::Load)
                    propertyBase = base;
                else
                    propertyBase = weakPointer(method.prototype()->value().asCell());
                // Out-of-line properties live in the butterfly.
                if (!isInlineOffset(method.offset()))
                    propertyBase = m_out.loadPtr(propertyBase, m_heaps.JSObject_butterfly);
                result = loadProperty(
                    propertyBase, data.identifierNumber, method.offset());
                break;
            } }
            
            results.append(m_out.anchor(result));
            m_out.jump(continuation);
        }
        
        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();
        
        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, results));
    }
7138
7139 void compilePutByOffset()
7140 {
7141 StorageAccessData& data = m_node->storageAccessData();
7142
7143 storeProperty(
7144 lowJSValue(m_node->child3()),
7145 lowStorage(m_node->child1()), data.identifierNumber, data.offset);
7146 }
7147
    void compileMultiPutByOffset()
    {
        // Lower MultiPutByOffset: switch on the base's structure ID; each
        // variant either replaces an existing property or performs a structure
        // transition (possibly reallocating storage) before storing.
        LValue base = lowCell(m_node->child1());
        LValue value = lowJSValue(m_node->child2());
        
        MultiPutByOffsetData& data = m_node->multiPutByOffsetData();
        
        Vector<LBasicBlock, 2> blocks(data.variants.size());
        for (unsigned i = data.variants.size(); i--;)
            blocks[i] = m_out.newBlock();
        LBasicBlock exit = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        // Multiple old structures may funnel into the same variant block.
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (unsigned i = data.variants.size(); i--;) {
            PutByIdVariant variant = data.variants[i];
            for (unsigned j = variant.oldStructure().size(); j--;) {
                RegisteredStructure structure = m_graph.registerStructure(variant.oldStructure()[j]);
                baseSet.add(structure);
                cases.append(SwitchCase(weakStructureID(structure), blocks[i], Weight(1)));
            }
        }
        // If the abstract interpreter proved the structure set, the exit block
        // needs no speculation check.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exit);
        
        LBasicBlock lastNext = m_out.m_nextBlock;
        
        for (unsigned i = data.variants.size(); i--;) {
            m_out.appendTo(blocks[i], i + 1 < data.variants.size() ? blocks[i + 1] : exit);
            
            PutByIdVariant variant = data.variants[i];
            
            LValue storage;
            if (variant.kind() == PutByIdVariant::Replace) {
                // Replace: property already exists; pick inline storage or the
                // butterfly based on the offset.
                if (isInlineOffset(variant.offset()))
                    storage = base;
                else
                    storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
            } else {
                DFG_ASSERT(m_graph, m_node, variant.kind() == PutByIdVariant::Transition, variant.kind());
                // Record the transition so the plan can watch/validate it.
                m_graph.m_plan.transitions().addLazily(
                    codeBlock(), m_node->origin.semantic.codeOriginOwner(),
                    variant.oldStructureForTransition(), variant.newStructure());
                
                // May allocate or grow out-of-line storage for the new property.
                storage = storageForTransition(
                    base, variant.offset(),
                    variant.oldStructureForTransition(), variant.newStructure());
            }
            
            storeProperty(value, storage, data.identifierNumber, variant.offset());
            
            if (variant.kind() == PutByIdVariant::Transition) {
                ASSERT(variant.oldStructureForTransition()->indexingType() == variant.newStructure()->indexingType());
                ASSERT(variant.oldStructureForTransition()->typeInfo().inlineTypeFlags() == variant.newStructure()->typeInfo().inlineTypeFlags());
                ASSERT(variant.oldStructureForTransition()->typeInfo().type() == variant.newStructure()->typeInfo().type());
                // Publish the new structure after the value store.
                m_out.store32(
                    weakStructureID(m_graph.registerStructure(variant.newStructure())), base, m_heaps.JSCell_structureID);
            }
            
            m_out.jump(continuation);
        }
        
        m_out.appendTo(exit, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();
        
        m_out.appendTo(continuation, lastNext);
    }
7218
    void compileMatchStructure()
    {
        // Lower MatchStructure: produce a boolean telling whether the base's
        // structure is one of the variants marked `result == true`. Implemented
        // as a structure-ID switch into shared true/false blocks.
        LValue base = lowCell(m_node->child1());
        
        MatchStructureData& data = m_node->matchStructureData();
        
        LBasicBlock trueBlock = m_out.newBlock();
        LBasicBlock falseBlock = m_out.newBlock();
        LBasicBlock exitBlock = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        LBasicBlock lastNext = m_out.insertNewBlocksBefore(trueBlock);
        
        Vector<SwitchCase, 2> cases;
        RegisteredStructureSet baseSet;
        for (MatchStructureVariant& variant : data.variants) {
            baseSet.add(variant.structure);
            cases.append(SwitchCase(
                weakStructureID(variant.structure),
                variant.result ? trueBlock : falseBlock, Weight(1)));
        }
        // If the structure set is already proven, the exit block is dead and
        // needs no speculation check.
        bool structuresChecked = m_interpreter.forNode(m_node->child1()).m_structure.isSubsetOf(baseSet);
        emitSwitchForMultiByOffset(base, structuresChecked, cases, exitBlock);
        
        m_out.appendTo(trueBlock, falseBlock);
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);
        
        m_out.appendTo(falseBlock, exitBlock);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);
        
        m_out.appendTo(exitBlock, continuation);
        if (!structuresChecked)
            speculate(BadCache, noValue(), nullptr, m_out.booleanTrue);
        m_out.unreachable();
        
        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult));
    }
7259
7260 void compileGetGlobalVariable()
7261 {
7262 setJSValue(m_out.load64(m_out.absolute(m_node->variablePointer())));
7263 }
7264
7265 void compilePutGlobalVariable()
7266 {
7267 m_out.store64(
7268 lowJSValue(m_node->child2()), m_out.absolute(m_node->variablePointer()));
7269 }
7270
    void compileNotifyWrite()
    {
        // Lower NotifyWrite: fire the watchpoint set unless it is already
        // invalidated. The common case (already invalidated) falls straight
        // through; otherwise we take a lazily-generated slow path that calls
        // operationNotifyWrite.
        WatchpointSet* set = m_node->watchpointSet();
        
        LBasicBlock isNotInvalidated = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        LValue state = m_out.load8ZeroExt32(m_out.absolute(set->addressOfState()));
        m_out.branch(
            m_out.equal(state, m_out.constInt32(IsInvalidated)),
            usually(continuation), rarely(isNotInvalidated));
        
        LBasicBlock lastNext = m_out.appendTo(isNotInvalidated, continuation);
        
        VM& vm = this->vm();
        // The generator is only built if this slow path is actually reached.
        lazySlowPath(
            [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNotifyWrite, InvalidGPRReg, CCallHelpers::TrustedImmPtr(set));
            });
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
    }
7295
7296 void compileGetCallee()
7297 {
7298 setJSValue(m_out.loadPtr(addressFor(CallFrameSlot::callee)));
7299 }
7300
7301 void compileSetCallee()
7302 {
7303 auto callee = lowCell(m_node->child1());
7304 m_out.storePtr(callee, payloadFor(CallFrameSlot::callee));
7305 }
7306
7307 void compileGetArgumentCountIncludingThis()
7308 {
7309 VirtualRegister argumentCountRegister;
7310 if (InlineCallFrame* inlineCallFrame = m_node->argumentsInlineCallFrame())
7311 argumentCountRegister = inlineCallFrame->argumentCountRegister;
7312 else
7313 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
7314 setInt32(m_out.load32(payloadFor(argumentCountRegister)));
7315 }
7316
7317 void compileSetArgumentCountIncludingThis()
7318 {
7319 m_out.store32(m_out.constInt32(m_node->argumentCountIncludingThis()), payloadFor(CallFrameSlot::argumentCount));
7320 }
7321
7322 void compileGetScope()
7323 {
7324 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSFunction_scope));
7325 }
7326
7327 void compileSkipScope()
7328 {
7329 setJSValue(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSScope_next));
7330 }
7331
7332 void compileGetGlobalObject()
7333 {
7334 LValue structure = loadStructure(lowCell(m_node->child1()));
7335 setJSValue(m_out.loadPtr(structure, m_heaps.Structure_globalObject));
7336 }
7337
7338 void compileGetGlobalThis()
7339 {
7340 auto* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
7341 setJSValue(m_out.loadPtr(m_out.absolute(globalObject->addressOfGlobalThis())));
7342 }
7343
7344 void compileGetClosureVar()
7345 {
7346 setJSValue(
7347 m_out.load64(
7348 lowCell(m_node->child1()),
7349 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]));
7350 }
7351
7352 void compilePutClosureVar()
7353 {
7354 m_out.store64(
7355 lowJSValue(m_node->child2()),
7356 lowCell(m_node->child1()),
7357 m_heaps.JSLexicalEnvironment_variables[m_node->scopeOffset().offset()]);
7358 }
7359
7360 void compileGetFromArguments()
7361 {
7362 setJSValue(
7363 m_out.load64(
7364 lowCell(m_node->child1()),
7365 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]));
7366 }
7367
7368 void compilePutToArguments()
7369 {
7370 m_out.store64(
7371 lowJSValue(m_node->child2()),
7372 lowCell(m_node->child1()),
7373 m_heaps.DirectArguments_storage[m_node->capturedArgumentsOffset().offset()]);
7374 }
7375
    void compileGetArgument()
    {
        // Lower GetArgument: load the argument at the node's index if the
        // caller actually passed it, otherwise produce undefined.
        LValue argumentCount = m_out.load32(payloadFor(AssemblyHelpers::argumentCount(m_node->origin.semantic)));
        
        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        m_out.branch(m_out.lessThanOrEqual(argumentCount, m_out.constInt32(m_node->argumentIndex())), unsure(outOfBounds), unsure(inBounds));
        
        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        // argumentIndex() is 1-based here, hence the -1 when computing the slot.
        VirtualRegister arg = AssemblyHelpers::argumentsStart(m_node->origin.semantic) + m_node->argumentIndex() - 1;
        ValueFromBlock inBoundsResult = m_out.anchor(m_out.load64(addressFor(arg)));
        m_out.jump(continuation);
        
        m_out.appendTo(outOfBounds, continuation);
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueUndefined));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
7398
    void compileCompareEq()
    {
        // Lower CompareEq. For use-kind pairs where loose and strict equality
        // coincide, defer to the strict-equality lowering; otherwise handle the
        // object-vs-other and known-other special cases, and finally fall back
        // to a generic non-speculative compare.
        if (m_node->isBinaryUseKind(Int32Use)
            || m_node->isBinaryUseKind(Int52RepUse)
            || m_node->isBinaryUseKind(DoubleRepUse)
            || m_node->isBinaryUseKind(ObjectUse)
            || m_node->isBinaryUseKind(BooleanUse)
            || m_node->isBinaryUseKind(SymbolUse)
            || m_node->isBinaryUseKind(StringIdentUse)
            || m_node->isBinaryUseKind(StringUse)) {
            compileCompareStrictEq();
            return;
        }
        
        if (m_node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compareEqObjectOrOtherToObject(m_node->child2(), m_node->child1());
            return;
        }
        
        if (m_node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compareEqObjectOrOtherToObject(m_node->child1(), m_node->child2());
            return;
        }
        
        // One side statically known to be null/undefined: loose equality
        // reduces to a null-or-undefined test on the other side.
        if (m_node->child1().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child1(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child2(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }
        
        if (m_node->child2().useKind() == KnownOtherUse) {
            ASSERT(!m_interpreter.needsTypeCheck(m_node->child2(), SpecOther));
            setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualNullOrUndefined, ManualOperandSpeculation));
            return;
        }
        
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareEq);
    }
7442
    void compileCompareStrictEq()
    {
        // Lower CompareStrictEq for every supported use-kind pairing, from
        // trivial bit compares (Int32, Boolean, Symbol, Object) through string
        // content comparison, down to a generic non-speculative fallback.
        if (m_node->isBinaryUseKind(Int32Use)) {
            setBoolean(
                m_out.equal(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
            return;
        }
        
        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Compare both sides in the same Int52 representation.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(m_out.equal(left, right));
            return;
        }
        
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            setBoolean(
                m_out.doubleEqual(lowDouble(m_node->child1()), lowDouble(m_node->child2())));
            return;
        }
        
        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomized string impls compare by pointer identity.
            setBoolean(
                m_out.equal(lowStringIdent(m_node->child1()), lowStringIdent(m_node->child2())));
            return;
        }
        
        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());
            
            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            
            speculateString(m_node->child1(), left);
            
            // Same cell => trivially equal; otherwise fall through to a
            // character-by-character comparison.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(
                m_out.equal(left, right), unsure(continuation), unsure(notTriviallyEqualCase));
            
            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);
            
            speculateString(m_node->child2(), right);
            
            ValueFromBlock slowResult = m_out.anchor(stringsEqual(left, right, m_node->child1(), m_node->child2()));
            m_out.jump(continuation);
            
            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }
        
        if (m_node->isBinaryUseKind(ObjectUse, UntypedUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowJSValue(m_node->child2())));
            return;
        }
        
        if (m_node->isBinaryUseKind(UntypedUse, ObjectUse)) {
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child2()),
                    lowJSValue(m_node->child1())));
            return;
        }
        
        if (m_node->isBinaryUseKind(ObjectUse)) {
            // Objects compare by cell identity.
            setBoolean(
                m_out.equal(
                    lowNonNullObject(m_node->child1()),
                    lowNonNullObject(m_node->child2())));
            return;
        }
        
        if (m_node->isBinaryUseKind(BooleanUse)) {
            setBoolean(
                m_out.equal(lowBoolean(m_node->child1()), lowBoolean(m_node->child2())));
            return;
        }
        
        if (m_node->isBinaryUseKind(SymbolUse)) {
            // Symbols compare by cell identity.
            LValue leftSymbol = lowSymbol(m_node->child1());
            LValue rightSymbol = lowSymbol(m_node->child2());
            setBoolean(m_out.equal(leftSymbol, rightSymbol));
            return;
        }
        
        if (m_node->isBinaryUseKind(BigIntUse)) {
            // FIXME: [ESNext][BigInt] Create specialized version of strict equals for BigIntUse
            // https://bugs.webkit.org/show_bug.cgi?id=182895
            LValue left = lowBigInt(m_node->child1());
            LValue right = lowBigInt(m_node->child2());
            
            LBasicBlock notTriviallyEqualCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            
            // Same cell => equal; otherwise compare values via the runtime.
            ValueFromBlock fastResult = m_out.anchor(m_out.booleanTrue);
            m_out.branch(m_out.equal(left, right), rarely(continuation), usually(notTriviallyEqualCase));
            
            LBasicBlock lastNext = m_out.appendTo(notTriviallyEqualCase, continuation);
            
            ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
                pointerType(), m_out.operation(operationCompareStrictEq), m_callFrame, left, right)));
            m_out.jump(continuation);
            
            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, fastResult, slowResult));
            return;
        }
        
        if (m_node->isBinaryUseKind(SymbolUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, SymbolUse)) {
            // Normalize so symbolEdge really is the SymbolUse side.
            Edge symbolEdge = m_node->child1();
            Edge untypedEdge = m_node->child2();
            if (symbolEdge.useKind() != SymbolUse)
                std::swap(symbolEdge, untypedEdge);
            
            LValue leftSymbol = lowSymbol(symbolEdge);
            LValue untypedValue = lowJSValue(untypedEdge);
            
            setBoolean(m_out.equal(leftSymbol, untypedValue));
            return;
        }
        
        if (m_node->isBinaryUseKind(MiscUse, UntypedUse)
            || m_node->isBinaryUseKind(UntypedUse, MiscUse)) {
            // Misc values (undefined/null/boolean) compare by bit pattern.
            speculate(m_node->child1());
            speculate(m_node->child2());
            LValue left = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            LValue right = lowJSValue(m_node->child2(), ManualOperandSpeculation);
            setBoolean(m_out.equal(left, right));
            return;
        }
        
        if (m_node->isBinaryUseKind(StringIdentUse, NotStringVarUse)
            || m_node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
            Edge leftEdge = m_node->childFor(StringIdentUse);
            Edge rightEdge = m_node->childFor(NotStringVarUse);
            
            LValue left = lowStringIdent(leftEdge);
            LValue rightValue = lowJSValue(rightEdge, ManualOperandSpeculation);
            
            LBasicBlock isCellCase = m_out.newBlock();
            LBasicBlock isStringCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            
            // Not a cell => can't be a string => false.
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isCell(rightValue, provenType(rightEdge)),
                unsure(isCellCase), unsure(continuation));
            
            LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
            // Cell but not a string => false.
            ValueFromBlock notStringResult = m_out.anchor(m_out.booleanFalse);
            m_out.branch(
                isString(rightValue, provenType(rightEdge)),
                unsure(isStringCase), unsure(continuation));
            
            m_out.appendTo(isStringCase, continuation);
            // Both atomized string impls: compare by pointer.
            LValue right = m_out.loadPtr(rightValue, m_heaps.JSString_value);
            speculateStringIdent(rightEdge, rightValue, right);
            ValueFromBlock isStringResult = m_out.anchor(m_out.equal(left, right));
            m_out.jump(continuation);
            
            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, notCellResult, notStringResult, isStringResult));
            return;
        }
        
        if (m_node->isBinaryUseKind(StringUse, UntypedUse)) {
            compileStringToUntypedStrictEquality(m_node->child1(), m_node->child2());
            return;
        }
        if (m_node->isBinaryUseKind(UntypedUse, StringUse)) {
            compileStringToUntypedStrictEquality(m_node->child2(), m_node->child1());
            return;
        }
        
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(
            [&] (LValue left, LValue right) {
                return m_out.equal(left, right);
            },
            operationCompareStrictEq);
    }
7630
    // Strict equality where one side is proven String and the other is
    // Untyped: fast-path pointer identity, then cell/string type filtering on
    // the untyped side, and finally a full string content comparison.
    void compileStringToUntypedStrictEquality(Edge stringEdge, Edge untypedEdge)
    {
        ASSERT(stringEdge.useKind() == StringUse);
        ASSERT(untypedEdge.useKind() == UntypedUse);
        
        LValue leftString = lowCell(stringEdge);
        LValue rightValue = lowJSValue(untypedEdge);
        SpeculatedType rightValueType = provenType(untypedEdge);
        
        // Verify left is string.
        speculateString(stringEdge, leftString);
        
        LBasicBlock testUntypedEdgeIsCell = m_out.newBlock();
        LBasicBlock testUntypedEdgeIsString = m_out.newBlock();
        LBasicBlock testStringEquality = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        // Given left is string. If the value are strictly equal, rightValue has to be the same string.
        ValueFromBlock fastTrue = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.equal(leftString, rightValue), unsure(continuation), unsure(testUntypedEdgeIsCell));
        
        // Non-cell right side can never strictly equal a string.
        LBasicBlock lastNext = m_out.appendTo(testUntypedEdgeIsCell, testUntypedEdgeIsString);
        ValueFromBlock fastFalse = m_out.anchor(m_out.booleanFalse);
        m_out.branch(isNotCell(rightValue, rightValueType), unsure(continuation), unsure(testUntypedEdgeIsString));
        
        // Check if the untyped edge is a string.
        m_out.appendTo(testUntypedEdgeIsString, testStringEquality);
        m_out.branch(isNotString(rightValue, rightValueType), unsure(continuation), unsure(testStringEquality));
        
        // Full String compare.
        m_out.appendTo(testStringEquality, continuation);
        ValueFromBlock slowResult = m_out.anchor(stringsEqual(leftString, rightValue, stringEdge, untypedEdge));
        m_out.jump(continuation);
        
        // Continuation.
        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastTrue, fastFalse, slowResult));
    }
7669
7670 void compileCompareEqPtr()
7671 {
7672 setBoolean(
7673 m_out.equal(
7674 lowJSValue(m_node->child1()),
7675 weakPointer(m_node->cellOperand()->cell())));
7676 }
7677
7678 void compileCompareLess()
7679 {
7680 compare(
7681 [&] (LValue left, LValue right) {
7682 return m_out.lessThan(left, right);
7683 },
7684 [&] (LValue left, LValue right) {
7685 return m_out.doubleLessThan(left, right);
7686 },
7687 operationCompareStringImplLess,
7688 operationCompareStringLess,
7689 operationCompareLess);
7690 }
7691
7692 void compileCompareLessEq()
7693 {
7694 compare(
7695 [&] (LValue left, LValue right) {
7696 return m_out.lessThanOrEqual(left, right);
7697 },
7698 [&] (LValue left, LValue right) {
7699 return m_out.doubleLessThanOrEqual(left, right);
7700 },
7701 operationCompareStringImplLessEq,
7702 operationCompareStringLessEq,
7703 operationCompareLessEq);
7704 }
7705
7706 void compileCompareGreater()
7707 {
7708 compare(
7709 [&] (LValue left, LValue right) {
7710 return m_out.greaterThan(left, right);
7711 },
7712 [&] (LValue left, LValue right) {
7713 return m_out.doubleGreaterThan(left, right);
7714 },
7715 operationCompareStringImplGreater,
7716 operationCompareStringGreater,
7717 operationCompareGreater);
7718 }
7719
7720 void compileCompareGreaterEq()
7721 {
7722 compare(
7723 [&] (LValue left, LValue right) {
7724 return m_out.greaterThanOrEqual(left, right);
7725 },
7726 [&] (LValue left, LValue right) {
7727 return m_out.doubleGreaterThanOrEqual(left, right);
7728 },
7729 operationCompareStringImplGreaterEq,
7730 operationCompareStringGreaterEq,
7731 operationCompareGreaterEq);
7732 }
7733
7734 void compileCompareBelow()
7735 {
7736 setBoolean(m_out.below(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7737 }
7738
7739 void compileCompareBelowEq()
7740 {
7741 setBoolean(m_out.belowOrEqual(lowInt32(m_node->child1()), lowInt32(m_node->child2())));
7742 }
7743
    void compileSameValue()
    {
        // Lower SameValue. For DoubleRepUse: bitwise-equal doubles are
        // SameValue (this also makes identical NaN bit patterns and equal
        // zeros of the same sign match); otherwise the only remaining true
        // case is NaN vs NaN. Everything else goes through the runtime.
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue arg1 = lowDouble(m_node->child1());
            LValue arg2 = lowDouble(m_node->child2());
            
            LBasicBlock numberCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            
            // Compare the raw 64-bit patterns of the two doubles; the result
            // GPR holds the boolean outcome.
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(arg1, ValueRep::SomeRegister);
            patchpoint->append(arg2, ValueRep::SomeRegister);
            patchpoint->numGPScratchRegisters = 1;
            patchpoint->setGenerator(
                [] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                    GPRReg scratchGPR = params.gpScratch(0);
                    jit.moveDoubleTo64(params[1].fpr(), scratchGPR);
                    jit.moveDoubleTo64(params[2].fpr(), params[0].gpr());
                    jit.compare64(CCallHelpers::Equal, scratchGPR, params[0].gpr(), params[0].gpr());
                });
            patchpoint->effects = Effects::none();
            ValueFromBlock compareResult = m_out.anchor(patchpoint);
            m_out.branch(patchpoint, unsure(continuation), unsure(numberCase));
            
            // Bits differ: SameValue still holds iff both operands are NaN.
            LBasicBlock lastNext = m_out.appendTo(numberCase, continuation);
            LValue isArg1NaN = m_out.doubleNotEqualOrUnordered(arg1, arg1);
            LValue isArg2NaN = m_out.doubleNotEqualOrUnordered(arg2, arg2);
            ValueFromBlock nanResult = m_out.anchor(m_out.bitAnd(isArg1NaN, isArg2NaN));
            m_out.jump(continuation);
            
            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, compareResult, nanResult));
            return;
        }
        
        ASSERT(m_node->isBinaryUseKind(UntypedUse));
        setBoolean(vmCall(Int32, m_out.operation(operationSameValue), m_callFrame, lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
    }
7782
7783 void compileLogicalNot()
7784 {
7785 setBoolean(m_out.logicalNot(boolify(m_node->child1())));
7786 }
7787
    void compileCallOrConstruct()
    {
        // Lower Call/Construct as a patchpoint that emits an inline call IC:
        // a patchable callee check, a fast near call, and a slow path that
        // jumps to the link-call thunk. Arguments are laid out on the stack in
        // JS calling convention order.
        Node* node = m_node;
        unsigned numArgs = node->numChildren() - 1;
        
        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));
        
        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);
        
        // JS->JS calling convention requires that the caller allows this much space on top of stack to
        // get trashed by the callee, even if not all of that space is used to pass arguments. We tell
        // B3 this explicitly for two reasons:
        //
        // - We will only pass frameSize worth of stuff.
        // - The trashed stack guarantee is logically separate from the act of passing arguments, so we
        //   shouldn't rely on Air to infer the trashed stack property based on the arguments it ends
        //   up seeing.
        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
        
        // Collect the arguments, since this can generate code and we want to generate it before we emit
        // the call.
        Vector<ConstrainedValue> arguments;
        
        // Make sure that the callee goes into GPR0 because that's where the slow path thunks expect the
        // callee to be.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));
        
        // Place a value at a given call-frame-relative stack slot.
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP = 
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };
        
        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
        
        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);
        
        // Lets OSR exit unwind correctly if the callee throws.
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        
        // The callee expects the tag registers to hold their canonical values.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
        
        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
                
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
                
                // Record the call site index so unwinding can identify this call.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
                
                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                
                // Patchable check: take the fast near call only if the callee
                // matches the cached target.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));
                
                CCallHelpers::Call fastCall = jit.nearCall();
                CCallHelpers::Jump done = jit.jump();
                
                slowPath.link(&jit);
                
                // Slow path thunk expects the CallLinkInfo in regT2.
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();
                done.link(&jit);
                
                callLinkInfo->setUpCall(
                    node->op() == Construct ? CallLinkInfo::Construct : CallLinkInfo::Call,
                    node->origin.semantic, GPRInfo::regT0);
                
                // Restore our stack pointer after the callee returns.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
                
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
                        
                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });
        
        setJSValue(patchpoint);
    }
7891
    // Compiles DirectCall, DirectConstruct, and DirectTailCall: call nodes whose callee
    // Executable is a compile-time constant operand, so we can link straight to it
    // (via operationLinkDirectCall on the slow path) instead of going through a
    // polymorphic call inline cache.
    void compileDirectCallOrConstruct()
    {
        Node* node = m_node;
        bool isTail = node->op() == DirectTailCall;
        bool isConstruct = node->op() == DirectConstruct;

        // The callee executable is baked into the node as a constant operand.
        ExecutableBase* executable = node->castOperand<ExecutableBase*>();
        FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable);

        // Child 0 is the callee value; children 1..numChildren-1 are the arguments.
        unsigned numPassedArgs = node->numChildren() - 1;
        unsigned numAllocatedArgs = numPassedArgs;

        if (functionExecutable) {
            // Allocate argument slots up to the callee's declared parameter count
            // (+1 for `this`) so the callee needs no arity fixup, but cap the
            // over-allocation at maximumDirectCallStackSize to bound stack usage.
            numAllocatedArgs = std::max(
                numAllocatedArgs,
                std::min(
                    static_cast<unsigned>(functionExecutable->parameterCount()) + 1,
                    Options::maximumDirectCallStackSize()));
        }

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        if (!isTail) {
            // Reserve space for the callee frame (header + all allocated argument
            // slots) in this procedure's call argument area.
            unsigned frameSize = (CallFrame::headerSizeInRegisters + numAllocatedArgs) * sizeof(EncodedJSValue);
            unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

            m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);
        }

        Vector<ConstrainedValue> arguments;

        // The callee is always the first patchpoint argument, in some register.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::SomeRegister));
        if (!isTail) {
            // Non-tail: have B3 store the callee frame contents directly into the
            // call arg area at fixed stack offsets, so the generator emits no
            // argument-marshalling code of its own.
            auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
                intptr_t offsetFromSP =
                    (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
                arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
            };

            addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
            addArgument(m_out.constInt32(numPassedArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
            for (unsigned i = 0; i < numPassedArgs; ++i)
                addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);
            // Pad any extra allocated slots with undefined.
            for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                addArgument(m_out.constInt64(JSValue::encode(jsUndefined())), virtualRegisterForArgument(i), 0);
        } else {
            // Tail call: let B3 pick any representation; the CallFrameShuffler in the
            // generator will move the values into the reused frame.
            for (unsigned i = 0; i < numPassedArgs; ++i)
                arguments.append(ConstrainedValue(lowJSValue(m_graph.varArgChild(node, 1 + i)), ValueRep::WarmAny));
        }

        // Tail calls never return to us, so they produce Void; otherwise Int64 (the JS result).
        PatchpointValue* patchpoint = m_out.patchpoint(isTail ? Void : Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        if (isTail) {
            // The shuffler needs tags.
            patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
            patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        }

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        if (!isTail) {
            // A JS call clobbers all volatile registers; the result arrives in returnValueGPR.
            patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
            patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
        }

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                // For non-tail calls the patchpoint has a result, which occupies
                // params[0]; the callee is then params[1]. For tail calls (Void
                // result) the callee is params[0].
                GPRReg calleeGPR = params[!isTail].gpr();

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                if (isTail) {
                    // Tail-call path: shuffle our own frame into the callee's frame
                    // and jump rather than call.
                    CallFrameShuffleData shuffleData;
                    shuffleData.numLocals = state->jitCode->common.frameRegisterCount;

                    // Everything the shuffle reads must survive the slow-path call;
                    // collect those registers into the save set.
                    RegisterSet toSave = params.unavailableRegisters();
                    shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatCell);
                    toSave.set(calleeGPR);
                    for (unsigned i = 0; i < numPassedArgs; ++i) {
                        ValueRecovery recovery = params[1 + i].recoveryForJSValue();
                        shuffleData.args.append(recovery);
                        recovery.forEachReg(
                            [&] (Reg reg) {
                                toSave.set(reg);
                            });
                    }
                    // Extra allocated slots become the constant undefined.
                    for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
                        shuffleData.args.append(ValueRecovery::constant(jsUndefined()));
                    shuffleData.numPassedArgs = numPassedArgs;
                    shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                    CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                    // The patchable jump is the "unlinked" entry: it initially routes
                    // to the slow path below; linking repoints it at mainPath.
                    CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
                    CCallHelpers::Label mainPath = jit.label();

                    // Record the call site index so unwinding from a throw sees it.
                    jit.store32(
                        CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                        CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                    callLinkInfo->setFrameShuffleData(shuffleData);
                    CallFrameShuffler(jit, shuffleData).prepareForTailCall();

                    CCallHelpers::Call call = jit.nearTailCall();

                    // Control must never come back after a tail call.
                    jit.abortWithReason(JITDidReturnFromTailCall);

                    // Slow path: link the direct call, then retry the main path.
                    CCallHelpers::Label slowPath = jit.label();
                    patchableJump.m_jump.linkTo(slowPath, &jit);
                    callOperation(
                        *state, toSave, jit,
                        node->origin.semantic, exceptions.get(), operationLinkDirectCall,
                        InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo), calleeGPR).call();
                    jit.jump().linkTo(mainPath, &jit);

                    callLinkInfo->setUpCall(
                        CallLinkInfo::DirectTailCall, node->origin.semantic, InvalidGPRReg);
                    callLinkInfo->setExecutableDuringCompilation(executable);
                    if (numAllocatedArgs > numPassedArgs)
                        callLinkInfo->setMaxNumArguments(numAllocatedArgs);

                    jit.addLinkTask(
                        [=] (LinkBuffer& linkBuffer) {
                            CodeLocationLabel<JSInternalPtrTag> patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(patchableJump);
                            CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
                            CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);

                            callLinkInfo->setCallLocations(
                                patchableJumpLocation,
                                slowPathLocation,
                                callLocation);
                        });
                    return;
                }

                // Non-tail path: B3 already populated the callee frame via the
                // stackArgument constraints above, so we just record the call site
                // and emit the near call.
                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                CCallHelpers::Label mainPath = jit.label();

                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CCallHelpers::Call call = jit.nearCall();
                // Restore SP after the call (the callee may have left it elsewhere).
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                callLinkInfo->setUpCall(
                    isConstruct ? CallLinkInfo::DirectConstruct : CallLinkInfo::DirectCall,
                    node->origin.semantic, InvalidGPRReg);
                callLinkInfo->setExecutableDuringCompilation(executable);
                if (numAllocatedArgs > numPassedArgs)
                    callLinkInfo->setMaxNumArguments(numAllocatedArgs);

                // The slow path is emitted out-of-line; the unlinked near call
                // initially targets it (see linkBuffer.link below).
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        CCallHelpers::Label slowPath = jit.label();
                        // On x86 the near call pushed a return address; discard it
                        // into a scratch register before calling the link operation.
                        if (isX86())
                            jit.pop(CCallHelpers::selectScratchGPR(calleeGPR));

                        callOperation(
                            *state, params.unavailableRegisters(), jit,
                            node->origin.semantic, exceptions.get(), operationLinkDirectCall,
                            InvalidGPRReg, CCallHelpers::TrustedImmPtr(callLinkInfo),
                            calleeGPR).call();
                        jit.jump().linkTo(mainPath, &jit);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                CodeLocationNearCall<JSInternalPtrTag> callLocation = linkBuffer.locationOfNearCall<JSInternalPtrTag>(call);
                                CodeLocationLabel<JSInternalPtrTag> slowPathLocation = linkBuffer.locationOf<JSInternalPtrTag>(slowPath);

                                // Until linked, the fast-path call jumps to the slow path.
                                linkBuffer.link(call, slowPathLocation);

                                callLinkInfo->setCallLocations(
                                    CodeLocationLabel<JSInternalPtrTag>(),
                                    slowPathLocation,
                                    callLocation);
                            });
                    });
            });

        if (isTail)
            patchpoint->effects.terminal = true;
        else
            setJSValue(patchpoint);
    }
8092
    // Compiles TailCall: an indirect (IC-based) tail call. The current frame is
    // shuffled into the callee's frame and we jump, never returning here.
    void compileTailCall()
    {
        Node* node = m_node;
        // Child 0 is the callee; the remaining children are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        // It seems counterintuitive that this is needed given that tail calls don't create a new frame
        // on the stack. However, the tail call slow path builds the frame at SP instead of FP before
        // calling into the slow path C code. This slow path may decide to throw an exception because
        // the callee we're trying to call is not callable. Throwing an exception will cause us to walk
        // the stack, which may read, for the sake of the correctness of this code, arbitrary slots on the
        // stack to recover state. This call arg area ensures the call frame shuffler does not overwrite
        // any of the slots the stack walking code requires when on the slow path.
        m_proc.requestCallArgAreaSizeInBytes(
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue)));

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // We want B3 to give us all of the arguments using whatever mechanism it thinks is
        // convenient. The generator then shuffles those arguments into our own call frame,
        // destroying our frame in the process.

        // Note that we don't have to do anything special for exceptions. A tail call is only a
        // tail call if it is not inside a try block.

        Vector<ConstrainedValue> arguments;

        // The call IC expects the callee in regT0.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        for (unsigned i = 0; i < numArgs; ++i) {
            // Note: we could let the shuffler do boxing for us, but it's not super clear that this
            // would be better. Also, if we wanted to do that, then we'd have to teach the shuffler
            // that 32-bit values could land at 4-byte alignment but not 8-byte alignment.

            ConstrainedValue constrainedValue(
                lowJSValue(m_graph.varArgChild(node, 1 + i)),
                ValueRep::WarmAny);
            arguments.append(constrainedValue);
        }

        // Void result: a tail call never returns to this frame.
        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        patchpoint->appendVector(arguments);

        // The call frame shuffler needs the tag registers populated.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));

        // Prevent any of the arguments from using the scratch register.
        patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());

        patchpoint->effects.terminal = true;

        // We don't have to tell the patchpoint that we will clobber registers, since we won't return
        // anyway.

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                // Yes, this is really necessary. You could throw an exception in a host call on the
                // slow path. That'll route us to lookupExceptionHandler(), which unwinds starting
                // with the call site index of our frame. Bad things happen if it's not set.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                // Describe where the callee and each argument currently live so the
                // shuffler can rebuild the frame in place.
                CallFrameShuffleData shuffleData;
                shuffleData.numLocals = state->jitCode->common.frameRegisterCount;
                shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);

                for (unsigned i = 0; i < numArgs; ++i)
                    shuffleData.args.append(params[1 + i].recoveryForJSValue());

                shuffleData.numPassedArgs = numArgs;

                shuffleData.setupCalleeSaveRegisters(jit.codeBlock());

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // Call IC fast-path check: compare the callee against a patched-in
                // expected value; mismatch goes to the slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                callLinkInfo->setFrameShuffleData(shuffleData);
                CallFrameShuffler(jit, shuffleData).prepareForTailCall();

                CCallHelpers::Call fastCall = jit.nearTailCall();

                slowPath.link(&jit);

                // Slow path: build the frame below SP (not destroying ours) and
                // call the link thunk with the callee in regT0.
                CallFrameShuffler slowPathShuffler(jit, shuffleData);
                slowPathShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
                slowPathShuffler.prepareForSlowPath();

                // The link-call thunk expects the CallLinkInfo in regT2.
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();

                // Neither path may fall through to here.
                jit.abortWithReason(JITDidReturnFromTailCall);

                callLinkInfo->setUpCall(CallLinkInfo::TailCall, codeOrigin, GPRInfo::regT0);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });
    }
8209
    // Compiles the (Tail)Call/Construct(Forward)Varargs nodes whose arguments node is a
    // phantom spread/array (PhantomNewArrayWithSpread, PhantomSpread, PhantomNewArrayBuffer):
    // instead of materializing the array, the arguments are copied straight from their
    // sources into a freshly sized callee frame, then a normal IC-based call is made.
    void compileCallOrConstructVarargsSpread()
    {
        Node* node = m_node;
        Node* arguments = node->child3().node();

        LValue jsCallee = lowJSValue(m_node->child1());
        LValue thisArg = lowJSValue(m_node->child2());

        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomSpread || arguments->op() == PhantomNewArrayBuffer);

        // Walk the phantom argument tree, collecting:
        // - staticArgumentCount: arguments whose count is known at compile time,
        // - spreadLengths: runtime lengths of PhantomCreateRest spreads (per inline frame),
        // - patchpointArguments: the LValues the generator will need (values and lengths).
        unsigned staticArgumentCount = 0;
        Vector<LValue, 2> spreadLengths;
        Vector<LValue, 8> patchpointArguments;
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;
        auto pushAndCountArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                // bitVector marks which children are themselves spreads.
                BitVector* bitVector = target->bitVector();
                for (unsigned i = target->numChildren(); i--; ) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else {
                        ++staticArgumentCount;
                        LValue argument = this->lowJSValue(m_graph.varArgChild(target, i));
                        patchpointArguments.append(argument);
                    }
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                // Constant array literal: its length is known statically and its
                // values are emitted as immediates in the generator.
                staticArgumentCount += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            // Cache the length per inline call frame so repeated spreads of the
            // same rest parameter only compute it once.
            LValue length = cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return m_out.zeroExtPtr(this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip));
            }).iterator->value;
            patchpointArguments.append(length);
            spreadLengths.append(length);
        });

        pushAndCountArgumentsFromRightToLeft(arguments);
        // Total argument count = static args + `this` + all runtime spread lengths.
        LValue argumentCountIncludingThis = m_out.constIntPtr(staticArgumentCount + 1);
        for (LValue length : spreadLengths)
            argumentCountIncludingThis = m_out.add(length, argumentCountIncludingThis);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);

        // Fixed patchpoint argument layout relied upon by the generator:
        // [1]=callee (regT0), [2]=this, [3]=argument count, [4..]=values/lengths.
        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
        patchpoint->append(thisArg, ValueRep::WarmAny);
        patchpoint->append(argumentCountIncludingThis, ValueRep::WarmAny);
        patchpoint->appendVectorWithRep(patchpointArguments, ValueRep::WarmAny);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobber(RegisterSet::volatileRegistersForJSCall()); // No inputs will be in a volatile register.
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        patchpoint->numGPScratchRegisters = 0;

        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
        unsigned minimumJSCallAreaSize =
            sizeof(CallerFrameAndPC) +
            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));

        m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM* vm = &this->vm();
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record the call site so stack walking/unwinding sees it.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();

                // All inputs are in non-volatile registers or spilled (see clobber
                // above), so scratch registers come from the volatile set.
                RegisterSet usedRegisters = RegisterSet::allRegisters();
                usedRegisters.exclude(RegisterSet::volatileRegistersForJSCall());
                GPRReg calleeGPR = params[1].gpr();
                usedRegisters.set(calleeGPR);

                ScratchRegisterAllocator allocator(usedRegisters);
                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
                GPRReg scratchGPR3 = allocator.allocateScratchGPR();
                GPRReg scratchGPR4 = allocator.allocateScratchGPR();
                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());

                // Materialize a stackmap value (constant, stack slot, or register)
                // into `result`.
                auto getValueFromRep = [&] (B3::ValueRep rep, GPRReg result) {
                    ASSERT(!usedRegisters.get(result));

                    if (rep.isConstant()) {
                        jit.move(CCallHelpers::Imm64(rep.value()), result);
                        return;
                    }

                    // Note: in this function, we only request 64 bit values.
                    if (rep.isStack()) {
                        jit.load64(
                            CCallHelpers::Address(GPRInfo::callFrameRegister, rep.offsetFromFP()),
                            result);
                        return;
                    }

                    RELEASE_ASSERT(rep.isGPR());
                    ASSERT(usedRegisters.get(rep.gpr()));
                    jit.move(rep.gpr(), result);
                };

                auto callWithExceptionCheck = [&] (void* callee) {
                    jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
                    jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                    exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
                };

                CCallHelpers::JumpList slowCase;
                unsigned originalStackHeight = params.proc().frameSize();

                {
                    // Compute the callee frame address below our frame and fill it
                    // with the spread-out arguments.
                    unsigned numUsedSlots = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), originalStackHeight / sizeof(EncodedJSValue));
                    B3::ValueRep argumentCountIncludingThisRep = params[3];
                    getValueFromRep(argumentCountIncludingThisRep, scratchGPR2);
                    // Guard against absurd argument counts (throws stack overflow below).
                    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR2, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));

                    jit.move(scratchGPR2, scratchGPR1);
                    jit.addPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(numUsedSlots + CallFrame::headerSizeInRegisters)), scratchGPR1);
                    // scratchGPR1 now has the required frame size in Register units
                    // Round scratchGPR1 to next multiple of stackAlignmentRegisters()
                    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), scratchGPR1);
                    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), scratchGPR1);
                    jit.negPtr(scratchGPR1);
                    // scratchGPR1 = base address of the new (callee) frame.
                    jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight), scratchGPR1);

                    // Before touching stack values, we should update the stack pointer to protect them from signal stack.
                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR1, CCallHelpers::stackPointerRegister);

                    jit.store32(scratchGPR2, CCallHelpers::Address(scratchGPR1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

                    int storeOffset = CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register));

                    // Arguments are written right-to-left, decrementing scratchGPR2
                    // (remaining argument count) as each one is stored. paramsOffset
                    // matches the patchpoint layout set up above (values start at [4]).
                    unsigned paramsOffset = 4;
                    unsigned index = 0;
                    auto emitArgumentsFromRightToLeft = recursableLambda([&](auto self, Node* target) -> void {
                        if (target->op() == PhantomSpread) {
                            self(target->child1().node());
                            return;
                        }

                        if (target->op() == PhantomNewArrayWithSpread) {
                            BitVector* bitVector = target->bitVector();
                            for (unsigned i = target->numChildren(); i--; ) {
                                if (bitVector->get(i))
                                    self(state->graph.varArgChild(target, i).node());
                                else {
                                    // Plain element: pop one slot and store the value.
                                    jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                                    getValueFromRep(params[paramsOffset + (index++)], scratchGPR3);
                                    jit.store64(scratchGPR3,
                                        CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                                }
                            }
                            return;
                        }

                        if (target->op() == PhantomNewArrayBuffer) {
                            auto* array = target->castOperand<JSImmutableButterfly*>();
                            Checked<int32_t> offsetCount { 1 };
                            for (unsigned i = array->length(); i--; ++offsetCount) {
                                // Because varargs values are drained as JSValue, we should not generate value
                                // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                                int64_t value = JSValue::encode(array->get(i));
                                jit.move(CCallHelpers::TrustedImm64(value), scratchGPR3);
                                Checked<int32_t> currentStoreOffset { storeOffset };
                                currentStoreOffset -= (offsetCount * static_cast<int32_t>(sizeof(Register)));
                                jit.store64(scratchGPR3,
                                    CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, currentStoreOffset.unsafeGet()));
                            }
                            jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(array->length())), scratchGPR2);
                            return;
                        }

                        RELEASE_ASSERT(target->op() == PhantomCreateRest);
                        InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

                        unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();

                        // Copy-loop: move the rest arguments from the inline frame's
                        // argument area into the new frame, one 64-bit slot at a time.
                        B3::ValueRep numArgumentsToCopy = params[paramsOffset + (index++)];
                        getValueFromRep(numArgumentsToCopy, scratchGPR3);
                        int loadOffset = (AssemblyHelpers::argumentsStart(inlineCallFrame).offset() + numberOfArgumentsToSkip) * static_cast<int>(sizeof(Register));

                        auto done = jit.branchTestPtr(MacroAssembler::Zero, scratchGPR3);
                        auto loopStart = jit.label();
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR3);
                        jit.subPtr(CCallHelpers::TrustedImmPtr(static_cast<size_t>(1)), scratchGPR2);
                        jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR3, CCallHelpers::TimesEight, loadOffset), scratchGPR4);
                        jit.store64(scratchGPR4,
                            CCallHelpers::BaseIndex(scratchGPR1, scratchGPR2, CCallHelpers::TimesEight, storeOffset));
                        jit.branchTestPtr(CCallHelpers::NonZero, scratchGPR3).linkTo(loopStart, &jit);
                        done.link(&jit);
                    });
                    emitArgumentsFromRightToLeft(arguments);
                }

                {
                    // Too-many-arguments path: throw a stack overflow error.
                    CCallHelpers::Jump dontThrow = jit.jump();
                    slowCase.link(&jit);
                    jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
                    callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);

                    dontThrow.link(&jit);
                }

                // Finish the frame: store the callee and `this` (params[2]).
                ASSERT(calleeGPR == GPRInfo::regT0);
                jit.store64(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
                getValueFromRep(params[2], scratchGPR3);
                jit.store64(scratchGPR3, CCallHelpers::calleeArgumentSlot(0));

                CallLinkInfo::CallType callType;
                if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
                    callType = CallLinkInfo::ConstructVarargs;
                else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
                    callType = CallLinkInfo::TailCallVarargs;
                else
                    callType = CallLinkInfo::CallVarargs;

                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;

                // Standard call IC: patched compare of the callee against an
                // expected value; mismatch goes to the slow path.
                CCallHelpers::DataLabelPtr targetToCheck;
                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
                    CCallHelpers::TrustedImmPtr(nullptr));

                CCallHelpers::Call fastCall;
                CCallHelpers::Jump done;

                if (isTailCall) {
                    jit.emitRestoreCalleeSaves();
                    jit.prepareForTailCallSlow();
                    fastCall = jit.nearTailCall();
                } else {
                    fastCall = jit.nearCall();
                    done = jit.jump();
                }

                slowPath.link(&jit);

                if (isTailCall)
                    jit.emitRestoreCalleeSaves();
                // The link-call thunk expects the CallLinkInfo in regT2.
                ASSERT(!usedRegisters.get(GPRInfo::regT2));
                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
                CCallHelpers::Call slowCall = jit.nearCall();

                if (isTailCall)
                    jit.abortWithReason(JITDidReturnFromTailCall);
                else
                    done.link(&jit);

                callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);

                // Restore SP to our frame's canonical value after the call.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-originalStackHeight),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
                        linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));

                        callLinkInfo->setCallLocations(
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
                            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
                            linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
                    });
            });

        // Tail-call variants never return; everything else produces a JS value.
        switch (node->op()) {
        case TailCallForwardVarargs:
            m_out.unreachable();
            break;

        default:
            setJSValue(patchpoint);
            break;
        }
    }
8517
8518 void compileCallOrConstructVarargs()
8519 {
8520 Node* node = m_node;
8521 LValue jsCallee = lowJSValue(m_node->child1());
8522 LValue thisArg = lowJSValue(m_node->child2());
8523
8524 LValue jsArguments = nullptr;
8525 bool forwarding = false;
8526
8527 switch (node->op()) {
8528 case CallVarargs:
8529 case TailCallVarargs:
8530 case TailCallVarargsInlinedCaller:
8531 case ConstructVarargs:
8532 jsArguments = lowJSValue(node->child3());
8533 break;
8534 case CallForwardVarargs:
8535 case TailCallForwardVarargs:
8536 case TailCallForwardVarargsInlinedCaller:
8537 case ConstructForwardVarargs:
8538 forwarding = true;
8539 break;
8540 default:
8541 DFG_CRASH(m_graph, node, "bad node type");
8542 break;
8543 }
8544
8545 if (forwarding && m_node->child3()) {
8546 Node* arguments = m_node->child3().node();
8547 if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
8548 compileCallOrConstructVarargsSpread();
8549 return;
8550 }
8551 }
8552
8553
8554 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
8555
8556 // Append the forms of the arguments that we will use before any clobbering happens.
8557 patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
8558 if (jsArguments)
8559 patchpoint->appendSomeRegister(jsArguments);
8560 patchpoint->appendSomeRegister(thisArg);
8561
8562 if (!forwarding) {
8563 // Now append them again for after clobbering. Note that the compiler may ask us to use a
8564 // different register for the late for the post-clobbering version of the value. This gives
8565 // the compiler a chance to spill these values without having to burn any callee-saves.
8566 patchpoint->append(jsCallee, ValueRep::LateColdAny);
8567 patchpoint->append(jsArguments, ValueRep::LateColdAny);
8568 patchpoint->append(thisArg, ValueRep::LateColdAny);
8569 }
8570
8571 RefPtr<PatchpointExceptionHandle> exceptionHandle =
8572 preparePatchpointForExceptions(patchpoint);
8573
8574 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
8575 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
8576
8577 patchpoint->clobber(RegisterSet::macroScratchRegisters());
8578 patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
8579 patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
8580
8581 // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
8582 unsigned minimumJSCallAreaSize =
8583 sizeof(CallerFrameAndPC) +
8584 WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
8585
8586 m_proc.requestCallArgAreaSizeInBytes(minimumJSCallAreaSize);
8587
8588 CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
8589 State* state = &m_ftlState;
8590 VM* vm = &this->vm();
8591 patchpoint->setGenerator(
8592 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
8593 AllowMacroScratchRegisterUsage allowScratch(jit);
8594 CallSiteIndex callSiteIndex =
8595 state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
8596
8597 Box<CCallHelpers::JumpList> exceptions =
8598 exceptionHandle->scheduleExitCreation(params)->jumps(jit);
8599
8600 exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
8601
8602 jit.store32(
8603 CCallHelpers::TrustedImm32(callSiteIndex.bits()),
8604 CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));
8605
8606 CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
8607 CallVarargsData* data = node->callVarargsData();
8608
8609 unsigned argIndex = 1;
8610 GPRReg calleeGPR = params[argIndex++].gpr();
8611 ASSERT(calleeGPR == GPRInfo::regT0);
8612 GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
8613 GPRReg thisGPR = params[argIndex++].gpr();
8614
8615 B3::ValueRep calleeLateRep;
8616 B3::ValueRep argumentsLateRep;
8617 B3::ValueRep thisLateRep;
8618 if (!forwarding) {
8619 // If we're not forwarding then we'll need callee, arguments, and this after we
8620 // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
8621 // for this is to supply all of those operands as late uses in addition to
8622 // specifying them as early uses. It's possible that the late use uses a spill
8623 // while the early use uses a register, and it's possible for the late and early
8624 // uses to use different registers. We do know that the late uses interfere with
8625 // all volatile registers and so won't use those, but the early uses may use
8626 // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
8627 // definitely will.
8628 //
8629 // Note that we have to be super careful with these. It's possible that these
8630 // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
8631 // thisGPR. If that happens and we do for example:
8632 //
8633 // calleeLateRep.emitRestore(jit, calleeGPR);
8634 // argumentsLateRep.emitRestore(jit, calleeGPR);
8635 //
8636 // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
8637 // argumentsLateRep.gpr() == calleeGPR.
8638 //
8639 // We do a variety of things to prevent this from happening. For example, we use
8640 // argumentsLateRep before needing the other two and after we've already stopped
8641 // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
8642 // the *LateReps cannot use volatile registers (so they cannot be regT0, so
8643 // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
8644 //
8645 // An alternative would have been to just use early uses and early-clobber all
8646 // volatile registers. But that would force callee, arguments, and this into
8647 // callee-save registers even if we have to spill them. We don't want spilling to
8648 // use up three callee-saves.
8649 //
8650 // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
8651 // some desirable performance properties, so don't mistake the cleverness for
8652 // elegance.
8653 calleeLateRep = params[argIndex++];
8654 argumentsLateRep = params[argIndex++];
8655 thisLateRep = params[argIndex++];
8656 }
8657
8658 // Get some scratch registers.
8659 RegisterSet usedRegisters;
8660 usedRegisters.merge(RegisterSet::stackRegisters());
8661 usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
8662 usedRegisters.merge(RegisterSet::calleeSaveRegisters());
8663 usedRegisters.set(calleeGPR);
8664 if (argumentsGPR != InvalidGPRReg)
8665 usedRegisters.set(argumentsGPR);
8666 usedRegisters.set(thisGPR);
8667 if (calleeLateRep.isReg())
8668 usedRegisters.set(calleeLateRep.reg());
8669 if (argumentsLateRep.isReg())
8670 usedRegisters.set(argumentsLateRep.reg());
8671 if (thisLateRep.isReg())
8672 usedRegisters.set(thisLateRep.reg());
8673 ScratchRegisterAllocator allocator(usedRegisters);
8674 GPRReg scratchGPR1 = allocator.allocateScratchGPR();
8675 GPRReg scratchGPR2 = allocator.allocateScratchGPR();
8676 GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
8677 RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
8678
8679 auto callWithExceptionCheck = [&] (void* callee) {
8680 jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(callee)), GPRInfo::nonPreservedNonArgumentGPR0);
8681 jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
8682 exceptions->append(jit.emitExceptionCheck(*vm, AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
8683 };
8684
8685 unsigned originalStackHeight = params.proc().frameSize();
8686
8687 if (forwarding) {
8688 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8689
8690 CCallHelpers::JumpList slowCase;
8691 InlineCallFrame* inlineCallFrame;
8692 if (node->child3())
8693 inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame();
8694 else
8695 inlineCallFrame = node->origin.semantic.inlineCallFrame();
8696
8697 // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
8698 emitSetupVarargsFrameFastCase(*vm, jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
8699
8700 CCallHelpers::Jump done = jit.jump();
8701 slowCase.link(&jit);
8702 jit.setupArguments<decltype(operationThrowStackOverflowForVarargs)>();
8703 callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
8704 jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
8705
8706 done.link(&jit);
8707 } else {
8708 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
8709 jit.setupArguments<decltype(operationSizeFrameForVarargs)>(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
8710 callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
8711
8712 jit.move(GPRInfo::returnValueGPR, scratchGPR1);
8713 jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
8714 argumentsLateRep.emitRestore(jit, argumentsGPR);
8715 emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
8716 jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
8717 jit.setupArguments<decltype(operationSetupVarargsFrame)>(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
8718 callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
8719
8720 jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, CCallHelpers::stackPointerRegister);
8721
8722 calleeLateRep.emitRestore(jit, GPRInfo::regT0);
8723
8724 // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed
8725 // that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
8726 thisLateRep.emitRestore(jit, thisGPR);
8727 }
8728
8729 jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
8730 jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
8731
8732 CallLinkInfo::CallType callType;
8733 if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
8734 callType = CallLinkInfo::ConstructVarargs;
8735 else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
8736 callType = CallLinkInfo::TailCallVarargs;
8737 else
8738 callType = CallLinkInfo::CallVarargs;
8739
8740 bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
8741
8742 CCallHelpers::DataLabelPtr targetToCheck;
8743 CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
8744 CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
8745 CCallHelpers::TrustedImmPtr(nullptr));
8746
8747 CCallHelpers::Call fastCall;
8748 CCallHelpers::Jump done;
8749
8750 if (isTailCall) {
8751 jit.emitRestoreCalleeSaves();
8752 jit.prepareForTailCallSlow();
8753 fastCall = jit.nearTailCall();
8754 } else {
8755 fastCall = jit.nearCall();
8756 done = jit.jump();
8757 }
8758
8759 slowPath.link(&jit);
8760
8761 if (isTailCall)
8762 jit.emitRestoreCalleeSaves();
8763 jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
8764 CCallHelpers::Call slowCall = jit.nearCall();
8765
8766 if (isTailCall)
8767 jit.abortWithReason(JITDidReturnFromTailCall);
8768 else
8769 done.link(&jit);
8770
8771 callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
8772
8773 jit.addPtr(
8774 CCallHelpers::TrustedImm32(-originalStackHeight),
8775 GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
8776
8777 jit.addLinkTask(
8778 [=] (LinkBuffer& linkBuffer) {
8779 MacroAssemblerCodePtr<JITThunkPtrTag> linkCall = vm->getCTIStub(linkCallThunkGenerator).code();
8780 linkBuffer.link(slowCall, FunctionPtr<JITThunkPtrTag>(linkCall));
8781
8782 callLinkInfo->setCallLocations(
8783 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(slowCall)),
8784 CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(targetToCheck)),
8785 linkBuffer.locationOfNearCall<JSInternalPtrTag>(fastCall));
8786 });
8787 });
8788
8789 switch (node->op()) {
8790 case TailCallVarargs:
8791 case TailCallForwardVarargs:
8792 m_out.unreachable();
8793 break;
8794
8795 default:
8796 setJSValue(patchpoint);
8797 break;
8798 }
8799 }
8800
    // Lowers CallEval. Eval calls cannot use the normal inline-cached call path:
    // we lay out the callee frame eagerly on the stack as constrained stack
    // arguments, call operationCallEval directly, and fall back to a generic
    // virtual call when the runtime returns zero (i.e. it declined to handle
    // the call as eval).
    void compileCallEval()
    {
        Node* node = m_node;
        // Child 0 is the callee; the remaining children are the arguments.
        unsigned numArgs = node->numChildren() - 1;

        LValue jsCallee = lowJSValue(m_graph.varArgChild(node, 0));

        // Reserve call-argument area for a full call frame header plus the
        // arguments, rounded up to the stack alignment.
        unsigned frameSize = (CallFrame::headerSizeInRegisters + numArgs) * sizeof(EncodedJSValue);
        unsigned alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentBytes(), frameSize);

        m_proc.requestCallArgAreaSizeInBytes(alignedFrameSize);

        Vector<ConstrainedValue> arguments;
        // Pin the callee to regT0; the generator below relies on it being there.
        arguments.append(ConstrainedValue(jsCallee, ValueRep::reg(GPRInfo::regT0)));

        // Constrains a value to the stack slot of the given virtual register
        // within the callee frame we are building below the stack pointer.
        auto addArgument = [&] (LValue value, VirtualRegister reg, int offset) {
            intptr_t offsetFromSP =
                (reg.offset() - CallerFrameAndPC::sizeInRegisters) * sizeof(EncodedJSValue) + offset;
            arguments.append(ConstrainedValue(value, ValueRep::stackArgument(offsetFromSP)));
        };

        addArgument(jsCallee, VirtualRegister(CallFrameSlot::callee), 0);
        addArgument(m_out.constInt32(numArgs), VirtualRegister(CallFrameSlot::argumentCount), PayloadOffset);
        for (unsigned i = 0; i < numArgs; ++i)
            addArgument(lowJSValue(m_graph.varArgChild(node, 1 + i)), virtualRegisterForArgument(i), 0);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendVector(arguments);

        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);

        // Keep the tag registers live across the patchpoint, clobber what a JS
        // call may clobber, and receive the boxed result in returnValueGPR.
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);

        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
        State* state = &m_ftlState;
        VM& vm = this->vm();
        patchpoint->setGenerator(
            [=, &vm] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);
                CallSiteIndex callSiteIndex = state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // Record the call site index so the runtime can attribute any
                // thrown exception to this call.
                jit.store32(
                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
                    CCallHelpers::tagFor(VirtualRegister(CallFrameSlot::argumentCount)));

                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
                callLinkInfo->setUpCall(CallLinkInfo::Call, node->origin.semantic, GPRInfo::regT0);

                // regT1 = pointer to the callee frame laid out above; link its
                // caller-frame slot back to the current frame.
                jit.addPtr(CCallHelpers::TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), CCallHelpers::stackPointerRegister, GPRInfo::regT1);
                jit.storePtr(GPRInfo::callFrameRegister, CCallHelpers::Address(GPRInfo::regT1, CallFrame::callerFrameOffset()));

                // Now we need to make room for:
                // - The caller frame and PC for a call to operationCallEval.
                // - Potentially two arguments on the stack.
                unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
                requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
                jit.subPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.setupArguments<decltype(operationCallEval)>(GPRInfo::regT1);
                jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationCallEval)), GPRInfo::nonPreservedNonArgumentGPR0);
                jit.call(GPRInfo::nonPreservedNonArgumentGPR0, OperationPtrTag);
                exceptions->append(jit.emitExceptionCheck(state->vm(), AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));

                // A non-zero result means operationCallEval handled the call.
                CCallHelpers::Jump done = jit.branchTest64(CCallHelpers::NonZero, GPRInfo::returnValueGPR);

                // Slow path: pop the scratch area, reload the callee from the
                // frame we built, and perform a generic virtual call.
                jit.addPtr(CCallHelpers::TrustedImm32(requiredBytes), CCallHelpers::stackPointerRegister);
                jit.load64(CCallHelpers::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
                jit.emitDumbVirtualCall(vm, callLinkInfo);

                done.link(&jit);
                // Restore the stack pointer to its canonical frame position.
                jit.addPtr(
                    CCallHelpers::TrustedImm32(-params.proc().frameSize()),
                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
            });

        setJSValue(patchpoint);
    }
8885
    // Lowers LoadVarargs: asks the runtime for the arguments length, speculates
    // that the resulting frame fits within the statically computed limit, then
    // copies the arguments into the machine frame slots via operationLoadVarargs.
    void compileLoadVarargs()
    {
        LoadVarargsData* data = m_node->loadVarargsData();
        LValue jsArguments = lowJSValue(m_node->child1());

        LValue length = vmCall(
            Int32, m_out.operation(operationSizeOfVarargs), m_callFrame, jsArguments,
            m_out.constInt32(data->offset));

        // FIXME: There is a chance that we will call an effectful length property twice. This is safe
        // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
        // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
        // past the sizing.
        // https://bugs.webkit.org/show_bug.cgi?id=141448

        LValue lengthIncludingThis = m_out.add(length, m_out.int32One);

        // If adding one wrapped around, length compares above lengthIncludingThis.
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(length, lengthIncludingThis));

        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        // FIXME: This computation is rather silly. If operationLoadVarargs just took a pointer instead
        // of a VirtualRegister, we wouldn't have to do this.
        // https://bugs.webkit.org/show_bug.cgi?id=141660
        // Convert the frame-relative byte address of machineStart back into a
        // virtual register index: subtract the frame pointer, divide by 8 (>> 3).
        LValue machineStart = m_out.lShr(
            m_out.sub(addressFor(data->machineStart.offset()).value(), m_callFrame),
            m_out.constIntPtr(3));

        vmCall(
            Void, m_out.operation(operationLoadVarargs), m_callFrame,
            m_out.castToInt32(machineStart), jsArguments, m_out.constInt32(data->offset),
            length, m_out.constInt32(data->mandatoryMinimum));
    }
8925
    // Lowers ForwardVarargs: forwards the caller's (possibly inlined) arguments
    // directly into this frame's machine argument slots without materializing an
    // arguments object. Phantom spread-like sources take a separate path.
    void compileForwardVarargs()
    {
        if (m_node->child1()) {
            Node* arguments = m_node->child1().node();
            if (arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread) {
                compileForwardVarargsWithSpread();
                return;
            }
        }

        LoadVarargsData* data = m_node->loadVarargsData();
        // Forward from the phantom arguments' frame when child1 is present,
        // otherwise from this node's own semantic frame.
        InlineCallFrame* inlineCallFrame;
        if (m_node->child1())
            inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame();
        else
            inlineCallFrame = m_node->origin.semantic.inlineCallFrame();

        LValue length = nullptr;
        LValue lengthIncludingThis = nullptr;
        ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
        if (argumentsLength.isKnown) {
            // Statically known length: fold max(known - offset, 0) to constants.
            unsigned knownLength = argumentsLength.known;
            if (knownLength >= data->offset)
                knownLength = knownLength - data->offset;
            else
                knownLength = 0;
            length = m_out.constInt32(knownLength);
            lengthIncludingThis = m_out.constInt32(knownLength + 1);
        } else {
            // We need to perform the same logical operation as the code above, but through dynamic operations.
            if (!data->offset)
                length = argumentsLength.value;
            else {
                // Emit: length = value > offset ? value - offset : 0.
                LBasicBlock isLarger = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();

                ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
                m_out.branch(
                    m_out.above(argumentsLength.value, m_out.constInt32(data->offset)), unsure(isLarger), unsure(continuation));
                LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
                ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(data->offset)));
                m_out.jump(continuation);

                m_out.appendTo(continuation, lastNext);
                length = m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
            }
            lengthIncludingThis = m_out.add(length, m_out.constInt32(1));
        }

        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        unsigned numberOfArgumentsToSkip = data->offset;
        LValue sourceStart = getArgumentsStart(inlineCallFrame, numberOfArgumentsToSkip);
        LValue targetStart = addressFor(data->machineStart).value();

        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock mainLoopEntry = m_out.newBlock();
        LBasicBlock mainLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue lengthAsPtr = m_out.zeroExtPtr(length);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        ValueFromBlock loopBound = m_out.anchor(loopBoundValue);
        // First loop: fill slots [length, mandatoryMinimum) with undefined,
        // walking downward. Skipped entirely when mandatoryMinimum <= length.
        m_out.branch(
            m_out.above(loopBoundValue, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, mainLoopEntry);
        LValue previousIndex = m_out.phi(pointerType(), loopBound);
        LValue currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        ValueFromBlock nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(
            m_out.above(currentIndex, lengthAsPtr), unsure(undefinedLoop), unsure(mainLoopEntry));

        // Second loop: copy the actual arguments, walking downward from
        // length - 1 to 0. Skipped when length is zero.
        m_out.appendTo(mainLoopEntry, mainLoop);
        loopBound = m_out.anchor(lengthAsPtr);
        m_out.branch(m_out.notNull(lengthAsPtr), unsure(mainLoop), unsure(continuation));

        m_out.appendTo(mainLoop, continuation);
        previousIndex = m_out.phi(pointerType(), loopBound);
        currentIndex = m_out.sub(previousIndex, m_out.intPtrOne);
        LValue value = m_out.load64(
            m_out.baseIndex(m_heaps.variables, sourceStart, currentIndex));
        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, currentIndex));
        nextIndex = m_out.anchor(currentIndex);
        m_out.addIncomingToPhi(previousIndex, nextIndex);
        m_out.branch(m_out.isNull(currentIndex), unsure(continuation), unsure(mainLoop));

        m_out.appendTo(continuation, lastNext);
    }
9023
9024 LValue getSpreadLengthFromInlineCallFrame(InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip)
9025 {
9026 ArgumentsLength argumentsLength = getArgumentsLength(inlineCallFrame);
9027 if (argumentsLength.isKnown) {
9028 unsigned knownLength = argumentsLength.known;
9029 if (knownLength >= numberOfArgumentsToSkip)
9030 knownLength = knownLength - numberOfArgumentsToSkip;
9031 else
9032 knownLength = 0;
9033 return m_out.constInt32(knownLength);
9034 }
9035
9036
9037 // We need to perform the same logical operation as the code above, but through dynamic operations.
9038 if (!numberOfArgumentsToSkip)
9039 return argumentsLength.value;
9040
9041 LBasicBlock isLarger = m_out.newBlock();
9042 LBasicBlock continuation = m_out.newBlock();
9043
9044 ValueFromBlock smallerOrEqualLengthResult = m_out.anchor(m_out.constInt32(0));
9045 m_out.branch(
9046 m_out.above(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)), unsure(isLarger), unsure(continuation));
9047 LBasicBlock lastNext = m_out.appendTo(isLarger, continuation);
9048 ValueFromBlock largerLengthResult = m_out.anchor(m_out.sub(argumentsLength.value, m_out.constInt32(numberOfArgumentsToSkip)));
9049 m_out.jump(continuation);
9050
9051 m_out.appendTo(continuation, lastNext);
9052 return m_out.phi(Int32, smallerOrEqualLengthResult, largerLengthResult);
9053 }
9054
    // Lowers ForwardVarargs when the source is a phantom spread structure
    // (PhantomSpread / PhantomNewArrayWithSpread / PhantomNewArrayBuffer).
    // Two recursive walks over the phantom tree: the first sums the argument
    // count, the second copies the values into the machine frame slots.
    void compileForwardVarargsWithSpread()
    {
        // Per-frame cache so each inline call frame's spread length is computed
        // (and its IR emitted) only once even if referenced multiple times.
        HashMap<InlineCallFrame*, LValue, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> cachedSpreadLengths;

        Node* arguments = m_node->child1().node();
        RELEASE_ASSERT(arguments->op() == PhantomNewArrayWithSpread || arguments->op() == PhantomNewArrayBuffer || arguments->op() == PhantomSpread);

        unsigned numberOfStaticArguments = 0;
        Vector<LValue, 2> spreadLengths;

        // Walk 1: tally statically-known argument counts and collect dynamic
        // spread lengths.
        auto collectArgumentCount = recursableLambda([&](auto self, Node* target) -> void {
            if (target->op() == PhantomSpread) {
                self(target->child1().node());
                return;
            }

            if (target->op() == PhantomNewArrayWithSpread) {
                // The bit vector marks which children are themselves spreads.
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        self(m_graph.varArgChild(target, i).node());
                    else
                        ++numberOfStaticArguments;
                }
                return;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                numberOfStaticArguments += target->castOperand<JSImmutableButterfly*>()->length();
                return;
            }

            ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = target->numberOfArgumentsToSkip();
            spreadLengths.append(cachedSpreadLengths.ensure(inlineCallFrame, [&] () {
                return this->getSpreadLengthFromInlineCallFrame(inlineCallFrame, numberOfArgumentsToSkip);
            }).iterator->value);
        });

        collectArgumentCount(arguments);
        LValue lengthIncludingThis = m_out.constInt32(1 + numberOfStaticArguments);
        for (LValue length : spreadLengths)
            lengthIncludingThis = m_out.add(lengthIncludingThis, length);

        LoadVarargsData* data = m_node->loadVarargsData();
        speculate(
            VarargsOverflow, noValue(), nullptr,
            m_out.above(lengthIncludingThis, m_out.constInt32(data->limit)));

        m_out.store32(lengthIncludingThis, payloadFor(data->machineCount));

        LValue targetStart = addressFor(data->machineStart).value();

        // Walk 2: copy values into the frame. Takes the running store index and
        // returns the index past the last slot written.
        auto forwardSpread = recursableLambda([this, &cachedSpreadLengths, &targetStart](auto self, Node* target, LValue storeIndex) -> LValue {
            if (target->op() == PhantomSpread)
                return self(target->child1().node(), storeIndex);

            if (target->op() == PhantomNewArrayWithSpread) {
                BitVector* bitVector = target->bitVector();
                for (unsigned i = 0; i < target->numChildren(); i++) {
                    if (bitVector->get(i))
                        storeIndex = self(m_graph.varArgChild(target, i).node(), storeIndex);
                    else {
                        LValue value = this->lowJSValue(m_graph.varArgChild(target, i));
                        m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, storeIndex));
                        storeIndex = m_out.add(m_out.constIntPtr(1), storeIndex);
                    }
                }
                return storeIndex;
            }

            if (target->op() == PhantomNewArrayBuffer) {
                auto* array = target->castOperand<JSImmutableButterfly*>();
                for (unsigned i = 0; i < array->length(); i++) {
                    // Because forwarded values are drained as JSValue, we should not generate value
                    // in Double form even if PhantomNewArrayBuffer's indexingType is ArrayWithDouble.
                    int64_t value = JSValue::encode(array->get(i));
                    m_out.store64(m_out.constInt64(value), m_out.baseIndex(m_heaps.variables, targetStart, storeIndex, JSValue(), (Checked<int32_t>(sizeof(Register)) * i).unsafeGet()));
                }
                return m_out.add(m_out.constIntPtr(array->length()), storeIndex);
            }

            RELEASE_ASSERT(target->op() == PhantomCreateRest);
            InlineCallFrame* inlineCallFrame = target->origin.semantic.inlineCallFrame();

            LValue sourceStart = this->getArgumentsStart(inlineCallFrame, target->numberOfArgumentsToSkip());
            LValue spreadLength = m_out.zeroExtPtr(cachedSpreadLengths.get(inlineCallFrame));

            // Copy loop: forward spreadLength arguments from the source frame,
            // advancing both load and store indices in lockstep.
            LBasicBlock loop = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            ValueFromBlock startLoadIndex = m_out.anchor(m_out.constIntPtr(0));
            ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
            ValueFromBlock startStoreIndexForEnd = m_out.anchor(storeIndex);

            m_out.branch(m_out.isZero64(spreadLength), unsure(continuation), unsure(loop));

            LBasicBlock lastNext = m_out.appendTo(loop, continuation);
            LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
            LValue loadIndex = m_out.phi(Int64, startLoadIndex);
            LValue value = m_out.load64(
                m_out.baseIndex(m_heaps.variables, sourceStart, loadIndex));
            m_out.store64(value, m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
            LValue nextLoadIndex = m_out.add(m_out.constIntPtr(1), loadIndex);
            m_out.addIncomingToPhi(loadIndex, m_out.anchor(nextLoadIndex));
            LValue nextStoreIndex = m_out.add(m_out.constIntPtr(1), loopStoreIndex);
            m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextStoreIndex));
            ValueFromBlock loopStoreIndexForEnd = m_out.anchor(nextStoreIndex);
            m_out.branch(m_out.below(nextLoadIndex, spreadLength), unsure(loop), unsure(continuation));

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int64, startStoreIndexForEnd, loopStoreIndexForEnd);
        });

        LValue storeIndex = forwardSpread(arguments, m_out.constIntPtr(0));

        // Pad remaining slots up to mandatoryMinimum with undefined, walking
        // upward from the final store index.
        LBasicBlock undefinedLoop = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock startStoreIndex = m_out.anchor(storeIndex);
        LValue loopBoundValue = m_out.constIntPtr(data->mandatoryMinimum);
        m_out.branch(m_out.below(storeIndex, loopBoundValue),
            unsure(undefinedLoop), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(undefinedLoop, continuation);
        LValue loopStoreIndex = m_out.phi(Int64, startStoreIndex);
        m_out.store64(
            m_out.constInt64(JSValue::encode(jsUndefined())),
            m_out.baseIndex(m_heaps.variables, targetStart, loopStoreIndex));
        LValue nextIndex = m_out.add(loopStoreIndex, m_out.constIntPtr(1));
        m_out.addIncomingToPhi(loopStoreIndex, m_out.anchor(nextIndex));
        m_out.branch(
            m_out.below(nextIndex, loopBoundValue), unsure(undefinedLoop), unsure(continuation));

        m_out.appendTo(continuation, lastNext);
    }
9191
9192 void compileJump()
9193 {
9194 m_out.jump(lowBlock(m_node->targetBlock()));
9195 }
9196
9197 void compileBranch()
9198 {
9199 m_out.branch(
9200 boolify(m_node->child1()),
9201 WeightedTarget(
9202 lowBlock(m_node->branchData()->taken.block),
9203 m_node->branchData()->taken.count),
9204 WeightedTarget(
9205 lowBlock(m_node->branchData()->notTaken.block),
9206 m_node->branchData()->notTaken.count));
9207 }
9208
    // Lowers Switch. Each SwitchData kind funnels its operand into a common
    // representation (int32, single character code, string impl, or cell
    // pointer) and then dispatches via buildSwitch/switchString; untyped
    // operands get type checks that divert mismatches to the fallThrough block.
    void compileSwitch()
    {
        SwitchData* data = m_node->switchData();
        switch (data->kind) {
        case SwitchImm: {
            Vector<ValueFromBlock, 2> intValues;
            LBasicBlock switchOnInts = m_out.newBlock();

            LBasicBlock lastNext = m_out.appendTo(m_out.m_block, switchOnInts);

            switch (m_node->child1().useKind()) {
            case Int32Use: {
                intValues.append(m_out.anchor(lowInt32(m_node->child1())));
                m_out.jump(switchOnInts);
                break;
            }

            case UntypedUse: {
                LBasicBlock isInt = m_out.newBlock();
                LBasicBlock isNotInt = m_out.newBlock();
                LBasicBlock isDouble = m_out.newBlock();

                LValue boxedValue = lowJSValue(m_node->child1());
                m_out.branch(isNotInt32(boxedValue), unsure(isNotInt), unsure(isInt));

                LBasicBlock innerLastNext = m_out.appendTo(isInt, isNotInt);

                intValues.append(m_out.anchor(unboxInt32(boxedValue)));
                m_out.jump(switchOnInts);

                // Cells and miscs cannot match an immediate case; only doubles
                // get a second chance below.
                m_out.appendTo(isNotInt, isDouble);
                m_out.branch(
                    isCellOrMisc(boxedValue, provenType(m_node->child1())),
                    usually(lowBlock(data->fallThrough.block)), rarely(isDouble));

                // A double participates only if it round-trips exactly through
                // int32; otherwise it falls through.
                m_out.appendTo(isDouble, innerLastNext);
                LValue doubleValue = unboxDouble(boxedValue);
                LValue intInDouble = m_out.doubleToInt(doubleValue);
                intValues.append(m_out.anchor(intInDouble));
                m_out.branch(
                    m_out.doubleEqual(m_out.intToDouble(intInDouble), doubleValue),
                    unsure(switchOnInts), unsure(lowBlock(data->fallThrough.block)));
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            m_out.appendTo(switchOnInts, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, intValues));
            return;
        }

        case SwitchChar: {
            LValue stringValue;

            // FIXME: We should use something other than unsure() for the branch weight
            // of the fallThrough block. The main challenge is just that we have multiple
            // branches to fallThrough but a single count, so we would need to divvy it up
            // among the different lowered branches.
            // https://bugs.webkit.org/show_bug.cgi?id=129082

            switch (m_node->child1().useKind()) {
            case StringUse: {
                stringValue = lowString(m_node->child1());
                break;
            }

            case UntypedUse: {
                // Untyped: anything that is not a cell, or is a non-string
                // cell, goes to fallThrough.
                LValue unboxedValue = lowJSValue(m_node->child1());

                LBasicBlock isCellCase = m_out.newBlock();
                LBasicBlock isStringCase = m_out.newBlock();

                m_out.branch(
                    isNotCell(unboxedValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isCellCase));

                LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
                LValue cellValue = unboxedValue;
                m_out.branch(
                    isNotString(cellValue, provenType(m_node->child1())),
                    unsure(lowBlock(data->fallThrough.block)), unsure(isStringCase));

                m_out.appendTo(isStringCase, lastNext);
                stringValue = cellValue;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                break;
            }

            LBasicBlock lengthIs1 = m_out.newBlock();
            LBasicBlock needResolution = m_out.newBlock();
            LBasicBlock resolved = m_out.newBlock();
            LBasicBlock is8Bit = m_out.newBlock();
            LBasicBlock is16Bit = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            // Rope strings have no flat character data yet; resolve them via a
            // runtime call before reading the character.
            ValueFromBlock fastValue = m_out.anchor(m_out.loadPtr(stringValue, m_heaps.JSString_value));
            m_out.branch(
                isRopeString(stringValue, m_node->child1()),
                rarely(needResolution), usually(resolved));

            LBasicBlock lastNext = m_out.appendTo(needResolution, resolved);
            ValueFromBlock slowValue = m_out.anchor(
                vmCall(pointerType(), m_out.operation(operationResolveRope), m_callFrame, stringValue));
            m_out.jump(resolved);

            // Only single-character strings can match a char case.
            m_out.appendTo(resolved, lengthIs1);
            LValue value = m_out.phi(pointerType(), fastValue, slowValue);
            m_out.branch(
                m_out.notEqual(
                    m_out.load32NonNegative(value, m_heaps.StringImpl_length),
                    m_out.int32One),
                unsure(lowBlock(data->fallThrough.block)), unsure(lengthIs1));

            // Load the lone character, honoring the impl's 8-bit/16-bit flag.
            m_out.appendTo(lengthIs1, is8Bit);
            LValue characterData = m_out.loadPtr(value, m_heaps.StringImpl_data);
            m_out.branch(
                m_out.testNonZero32(
                    m_out.load32(value, m_heaps.StringImpl_hashAndFlags),
                    m_out.constInt32(StringImpl::flagIs8Bit())),
                unsure(is8Bit), unsure(is16Bit));

            Vector<ValueFromBlock, 2> characters;
            m_out.appendTo(is8Bit, is16Bit);
            characters.append(m_out.anchor(m_out.load8ZeroExt32(characterData, m_heaps.characters8[0])));
            m_out.jump(continuation);

            m_out.appendTo(is16Bit, continuation);
            characters.append(m_out.anchor(m_out.load16ZeroExt32(characterData, m_heaps.characters16[0])));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            buildSwitch(data, Int32, m_out.phi(Int32, characters));
            return;
        }

        case SwitchString: {
            switch (m_node->child1().useKind()) {
            case StringIdentUse: {
                // Atomized identifiers can be compared by StringImpl pointer,
                // so this lowers to a plain pointer switch.
                LValue stringImpl = lowStringIdent(m_node->child1());

                Vector<SwitchCase> cases;
                for (unsigned i = 0; i < data->cases.size(); ++i) {
                    LValue value = m_out.constIntPtr(data->cases[i].value.stringImpl());
                    LBasicBlock block = lowBlock(data->cases[i].target.block);
                    Weight weight = Weight(data->cases[i].target.count);
                    cases.append(SwitchCase(value, block, weight));
                }

                m_out.switchInstruction(
                    stringImpl, cases, lowBlock(data->fallThrough.block),
                    Weight(data->fallThrough.count));
                return;
            }

            case StringUse: {
                switchString(data, lowString(m_node->child1()), m_node->child1());
                return;
            }

            case UntypedUse: {
                // Untyped: non-cells and non-string cells fall through; strings
                // proceed to the content-based string switch.
                LValue value = lowJSValue(m_node->child1());

                LBasicBlock isCellBlock = m_out.newBlock();
                LBasicBlock isStringBlock = m_out.newBlock();

                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(isCellBlock), unsure(lowBlock(data->fallThrough.block)));

                LBasicBlock lastNext = m_out.appendTo(isCellBlock, isStringBlock);

                m_out.branch(
                    isString(value, provenType(m_node->child1())),
                    unsure(isStringBlock), unsure(lowBlock(data->fallThrough.block)));

                m_out.appendTo(isStringBlock, lastNext);

                switchString(data, value, m_node->child1());
                return;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }
            return;
        }

        case SwitchCell: {
            LValue cell;
            switch (m_node->child1().useKind()) {
            case CellUse: {
                cell = lowCell(m_node->child1());
                break;
            }

            case UntypedUse: {
                LValue value = lowJSValue(m_node->child1());
                LBasicBlock cellCase = m_out.newBlock();
                m_out.branch(
                    isCell(value, provenType(m_node->child1())),
                    unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
                m_out.appendTo(cellCase);
                cell = value;
                break;
            }

            default:
                DFG_CRASH(m_graph, m_node, "Bad use kind");
                return;
            }

            buildSwitch(m_node->switchData(), pointerType(), cell);
            return;
        } }

        DFG_CRASH(m_graph, m_node, "Bad switch kind");
    }
9435
9436 void compileEntrySwitch()
9437 {
9438 Vector<LBasicBlock> successors;
9439 for (DFG::BasicBlock* successor : m_node->entrySwitchData()->cases)
9440 successors.append(lowBlock(successor));
9441 m_out.entrySwitch(successors);
9442 }
9443
9444 void compileReturn()
9445 {
9446 m_out.ret(lowJSValue(m_node->child1()));
9447 }
9448
    // ForceOSRExit: this code path must always bail out; terminate it with an
    // unconditional OSR exit citing inadequate profiling coverage.
    void compileForceOSRExit()
    {
        terminate(InadequateCoverage);
    }
9453
    // Lowers the $vm CPU intrinsics (mfence/cpuid/pause/rdtsc) to patchpoints
    // that emit the raw instruction. Only implemented for x86-64; on other CPUs
    // this compiles to nothing (the body is preprocessed away).
    void compileCPUIntrinsic()
    {
#if CPU(X86_64)
        Intrinsic intrinsic = m_node->intrinsic();
        switch (intrinsic) {
        case CPUMfenceIntrinsic:
        case CPUCpuidIntrinsic:
        case CPUPauseIntrinsic: {
            // These produce no meaningful value; the node's result is undefined.
            PatchpointValue* patchpoint = m_out.patchpoint(Void);
            // Treat the patchpoint as an opaque call so B3 does not reorder or
            // eliminate it.
            patchpoint->effects = Effects::forCall();
            if (intrinsic == CPUCpuidIntrinsic)
                patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::ebx, X86Registers::ecx, X86Registers::edx });

            patchpoint->setGenerator([=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                switch (intrinsic) {
                case CPUMfenceIntrinsic:
                    jit.mfence();
                    break;
                case CPUCpuidIntrinsic:
                    jit.cpuid();
                    break;
                case CPUPauseIntrinsic:
                    jit.pause();
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }
            });
            setJSValue(m_out.constInt64(JSValue::encode(jsUndefined())));
            break;
        }
        case CPURdtscIntrinsic: {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->effects = Effects::forCall();
            patchpoint->clobber(RegisterSet { X86Registers::eax, X86Registers::edx });
            // The low 32-bits of rdtsc go into rax.
            patchpoint->resultConstraint = ValueRep::reg(X86Registers::eax);
            patchpoint->setGenerator( [=] (CCallHelpers& jit, const B3::StackmapGenerationParams&) {
                jit.rdtsc();
            });
            setJSValue(boxInt32(patchpoint));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();

        }
#endif
    }
9503
9504 void compileThrow()
9505 {
9506 LValue error = lowJSValue(m_node->child1());
9507 vmCall(Void, m_out.operation(operationThrowDFG), m_callFrame, error);
9508 // vmCall() does an exception check so we should never reach this.
9509 m_out.unreachable();
9510 }
9511
9512 void compileThrowStaticError()
9513 {
9514 LValue errorMessage = lowString(m_node->child1());
9515 LValue errorType = m_out.constInt32(m_node->errorType());
9516 vmCall(Void, m_out.operation(operationThrowStaticError), m_callFrame, errorMessage, errorType);
9517 // vmCall() does an exception check so we should never reach this.
9518 m_out.unreachable();
9519 }
9520
    void compileInvalidationPoint()
    {
        // An invalidation point is a jump-replacement site: it emits no real
        // code (just a label with an optional nop shadow), but when the code
        // block is invalidated, a jump to the OSR exit built here is patched in.
        if (verboseCompilationEnabled())
            dataLog(" Invalidation point with availability: ", availabilityMap(), "\n");

        DFG_ASSERT(m_graph, m_node, m_origin.exitOK);

        PatchpointValue* patchpoint = m_out.patchpoint(Void);
        // Describe the OSR exit state so the (lazily emitted) exit can
        // reconstruct the baseline frame.
        OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
        NodeOrigin origin = m_origin;
        patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));

        // Captured by value into the generator lambda, which runs later.
        State* state = &m_ftlState;

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                // The MacroAssembler knows more about this than B3 does. The watchpointLabel() method
                // will ensure that this is followed by a nop shadow but only when this is actually
                // necessary.
                CCallHelpers::Label label = jit.watchpointLabel();

                RefPtr<OSRExitHandle> handle = descriptor->emitOSRExitLater(
                    *state, UncountableInvalidation, origin, params);

                RefPtr<JITCode> jitCode = state->jitCode.get();

                // After linking, record the (watchpoint label -> exit label)
                // pair so invalidation can patch in the jump.
                jit.addLinkTask(
                    [=] (LinkBuffer& linkBuffer) {
                        JumpReplacement jumpReplacement(
                            linkBuffer.locationOf<JSInternalPtrTag>(label),
                            linkBuffer.locationOf<OSRExitPtrTag>(handle->label));
                        jitCode->common.jumpReplacements.append(jumpReplacement);
                    });
            });

        // Set some obvious things.
        patchpoint->effects.terminal = false;
        patchpoint->effects.writesLocalState = false;
        patchpoint->effects.readsLocalState = false;

        // This is how we tell B3 about the possibility of jump replacement.
        patchpoint->effects.exitsSideways = true;

        // It's not possible for some prior branch to determine the safety of this operation. It's always
        // fine to execute this on some path that wouldn't have originally executed it before
        // optimization.
        patchpoint->effects.controlDependent = false;

        // If this falls through then it won't write anything.
        patchpoint->effects.writes = HeapRange();

        // When this abruptly terminates, it could read any heap location.
        patchpoint->effects.reads = HeapRange::top();
    }
9575
9576 void compileIsEmpty()
9577 {
9578 setBoolean(m_out.isZero64(lowJSValue(m_node->child1())));
9579 }
9580
9581 void compileIsUndefined()
9582 {
9583 setBoolean(equalNullOrUndefined(m_node->child1(), AllCellsAreFalse, EqualUndefined));
9584 }
9585
9586 void compileIsUndefinedOrNull()
9587 {
9588 setBoolean(isOther(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9589 }
9590
9591 void compileIsBoolean()
9592 {
9593 setBoolean(isBoolean(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9594 }
9595
9596 void compileIsNumber()
9597 {
9598 setBoolean(isNumber(lowJSValue(m_node->child1()), provenType(m_node->child1())));
9599 }
9600
    void compileNumberIsInteger()
    {
        // Number.isInteger(x): true for any int32, false for non-numbers; for
        // doubles, true iff the value is finite and has no fractional part.
        LBasicBlock notInt32 = m_out.newBlock();
        LBasicBlock doubleCase = m_out.newBlock();
        LBasicBlock doubleNotNanOrInf = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue input = lowJSValue(m_node->child1());

        // int32 => true.
        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isInt32(input, provenType(m_node->child1())), unsure(continuation), unsure(notInt32));

        // Not a number at all => false.
        LBasicBlock lastNext = m_out.appendTo(notInt32, doubleCase);
        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isNotNumber(input, provenType(m_node->child1())), unsure(continuation), unsure(doubleCase));

        // Double: reject NaN and +/-infinity by testing for an all-ones
        // exponent field (0x7ff) in the raw bits. Taking the branch to the
        // continuation here reuses the false result anchored above.
        m_out.appendTo(doubleCase, doubleNotNanOrInf);
        LValue doubleAsInt;
        LValue asDouble = unboxDouble(input, &doubleAsInt);
        LValue expBits = m_out.bitAnd(m_out.lShr(doubleAsInt, m_out.constInt32(52)), m_out.constInt64(0x7ff));
        m_out.branch(
            m_out.equal(expBits, m_out.constInt64(0x7ff)),
            unsure(continuation), unsure(doubleNotNanOrInf));

        // Finite double: it is an integer iff truncating toward zero leaves
        // it unchanged. Done in a patchpoint so we can use the CPU's round
        // and compare instructions directly.
        m_out.appendTo(doubleNotNanOrInf, continuation);
        PatchpointValue* patchpoint = m_out.patchpoint(Int32);
        patchpoint->appendSomeRegister(asDouble);
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->effects = Effects::none();
        patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            GPRReg result = params[0].gpr();
            FPRReg input = params[1].fpr();
            FPRReg temp = params.fpScratch(0);
            jit.roundTowardZeroDouble(input, temp);
            jit.compareDouble(MacroAssembler::DoubleEqual, input, temp, result);
        });
        ValueFromBlock patchpointResult = m_out.anchor(patchpoint);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, trueResult, falseResult, patchpointResult));
    }
9645
9646 void compileIsCellWithType()
9647 {
9648 if (m_node->child1().useKind() == UntypedUse) {
9649 LValue value = lowJSValue(m_node->child1());
9650
9651 LBasicBlock isCellCase = m_out.newBlock();
9652 LBasicBlock continuation = m_out.newBlock();
9653
9654 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
9655 m_out.branch(
9656 isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
9657
9658 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
9659 ValueFromBlock cellResult = m_out.anchor(isCellWithType(value, m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
9660 m_out.jump(continuation);
9661
9662 m_out.appendTo(continuation, lastNext);
9663 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
9664 } else {
9665 ASSERT(m_node->child1().useKind() == CellUse);
9666 setBoolean(isCellWithType(lowCell(m_node->child1()), m_node->queriedType(), m_node->speculatedTypeForQuery(), provenType(m_node->child1())));
9667 }
9668 }
9669
9670 void compileIsObject()
9671 {
9672 LValue value = lowJSValue(m_node->child1());
9673
9674 LBasicBlock isCellCase = m_out.newBlock();
9675 LBasicBlock continuation = m_out.newBlock();
9676
9677 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
9678 m_out.branch(
9679 isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
9680
9681 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
9682 ValueFromBlock cellResult = m_out.anchor(isObject(value, provenType(m_node->child1())));
9683 m_out.jump(continuation);
9684
9685 m_out.appendTo(continuation, lastNext);
9686 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
9687 }
9688
9689 LValue wangsInt64Hash(LValue input)
9690 {
9691 // key += ~(key << 32);
9692 LValue key = input;
9693 LValue temp = key;
9694 temp = m_out.shl(temp, m_out.constInt32(32));
9695 temp = m_out.bitNot(temp);
9696 key = m_out.add(key, temp);
9697 // key ^= (key >> 22);
9698 temp = key;
9699 temp = m_out.lShr(temp, m_out.constInt32(22));
9700 key = m_out.bitXor(key, temp);
9701 // key += ~(key << 13);
9702 temp = key;
9703 temp = m_out.shl(temp, m_out.constInt32(13));
9704 temp = m_out.bitNot(temp);
9705 key = m_out.add(key, temp);
9706 // key ^= (key >> 8);
9707 temp = key;
9708 temp = m_out.lShr(temp, m_out.constInt32(8));
9709 key = m_out.bitXor(key, temp);
9710 // key += (key << 3);
9711 temp = key;
9712 temp = m_out.shl(temp, m_out.constInt32(3));
9713 key = m_out.add(key, temp);
9714 // key ^= (key >> 15);
9715 temp = key;
9716 temp = m_out.lShr(temp, m_out.constInt32(15));
9717 key = m_out.bitXor(key, temp);
9718 // key += ~(key << 27);
9719 temp = key;
9720 temp = m_out.shl(temp, m_out.constInt32(27));
9721 temp = m_out.bitNot(temp);
9722 key = m_out.add(key, temp);
9723 // key ^= (key >> 31);
9724 temp = key;
9725 temp = m_out.lShr(temp, m_out.constInt32(31));
9726 key = m_out.bitXor(key, temp);
9727 key = m_out.castToInt32(key);
9728
9729 return key;
9730 }
9731
    LValue mapHashString(LValue string, Edge& edge)
    {
        // Hash a JSString for Map/Set use. Fast path: read the precomputed
        // hash out of the StringImpl. Rope strings (no resolved StringImpl)
        // and strings whose hash has not been computed yet (hash == 0) fall
        // back to operationMapHash.
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isRopeString(string, edge), rarely(slowCase), usually(nonEmptyStringCase));

        LBasicBlock lastNext = m_out.appendTo(nonEmptyStringCase, slowCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        // The hash lives in the bits of hashAndFlags above the flag bits.
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, string));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, slowResult, nonEmptyStringHashResult);
    }
9755
    void compileMapHash()
    {
        // Computes the hash used for Map/Set lookups. Typed children whose
        // bits identify them are hashed directly via wangsInt64Hash; strings
        // hash via their StringImpl (or the slow path); fully untyped input
        // dispatches on the value's kind at runtime.
        switch (m_node->child1().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // Hash the encoded JSValue bits directly.
            LValue key = lowJSValue(m_node->child1(), ManualOperandSpeculation);
            speculate(m_node->child1());
            setInt32(wangsInt64Hash(key));
            return;
        }

        case CellUse: {
            // Known cell: strings take string hashing, any other cell hashes
            // by its pointer bits.
            LBasicBlock isString = m_out.newBlock();
            LBasicBlock notString = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue value = lowCell(m_node->child1());
            LValue isStringValue = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
            m_out.branch(
                isStringValue, unsure(isString), unsure(notString));

            LBasicBlock lastNext = m_out.appendTo(isString, notString);
            ValueFromBlock stringResult = m_out.anchor(mapHashString(value, m_node->child1()));
            m_out.jump(continuation);

            m_out.appendTo(notString, continuation);
            ValueFromBlock notStringResult = m_out.anchor(wangsInt64Hash(value));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setInt32(m_out.phi(Int32, stringResult, notStringResult));
            return;
        }

        case StringUse: {
            LValue string = lowString(m_node->child1());
            setInt32(mapHashString(string, m_node->child1()));
            return;
        }

        default:
            RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse);
            break;
        }

        // UntypedUse: decide at runtime between string hashing, bit hashing,
        // and the slow path (ropes / uncomputed string hashes).
        LValue value = lowJSValue(m_node->child1());

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock straightHash = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock nonEmptyStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(straightHash));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        LValue isString = m_out.equal(m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType), m_out.constInt32(StringType));
        m_out.branch(
            isString, unsure(isStringCase), unsure(straightHash));

        m_out.appendTo(isStringCase, nonEmptyStringCase);
        m_out.branch(isRopeString(value, m_node->child1()), rarely(slowCase), usually(nonEmptyStringCase));

        m_out.appendTo(nonEmptyStringCase, straightHash);
        // The precomputed hash sits above the flag bits of hashAndFlags; a
        // zero hash means "not computed yet" and goes to the slow path.
        LValue stringImpl = m_out.loadPtr(value, m_heaps.JSString_value);
        LValue hash = m_out.lShr(m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
        ValueFromBlock nonEmptyStringHashResult = m_out.anchor(hash);
        m_out.branch(m_out.equal(hash, m_out.constInt32(0)),
            unsure(slowCase), unsure(continuation));

        m_out.appendTo(straightHash, slowCase);
        ValueFromBlock fastResult = m_out.anchor(wangsInt64Hash(value));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowResult = m_out.anchor(
            vmCall(Int32, m_out.operation(operationMapHash), m_callFrame, value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setInt32(m_out.phi(Int32, fastResult, slowResult, nonEmptyStringHashResult));
    }
9842
    void compileNormalizeMapKey()
    {
        // Canonicalizes a value for use as a Map/Set key: every NaN becomes
        // the single canonical NaN, and doubles whose value is exactly
        // representable as an int32 are boxed as int32 — so e.g. 1.0 and 1
        // normalize to the same key. Anything else passes through unchanged.
        ASSERT(m_node->child1().useKind() == UntypedUse);

        LBasicBlock isNumberCase = m_out.newBlock();
        LBasicBlock notInt32NumberCase = m_out.newBlock();
        LBasicBlock notNaNCase = m_out.newBlock();
        LBasicBlock convertibleCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isNumberCase);

        // Non-numbers and int32s are already in canonical form.
        LValue key = lowJSValue(m_node->child1());
        ValueFromBlock fastResult = m_out.anchor(key);
        m_out.branch(isNotNumber(key), unsure(continuation), unsure(isNumberCase));

        m_out.appendTo(isNumberCase, notInt32NumberCase);
        m_out.branch(isInt32(key), unsure(continuation), unsure(notInt32NumberCase));

        // NaN (x != x) => canonical NaN encoding.
        m_out.appendTo(notInt32NumberCase, notNaNCase);
        LValue doubleValue = unboxDouble(key);
        ValueFromBlock normalizedNaNResult = m_out.anchor(m_out.constInt64(JSValue::encode(jsNaN())));
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, doubleValue), unsure(continuation), unsure(notNaNCase));

        // Round-trip through int32: if double -> int -> double is lossless,
        // the value can be boxed as an int32; otherwise keep the double.
        m_out.appendTo(notNaNCase, convertibleCase);
        LValue integerValue = m_out.doubleToInt(doubleValue);
        LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
        ValueFromBlock doubleResult = m_out.anchor(key);
        m_out.branch(m_out.doubleNotEqualOrUnordered(doubleValue, integerValueConvertedToDouble), unsure(continuation), unsure(convertibleCase));

        m_out.appendTo(convertibleCase, continuation);
        ValueFromBlock boxedIntResult = m_out.anchor(boxInt32(integerValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, fastResult, normalizedNaNResult, doubleResult, boxedIntResult));
    }
9880
    void compileGetMapBucket()
    {
        // Inline hash-table probe for JSMap/JSSet: starting at the key's
        // hash, linearly probe (masked by capacity - 1) until we find a
        // matching bucket, hit an empty slot (key absent), or discover the
        // comparison needs the runtime (string-equality cases).
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notPresentInTable = m_out.newBlock();
        LBasicBlock notEmptyValue = m_out.newBlock();
        LBasicBlock notDeletedValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue map;
        if (m_node->child1().useKind() == MapObjectUse)
            map = lowMapObject(m_node->child1());
        else if (m_node->child1().useKind() == SetObjectUse)
            map = lowSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();

        LValue key = lowJSValue(m_node->child2(), ManualOperandSpeculation);
        if (m_node->child2().useKind() != UntypedUse)
            speculate(m_node->child2());

        // child3 is the precomputed hash (see compileMapHash).
        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(map, m_heaps.HashMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 is the probe mask.
        LValue mask = m_out.sub(m_out.load32(map, m_heaps.HashMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEmptyValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);
        // FIXME: I think these buffers are caged?
        // https://bugs.webkit.org/show_bug.cgi?id=174925
        LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight));
        ValueFromBlock bucketResult = m_out.anchor(hashMapBucket);
        // Empty slot terminates the probe: the key is not in the table.
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue()))),
            unsure(notPresentInTable), unsure(notEmptyValue));

        // Deleted slot: keep probing.
        m_out.appendTo(notEmptyValue, notDeletedValue);
        m_out.branch(m_out.equal(hashMapBucket, m_out.constIntPtr(bitwise_cast<intptr_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))),
            unsure(loopAround), unsure(notDeletedValue));

        m_out.appendTo(notDeletedValue, loopAround);
        LValue bucketKey = m_out.load64(hashMapBucket, m_heaps.HashMapBucket_key);

        // Perform Object.is()
        switch (m_node->child2().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // Bit equality is the whole story for these kinds.
            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(loopAround));
            break;
        }
        case StringUse: {
            // Not bit-equal: only a string-vs-string comparison could still
            // match, and that needs the runtime.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, loopAround);
            m_out.branch(isString(bucketKey),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case CellUse: {
            // As above, but the key's string-ness must also be checked.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        case UntypedUse: {
            // Fully dynamic: both sides must be cells and strings before the
            // slow-path string comparison is worth taking.
            LBasicBlock notBitEqual = m_out.newBlock();
            LBasicBlock bucketKeyIsCell = m_out.newBlock();
            LBasicBlock bothAreCells = m_out.newBlock();
            LBasicBlock bucketKeyIsString = m_out.newBlock();

            m_out.branch(m_out.equal(key, bucketKey),
                unsure(continuation), unsure(notBitEqual));

            m_out.appendTo(notBitEqual, bucketKeyIsCell);
            m_out.branch(isCell(bucketKey),
                unsure(bucketKeyIsCell), unsure(loopAround));

            m_out.appendTo(bucketKeyIsCell, bothAreCells);
            m_out.branch(isCell(key),
                unsure(bothAreCells), unsure(loopAround));

            m_out.appendTo(bothAreCells, bucketKeyIsString);
            m_out.branch(isString(bucketKey),
                unsure(bucketKeyIsString), unsure(loopAround));

            m_out.appendTo(bucketKeyIsString, loopAround);
            m_out.branch(isString(key),
                unsure(slowPath), unsure(loopAround));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // Advance the probe index and loop.
        m_out.appendTo(loopAround, slowPath);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        // Runtime lookup for the cases the inline probe cannot decide.
        m_out.appendTo(slowPath, notPresentInTable);
        ValueFromBlock slowPathResult = m_out.anchor(vmCall(pointerType(),
            m_out.operation(m_node->child1().useKind() == MapObjectUse ? operationJSMapFindBucket : operationJSSetFindBucket), m_callFrame, map, key, hash));
        m_out.jump(continuation);

        // Absent key: produce the owner type's sentinel bucket.
        m_out.appendTo(notPresentInTable, continuation);
        ValueFromBlock notPresentResult;
        if (m_node->child1().useKind() == MapObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else if (m_node->child1().useKind() == SetObjectUse)
            notPresentResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        else
            RELEASE_ASSERT_NOT_REACHED();
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), bucketResult, slowPathResult, notPresentResult));
    }
10029
10030 void compileGetMapBucketHead()
10031 {
10032 LValue map;
10033 if (m_node->child1().useKind() == MapObjectUse)
10034 map = lowMapObject(m_node->child1());
10035 else if (m_node->child1().useKind() == SetObjectUse)
10036 map = lowSetObject(m_node->child1());
10037 else
10038 RELEASE_ASSERT_NOT_REACHED();
10039
10040 ASSERT(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead() == HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>::offsetOfHead());
10041 setJSValue(m_out.loadPtr(map, m_heaps.HashMapImpl_head));
10042 }
10043
    void compileGetMapBucketNext()
    {
        // Walks the bucket chain starting after child1's bucket, skipping
        // buckets whose key slot is zero (deleted entries), and returns
        // either the next live bucket or the owner type's sentinel bucket at
        // the end of the chain.
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock noBucket = m_out.newBlock();
        LBasicBlock hasBucket = m_out.newBlock();
        LBasicBlock nextBucket = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        // Map and Set buckets share the next/key field layout, so the same
        // offsets work regardless of the bucket's owner.
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfNext() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext());
        ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey());
        LValue mapBucketPrev = lowCell(m_node->child1());
        ValueFromBlock mapBucketStart = m_out.anchor(m_out.loadPtr(mapBucketPrev, m_heaps.HashMapBucket_next));
        m_out.jump(loopStart);

        // Null next pointer terminates the chain.
        m_out.appendTo(loopStart, noBucket);
        LValue mapBucket = m_out.phi(pointerType(), mapBucketStart);
        m_out.branch(m_out.isNull(mapBucket), unsure(noBucket), unsure(hasBucket));

        // End of chain: answer with the appropriate sentinel bucket.
        m_out.appendTo(noBucket, hasBucket);
        ValueFromBlock noBucketResult;
        if (m_node->bucketOwnerType() == BucketOwnerType::Map)
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelMapBucket()));
        else {
            ASSERT(m_node->bucketOwnerType() == BucketOwnerType::Set);
            noBucketResult = m_out.anchor(weakPointer(vm().sentinelSetBucket()));
        }
        m_out.jump(continuation);

        // A zero key marks a deleted bucket; keep walking.
        m_out.appendTo(hasBucket, nextBucket);
        ValueFromBlock bucketResult = m_out.anchor(mapBucket);
        m_out.branch(m_out.isZero64(m_out.load64(mapBucket, m_heaps.HashMapBucket_key)), unsure(nextBucket), unsure(continuation));

        m_out.appendTo(nextBucket, continuation);
        m_out.addIncomingToPhi(mapBucket, m_out.anchor(m_out.loadPtr(mapBucket, m_heaps.HashMapBucket_next)));
        m_out.jump(loopStart);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), noBucketResult, bucketResult));
    }
10085
10086 void compileLoadValueFromMapBucket()
10087 {
10088 LValue mapBucket = lowCell(m_node->child1());
10089 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_value));
10090 }
10091
10092 void compileExtractValueFromWeakMapGet()
10093 {
10094 LValue value = lowJSValue(m_node->child1());
10095 setJSValue(m_out.select(m_out.isZero64(value),
10096 m_out.constInt64(JSValue::encode(jsUndefined())),
10097 value));
10098 }
10099
10100 void compileLoadKeyFromMapBucket()
10101 {
10102 LValue mapBucket = lowCell(m_node->child1());
10103 setJSValue(m_out.load64(mapBucket, m_heaps.HashMapBucket_key));
10104 }
10105
10106 void compileSetAdd()
10107 {
10108 LValue set = lowSetObject(m_node->child1());
10109 LValue key = lowJSValue(m_node->child2());
10110 LValue hash = lowInt32(m_node->child3());
10111
10112 setJSValue(vmCall(pointerType(), m_out.operation(operationSetAdd), m_callFrame, set, key, hash));
10113 }
10114
10115 void compileMapSet()
10116 {
10117 LValue map = lowMapObject(m_graph.varArgChild(m_node, 0));
10118 LValue key = lowJSValue(m_graph.varArgChild(m_node, 1));
10119 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10120 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10121
10122 setJSValue(vmCall(pointerType(), m_out.operation(operationMapSet), m_callFrame, map, key, value, hash));
10123 }
10124
    void compileWeakMapGet()
    {
        // Inline lookup in a WeakMap/WeakSet buffer: linear probing from the
        // key's hash, masked by capacity - 1, until the key matches or a null
        // key terminates the probe sequence. The key is always an object, so
        // pointer equality suffices and there is no slow path.
        LBasicBlock loopStart = m_out.newBlock();
        LBasicBlock loopAround = m_out.newBlock();
        LBasicBlock notEqualValue = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(loopStart);

        LValue weakMap;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            weakMap = lowWeakMapObject(m_node->child1());
        else if (m_node->child1().useKind() == WeakSetObjectUse)
            weakMap = lowWeakSetObject(m_node->child1());
        else
            RELEASE_ASSERT_NOT_REACHED();
        LValue key = lowObject(m_node->child2());
        LValue hash = lowInt32(m_node->child3());

        LValue buffer = m_out.loadPtr(weakMap, m_heaps.WeakMapImpl_buffer);
        // Capacity is a power of two, so capacity - 1 is the probe mask.
        LValue mask = m_out.sub(m_out.load32(weakMap, m_heaps.WeakMapImpl_capacity), m_out.int32One);

        ValueFromBlock indexStart = m_out.anchor(hash);
        m_out.jump(loopStart);

        m_out.appendTo(loopStart, notEqualValue);
        LValue unmaskedIndex = m_out.phi(Int32, indexStart);
        LValue index = m_out.bitAnd(mask, unmaskedIndex);

        LValue bucket;

        // Bucket address = buffer + index * sizeof(bucket); the shift amount
        // depends on which bucket layout (key vs key/value) is in use.
        if (m_node->child1().useKind() == WeakMapObjectUse) {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>)))));
        } else {
            static_assert(hasOneBitSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)), "Should be a power of 2");
            bucket = m_out.add(buffer, m_out.shl(m_out.zeroExt(index, Int64), m_out.constInt32(getLSBSet(sizeof(WeakMapBucket<WeakMapBucketDataKey>)))));
        }

        LValue bucketKey = m_out.load64(bucket, m_heaps.WeakMapBucket_key);
        m_out.branch(m_out.equal(key, bucketKey), unsure(continuation), unsure(notEqualValue));

        // Null bucket key: the probe sequence ended without a match.
        m_out.appendTo(notEqualValue, loopAround);
        m_out.branch(m_out.isNull(bucketKey), unsure(continuation), unsure(loopAround));

        m_out.appendTo(loopAround, continuation);
        m_out.addIncomingToPhi(unmaskedIndex, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.jump(loopStart);

        // On a miss, bucketKey is null here, which is the empty-value miss
        // signal consumed by ExtractValueFromWeakMapGet.
        m_out.appendTo(continuation, lastNext);
        LValue result;
        if (m_node->child1().useKind() == WeakMapObjectUse)
            result = m_out.load64(bucket, m_heaps.WeakMapBucket_value);
        else
            result = bucketKey;
        setJSValue(result);
    }
10182
10183 void compileWeakSetAdd()
10184 {
10185 LValue set = lowWeakSetObject(m_node->child1());
10186 LValue key = lowObject(m_node->child2());
10187 LValue hash = lowInt32(m_node->child3());
10188
10189 vmCall(Void, m_out.operation(operationWeakSetAdd), m_callFrame, set, key, hash);
10190 }
10191
10192 void compileWeakMapSet()
10193 {
10194 LValue map = lowWeakMapObject(m_graph.varArgChild(m_node, 0));
10195 LValue key = lowObject(m_graph.varArgChild(m_node, 1));
10196 LValue value = lowJSValue(m_graph.varArgChild(m_node, 2));
10197 LValue hash = lowInt32(m_graph.varArgChild(m_node, 3));
10198
10199 vmCall(Void, m_out.operation(operationWeakMapSet), m_callFrame, map, key, value, hash);
10200 }
10201
    void compileIsObjectOrNull()
    {
        // Tests for typeof-style "object"-ness: true for null and for objects
        // that are not functions; objects that are exotic for typeof take a
        // slow path (operationObjectIsObject) for the full check.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));

        // Functions are excluded: they answer false.
        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock isFunctionResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Non-object cells answer false.
        m_out.appendTo(notFunctionCase, objectCase);
        ValueFromBlock notObjectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isObject(value, provenType(child)),
            unsure(objectCase), unsure(continuation));

        // Ordinary objects answer true; typeof-exotic ones need the runtime.
        m_out.appendTo(objectCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, notCellCase);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notZero64(slowResultValue));
        m_out.jump(continuation);

        // Non-cells: only null answers true.
        m_out.appendTo(notCellCase, continuation);
        LValue notCellResultValue = m_out.equal(value, m_out.constInt64(JSValue::encode(jsNull())));
        ValueFromBlock notCellResult = m_out.anchor(notCellResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32,
            isFunctionResult, notObjectResult, objectResult, slowResult, notCellResult);
        setBoolean(result);
    }
10258
    void compileIsFunction()
    {
        // Tests for typeof-style "function"-ness: non-cells are false,
        // functions are true, and cells that are exotic for typeof take a
        // slow path (operationObjectIsFunction) for the full check.
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);

        Edge child = m_node->child1();
        LValue value = lowJSValue(child);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isCell(value, provenType(child)), unsure(cellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notFunctionCase);
        ValueFromBlock functionResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(
            isFunction(value, provenType(child)),
            unsure(continuation), unsure(notFunctionCase));

        // Non-function cells answer false unless they are typeof-exotic.
        m_out.appendTo(notFunctionCase, slowPath);
        ValueFromBlock objectResult = m_out.anchor(m_out.booleanFalse);
        m_out.branch(
            isExoticForTypeof(value, provenType(child)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationObjectIsFunction, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        ValueFromBlock slowResult = m_out.anchor(m_out.notNull(slowResultValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(
            Int32, notCellResult, functionResult, objectResult, slowResult);
        setBoolean(result);
    }
10303
10304 void compileIsTypedArrayView()
10305 {
10306 LValue value = lowJSValue(m_node->child1());
10307
10308 LBasicBlock isCellCase = m_out.newBlock();
10309 LBasicBlock continuation = m_out.newBlock();
10310
10311 ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
10312 m_out.branch(isCell(value, provenType(m_node->child1())), unsure(isCellCase), unsure(continuation));
10313
10314 LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
10315 ValueFromBlock cellResult = m_out.anchor(isTypedArrayView(value, provenType(m_node->child1())));
10316 m_out.jump(continuation);
10317
10318 m_out.appendTo(continuation, lastNext);
10319 setBoolean(m_out.phi(Int32, notCellResult, cellResult));
10320 }
10321
10322 void compileTypeOf()
10323 {
10324 Edge child = m_node->child1();
10325 LValue value = lowJSValue(child);
10326
10327 LBasicBlock continuation = m_out.newBlock();
10328 LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
10329
10330 Vector<ValueFromBlock> results;
10331
10332 buildTypeOf(
10333 child, value,
10334 [&] (TypeofType type) {
10335 results.append(m_out.anchor(weakPointer(vm().smallStrings.typeString(type))));
10336 m_out.jump(continuation);
10337 });
10338
10339 m_out.appendTo(continuation, lastNext);
10340 setJSValue(m_out.phi(Int64, results));
10341 }
10342
10343 void compileInByVal()
10344 {
10345 setJSValue(vmCall(Int64, m_out.operation(operationInByVal), m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2())));
10346 }
10347
    // Lowers InById using a patchpoint that embeds a JITInByIdGenerator inline
    // cache. The slow path (emitted out-of-line) calls operationInByIdOptimize,
    // which can repatch the IC for subsequent executions.
    void compileInById()
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
        LValue base = lowCell(m_node->child1());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // Keep the tag registers pinned to their canonical values across the
        // patchpoint; the emitted IC code depends on them.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] holds the result; params[1] holds the base cell.
                auto generator = Box<JITInByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()));

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // Slow path: call the optimizing operation, then jump back to
                // the instruction after the fast path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), operationInByIdOptimize, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        setJSValue(patchpoint);
    }
10408
10409 void compileHasOwnProperty()
10410 {
10411 LBasicBlock slowCase = m_out.newBlock();
10412 LBasicBlock continuation = m_out.newBlock();
10413 LBasicBlock lastNext = nullptr;
10414
10415 LValue object = lowObject(m_node->child1());
10416 LValue uniquedStringImpl;
10417 LValue keyAsValue = nullptr;
10418 switch (m_node->child2().useKind()) {
10419 case StringUse: {
10420 LBasicBlock isNonEmptyString = m_out.newBlock();
10421 LBasicBlock isAtomicString = m_out.newBlock();
10422
10423 keyAsValue = lowString(m_node->child2());
10424 m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));
10425
10426 lastNext = m_out.appendTo(isNonEmptyString, isAtomicString);
10427 uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
10428 LValue isNotAtomic = m_out.testIsZero32(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
10429 m_out.branch(isNotAtomic, rarely(slowCase), usually(isAtomicString));
10430
10431 m_out.appendTo(isAtomicString, slowCase);
10432 break;
10433 }
10434 case SymbolUse: {
10435 keyAsValue = lowSymbol(m_node->child2());
10436 uniquedStringImpl = m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl);
10437 lastNext = m_out.insertNewBlocksBefore(slowCase);
10438 break;
10439 }
10440 case UntypedUse: {
10441 LBasicBlock isCellCase = m_out.newBlock();
10442 LBasicBlock isStringCase = m_out.newBlock();
10443 LBasicBlock notStringCase = m_out.newBlock();
10444 LBasicBlock isNonEmptyString = m_out.newBlock();
10445 LBasicBlock isSymbolCase = m_out.newBlock();
10446 LBasicBlock hasUniquedStringImpl = m_out.newBlock();
10447
10448 keyAsValue = lowJSValue(m_node->child2());
10449 m_out.branch(isCell(keyAsValue), usually(isCellCase), rarely(slowCase));
10450
10451 lastNext = m_out.appendTo(isCellCase, isStringCase);
10452 m_out.branch(isString(keyAsValue), unsure(isStringCase), unsure(notStringCase));
10453
10454 m_out.appendTo(isStringCase, isNonEmptyString);
10455 m_out.branch(isNotRopeString(keyAsValue, m_node->child2()), usually(isNonEmptyString), rarely(slowCase));
10456
10457 m_out.appendTo(isNonEmptyString, notStringCase);
10458 LValue implFromString = m_out.loadPtr(keyAsValue, m_heaps.JSString_value);
10459 ValueFromBlock stringResult = m_out.anchor(implFromString);
10460 LValue isNotAtomic = m_out.testIsZero32(m_out.load32(implFromString, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::flagIsAtomic()));
10461 m_out.branch(isNotAtomic, rarely(slowCase), usually(hasUniquedStringImpl));
10462
10463 m_out.appendTo(notStringCase, isSymbolCase);
10464 m_out.branch(isSymbol(keyAsValue), unsure(isSymbolCase), unsure(slowCase));
10465
10466 m_out.appendTo(isSymbolCase, hasUniquedStringImpl);
10467 ValueFromBlock symbolResult = m_out.anchor(m_out.loadPtr(keyAsValue, m_heaps.Symbol_symbolImpl));
10468 m_out.jump(hasUniquedStringImpl);
10469
10470 m_out.appendTo(hasUniquedStringImpl, slowCase);
10471 uniquedStringImpl = m_out.phi(pointerType(), stringResult, symbolResult);
10472 break;
10473 }
10474 default:
10475 RELEASE_ASSERT_NOT_REACHED();
10476 }
10477
10478 ASSERT(keyAsValue);
10479
10480 // Note that we don't test if the hash is zero here. AtomicStringImpl's can't have a zero
10481 // hash, however, a SymbolImpl may. But, because this is a cache, we don't care. We only
10482 // ever load the result from the cache if the cache entry matches what we are querying for.
10483 // So we either get super lucky and use zero for the hash and somehow collide with the entity
10484 // we're looking for, or we realize we're comparing against another entity, and go to the
10485 // slow path anyways.
10486 LValue hash = m_out.lShr(m_out.load32(uniquedStringImpl, m_heaps.StringImpl_hashAndFlags), m_out.constInt32(StringImpl::s_flagCount));
10487
10488 LValue structureID = m_out.load32(object, m_heaps.JSCell_structureID);
10489 LValue index = m_out.add(hash, structureID);
10490 index = m_out.zeroExtPtr(m_out.bitAnd(index, m_out.constInt32(HasOwnPropertyCache::mask)));
10491 ASSERT(vm().hasOwnPropertyCache());
10492 LValue cache = m_out.constIntPtr(vm().hasOwnPropertyCache());
10493
10494 IndexedAbstractHeap& heap = m_heaps.HasOwnPropertyCache;
10495 LValue sameStructureID = m_out.equal(structureID, m_out.load32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfStructureID())));
10496 LValue sameImpl = m_out.equal(uniquedStringImpl, m_out.loadPtr(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfImpl())));
10497 ValueFromBlock fastResult = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(heap, cache, index, JSValue(), HasOwnPropertyCache::Entry::offsetOfResult())));
10498 LValue cacheHit = m_out.bitAnd(sameStructureID, sameImpl);
10499
10500 m_out.branch(m_out.notZero32(cacheHit), usually(continuation), rarely(slowCase));
10501
10502 m_out.appendTo(slowCase, continuation);
10503 ValueFromBlock slowResult;
10504 slowResult = m_out.anchor(vmCall(Int32, m_out.operation(operationHasOwnProperty), m_callFrame, object, keyAsValue));
10505 m_out.jump(continuation);
10506
10507 m_out.appendTo(continuation, lastNext);
10508 setBoolean(m_out.phi(Int32, fastResult, slowResult));
10509 }
10510
10511 void compileParseInt()
10512 {
10513 RELEASE_ASSERT(m_node->child1().useKind() == UntypedUse || m_node->child1().useKind() == StringUse);
10514 LValue result;
10515 if (m_node->child2()) {
10516 LValue radix = lowInt32(m_node->child2());
10517 if (m_node->child1().useKind() == UntypedUse)
10518 result = vmCall(Int64, m_out.operation(operationParseIntGeneric), m_callFrame, lowJSValue(m_node->child1()), radix);
10519 else
10520 result = vmCall(Int64, m_out.operation(operationParseIntString), m_callFrame, lowString(m_node->child1()), radix);
10521 } else {
10522 if (m_node->child1().useKind() == UntypedUse)
10523 result = vmCall(Int64, m_out.operation(operationParseIntNoRadixGeneric), m_callFrame, lowJSValue(m_node->child1()));
10524 else
10525 result = vmCall(Int64, m_out.operation(operationParseIntStringNoRadix), m_callFrame, lowString(m_node->child1()));
10526 }
10527 setJSValue(result);
10528 }
10529
    // OverridesHasInstance: answers true when instanceof cannot use the default
    // behavior — either the hasInstance value is not the known default function,
    // or the constructor cell lacks the ImplementsDefaultHasInstance flag.
    void compileOverridesHasInstance()
    {
        FrozenValue* defaultHasInstanceFunction = m_node->cellOperand();
        ASSERT(defaultHasInstanceFunction->cell()->inherits<JSFunction>(vm()));

        LValue constructor = lowCell(m_node->child1());
        LValue hasInstance = lowJSValue(m_node->child2());

        LBasicBlock defaultHasInstance = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Unlike in the DFG, we don't worry about cleaning this code up for the case where we have proven the hasInstanceValue is a constant as B3 should fix it for us.

        // Any hasInstance other than the default function means "overridden".
        ValueFromBlock notDefaultHasInstanceResult = m_out.anchor(m_out.booleanTrue);
        m_out.branch(m_out.notEqual(hasInstance, frozenPointer(defaultHasInstanceFunction)), unsure(continuation), unsure(defaultHasInstance));

        LBasicBlock lastNext = m_out.appendTo(defaultHasInstance, continuation);
        // testIsZero32 yields true when the ImplementsDefaultHasInstance bit is
        // clear, i.e. when the constructor overrides the default behavior.
        ValueFromBlock implementsDefaultHasInstanceResult = m_out.anchor(m_out.testIsZero32(
            m_out.load8ZeroExt32(constructor, m_heaps.JSCell_typeInfoFlags),
            m_out.constInt32(ImplementsDefaultHasInstance)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, implementsDefaultHasInstanceResult, notDefaultHasInstanceResult));
    }
10555
10556 void compileCheckTypeInfoFlags()
10557 {
10558 speculate(
10559 BadTypeInfoFlags, noValue(), 0,
10560 m_out.testIsZero32(
10561 m_out.load8ZeroExt32(lowCell(m_node->child1()), m_heaps.JSCell_typeInfoFlags),
10562 m_out.constInt32(m_node->typeInfoOperand())));
10563 }
10564
    // Lowers InstanceOf via a patchpoint embedding a JITInstanceOfGenerator
    // inline cache; operationInstanceOfOptimize is called on the slow path and
    // can repatch the IC. The result is a boxed boolean.
    void compileInstanceOf()
    {
        Node* node = m_node;
        State* state = &m_ftlState;

        LValue value;
        LValue prototype;
        bool valueIsCell;
        bool prototypeIsCell;
        if (m_node->child1().useKind() == CellUse
            && m_node->child2().useKind() == CellUse) {
            value = lowCell(m_node->child1());
            prototype = lowCell(m_node->child2());

            valueIsCell = true;
            prototypeIsCell = true;
        } else {
            DFG_ASSERT(m_graph, m_node, m_node->child1().useKind() == UntypedUse);
            DFG_ASSERT(m_graph, m_node, m_node->child2().useKind() == UntypedUse);

            value = lowJSValue(m_node->child1());
            prototype = lowJSValue(m_node->child2());

            // Even under UntypedUse, abstract interpretation may have proven
            // cell-ness, which lets the generated code skip dynamic cell checks.
            valueIsCell = abstractValue(m_node->child1()).isType(SpecCell);
            prototypeIsCell = abstractValue(m_node->child2()).isType(SpecCell);
        }

        bool prototypeIsObject = abstractValue(m_node->child2()).isType(SpecObject | ~SpecCell);

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(value);
        patchpoint->appendSomeRegister(prototype);
        // Keep tag registers live; the IC relies on their canonical values.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        patchpoint->numGPScratchRegisters = 2;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                GPRReg resultGPR = params[0].gpr();
                GPRReg valueGPR = params[1].gpr();
                GPRReg prototypeGPR = params[2].gpr();
                GPRReg scratchGPR = params.gpScratch(0);
                GPRReg scratch2GPR = params.gpScratch(1);

                // A non-cell LHS is never an instance of anything: box false and
                // jump straight to the end.
                CCallHelpers::Jump doneJump;
                if (!valueIsCell) {
                    CCallHelpers::Jump isCell = jit.branchIfCell(valueGPR);
                    jit.boxBooleanPayload(false, resultGPR);
                    doneJump = jit.jump();
                    isCell.link(&jit);
                }

                // A prototype that might not be a cell has to go slow.
                CCallHelpers::JumpList slowCases;
                if (!prototypeIsCell)
                    slowCases.append(jit.branchIfNotCell(prototypeGPR));

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                auto generator = Box<JITInstanceOfGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), resultGPR, valueGPR, prototypeGPR, scratchGPR,
                    scratch2GPR, prototypeIsObject);
                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                // Out-of-line slow path: call the optimizing operation, then
                // resume after the fast path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJ optimizationFunction = operationInstanceOfOptimize;

                        slowCases.link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, resultGPR,
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), valueGPR,
                            prototypeGPR).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });

                if (doneJump.isSet())
                    doneJump.link(&jit);
            });

        // This returns a boxed boolean.
        setJSValue(patchpoint);
    }
10672
10673 void compileInstanceOfCustom()
10674 {
10675 LValue value = lowJSValue(m_node->child1());
10676 LValue constructor = lowCell(m_node->child2());
10677 LValue hasInstance = lowJSValue(m_node->child3());
10678
10679 setBoolean(m_out.logicalNot(m_out.equal(m_out.constInt32(0), vmCall(Int32, m_out.operation(operationInstanceOfCustom), m_callFrame, value, constructor, hasInstance))));
10680 }
10681
10682 void compileCountExecution()
10683 {
10684 TypedPointer counter = m_out.absolute(m_node->executionCounter()->address());
10685 m_out.store64(m_out.add(m_out.load64(counter), m_out.constInt64(1)), counter);
10686 }
10687
10688 void compileSuperSamplerBegin()
10689 {
10690 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10691 m_out.store32(m_out.add(m_out.load32(counter), m_out.constInt32(1)), counter);
10692 }
10693
10694 void compileSuperSamplerEnd()
10695 {
10696 TypedPointer counter = m_out.absolute(bitwise_cast<void*>(&g_superSamplerCount));
10697 m_out.store32(m_out.sub(m_out.load32(counter), m_out.constInt32(1)), counter);
10698 }
10699
10700 void compileStoreBarrier()
10701 {
10702 emitStoreBarrier(lowCell(m_node->child1()), m_node->op() == FencedStoreBarrier);
10703 }
10704
    // HasIndexedProperty: for the array shapes handled here, test the indexed
    // storage slot directly; out-of-bounds indices, holes, and unhandled shapes
    // defer to operationHasIndexedPropertyByInt.
    void compileHasIndexedProperty()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));

        switch (m_node->arrayMode().type()) {
        case Array::Int32:
        case Array::Contiguous: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            IndexedAbstractHeap& heap = m_node->arrayMode().type() == Array::Int32 ?
                m_heaps.indexedInt32Properties : m_heaps.indexedContiguousProperties;

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // Unless the array mode proves the index in-bounds, bounds-check
            // against the public length and send out-of-range indices slow.
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // A zero slot is a hole; holes defer to the slow path.
            LValue checkHoleResultValue =
                m_out.notZero64(m_out.load64(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1))));
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            return;
        }
        case Array::Double: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            IndexedAbstractHeap& heap = m_heaps.indexedDoubleProperties;

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            // Self-equality is false only for NaN, which double arrays use to
            // represent holes — so value == value doubles as the hole check.
            LValue doubleValue = m_out.loadDouble(baseIndex(heap, storage, index, m_graph.varArgChild(m_node, 1)));
            LValue checkHoleResultValue = m_out.doubleEqual(doubleValue, doubleValue);
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            return;
        }

        case Array::ArrayStorage: {
            LValue storage = lowStorage(m_graph.varArgChild(m_node, 2));
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));

            LBasicBlock slowCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();
            LBasicBlock lastNext = nullptr;

            // ArrayStorage bounds-checks against the vector length (note: not
            // the public length, unlike the contiguous shapes above).
            if (!m_node->arrayMode().isInBounds()) {
                LBasicBlock checkHole = m_out.newBlock();
                m_out.branch(
                    m_out.aboveOrEqual(
                        index, m_out.load32NonNegative(storage, m_heaps.ArrayStorage_vectorLength)),
                    rarely(slowCase), usually(checkHole));
                lastNext = m_out.appendTo(checkHole, slowCase);
            } else
                lastNext = m_out.insertNewBlocksBefore(slowCase);

            LValue checkHoleResultValue =
                m_out.notZero64(m_out.load64(baseIndex(m_heaps.ArrayStorage_vector, storage, index, m_graph.varArgChild(m_node, 1))));
            ValueFromBlock checkHoleResult = m_out.anchor(checkHoleResultValue);
            m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));

            m_out.appendTo(slowCase, continuation);
            ValueFromBlock slowResult = m_out.anchor(
                m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            setBoolean(m_out.phi(Int32, checkHoleResult, slowResult));
            break;
        }

        default: {
            // Unhandled array shapes always ask the runtime.
            LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
            setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
            break;
        }
        }
    }
10822
10823 void compileHasGenericProperty()
10824 {
10825 LValue base = lowJSValue(m_node->child1());
10826 LValue property = lowCell(m_node->child2());
10827 setJSValue(vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property));
10828 }
10829
    // HasStructureProperty: if the base's structure matches the enumerator's
    // cached structure, the answer is trivially true; otherwise fall back to
    // operationHasGenericProperty and compare against encoded `true`.
    void compileHasStructureProperty()
    {
        LValue base = lowJSValue(m_node->child1());
        LValue property = lowString(m_node->child2());
        LValue enumerator = lowCell(m_node->child3());

        LBasicBlock correctStructure = m_out.newBlock();
        LBasicBlock wrongStructure = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Structure mismatch means the cached answer can't be used.
        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(wrongStructure), usually(correctStructure));

        LBasicBlock lastNext = m_out.appendTo(correctStructure, wrongStructure);
        ValueFromBlock correctStructureResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(wrongStructure, continuation);
        // The operation returns an encoded JSValue; reduce it to a boolean by
        // comparing against the encoding of `true`.
        ValueFromBlock wrongStructureResult = m_out.anchor(
            m_out.equal(
                m_out.constInt64(JSValue::encode(jsBoolean(true))),
                vmCall(Int64, m_out.operation(operationHasGenericProperty), m_callFrame, base, property)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, correctStructureResult, wrongStructureResult));
    }
10859
    // GetDirectPname: fast property load during for-in enumeration. When the
    // base's structure matches the enumerator's cached structure, the property
    // can be loaded by index from inline or out-of-line storage; otherwise fall
    // back to a generic get-by-val.
    void compileGetDirectPname()
    {
        LValue base = lowCell(m_graph.varArgChild(m_node, 0));
        LValue property = lowCell(m_graph.varArgChild(m_node, 1));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 2));
        LValue enumerator = lowCell(m_graph.varArgChild(m_node, 3));

        LBasicBlock checkOffset = m_out.newBlock();
        LBasicBlock inlineLoad = m_out.newBlock();
        LBasicBlock outOfLineLoad = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Structure mismatch means the cached layout does not apply: go slow.
        m_out.branch(m_out.notEqual(
            m_out.load32(base, m_heaps.JSCell_structureID),
            m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedStructureID)),
            rarely(slowCase), usually(checkOffset));

        LBasicBlock lastNext = m_out.appendTo(checkOffset, inlineLoad);
        // Indices below the cached inline capacity live inside the object;
        // the rest live out-of-line in the butterfly.
        m_out.branch(m_out.aboveOrEqual(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity)),
            unsure(outOfLineLoad), unsure(inlineLoad));

        m_out.appendTo(inlineLoad, outOfLineLoad);
        ValueFromBlock inlineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(),
                base, m_out.zeroExt(index, Int64), ScaleEight, JSObject::offsetOfInlineStorage())));
        m_out.jump(continuation);

        m_out.appendTo(outOfLineLoad, slowCase);
        LValue storage = m_out.loadPtr(base, m_heaps.JSObject_butterfly);
        // Out-of-line properties are addressed by the negated out-of-line
        // index: realIndex = -(index - inlineCapacity).
        LValue realIndex = m_out.signExt32To64(
            m_out.neg(m_out.sub(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_cachedInlineCapacity))));
        int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
        ValueFromBlock outOfLineResult = m_out.anchor(
            m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        ValueFromBlock slowCaseResult = m_out.anchor(
            vmCall(Int64, m_out.operation(operationGetByVal), m_callFrame, base, property));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inlineResult, outOfLineResult, slowCaseResult));
    }
10905
10906 void compileGetEnumerableLength()
10907 {
10908 LValue enumerator = lowCell(m_node->child1());
10909 setInt32(m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_indexLength));
10910 }
10911
10912 void compileGetPropertyEnumerator()
10913 {
10914 if (m_node->child1().useKind() == CellUse)
10915 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumeratorCell), m_callFrame, lowCell(m_node->child1())));
10916 else
10917 setJSValue(vmCall(Int64, m_out.operation(operationGetPropertyEnumerator), m_callFrame, lowJSValue(m_node->child1())));
10918 }
10919
    // GetEnumeratorStructurePname: return the cached property name at `index`,
    // or encoded null when the index is at or past endStructurePropertyIndex.
    void compileGetEnumeratorStructurePname()
    {
        LValue enumerator = lowCell(m_node->child1());
        LValue index = lowInt32(m_node->child2());

        LBasicBlock inBounds = m_out.newBlock();
        LBasicBlock outOfBounds = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endStructurePropertyIndex)),
            usually(inBounds), rarely(outOfBounds));

        LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
        // Load the name pointer from the enumerator's cached names vector.
        LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
        ValueFromBlock inBoundsResult = m_out.anchor(
            m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
        m_out.jump(continuation);

        m_out.appendTo(outOfBounds, continuation);
        // Past the end: produce the encoded null value.
        ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
    }
10945
10946 void compileGetEnumeratorGenericPname()
10947 {
10948 LValue enumerator = lowCell(m_node->child1());
10949 LValue index = lowInt32(m_node->child2());
10950
10951 LBasicBlock inBounds = m_out.newBlock();
10952 LBasicBlock outOfBounds = m_out.newBlock();
10953 LBasicBlock continuation = m_out.newBlock();
10954
10955 m_out.branch(m_out.below(index, m_out.load32(enumerator, m_heaps.JSPropertyNameEnumerator_endGenericPropertyIndex)),
10956 usually(inBounds), rarely(outOfBounds));
10957
10958 LBasicBlock lastNext = m_out.appendTo(inBounds, outOfBounds);
10959 LValue storage = m_out.loadPtr(enumerator, m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVector);
10960 ValueFromBlock inBoundsResult = m_out.anchor(
10961 m_out.loadPtr(m_out.baseIndex(m_heaps.JSPropertyNameEnumerator_cachedPropertyNamesVectorContents, storage, m_out.zeroExtPtr(index))));
10962 m_out.jump(continuation);
10963
10964 m_out.appendTo(outOfBounds, continuation);
10965 ValueFromBlock outOfBoundsResult = m_out.anchor(m_out.constInt64(ValueNull));
10966 m_out.jump(continuation);
10967
10968 m_out.appendTo(continuation, lastNext);
10969 setJSValue(m_out.phi(Int64, inBoundsResult, outOfBoundsResult));
10970 }
10971
10972 void compileToIndexString()
10973 {
10974 LValue index = lowInt32(m_node->child1());
10975 setJSValue(vmCall(Int64, m_out.operation(operationToIndexString), m_callFrame, index));
10976 }
10977
10978 void compileCheckStructureImmediate()
10979 {
10980 LValue structure = lowCell(m_node->child1());
10981 checkStructure(
10982 structure, noValue(), BadCache, m_node->structureSet(),
10983 [this] (RegisteredStructure structure) {
10984 return weakStructure(structure);
10985 });
10986 }
10987
10988 void compileMaterializeNewObject()
10989 {
10990 ObjectMaterializationData& data = m_node->objectMaterializationData();
10991
10992 // Lower the values first, to avoid creating values inside a control flow diamond.
10993
10994 Vector<LValue, 8> values;
10995 for (unsigned i = 0; i < data.m_properties.size(); ++i) {
10996 Edge edge = m_graph.varArgChild(m_node, 1 + i);
10997 switch (data.m_properties[i].kind()) {
10998 case PublicLengthPLoc:
10999 case VectorLengthPLoc:
11000 values.append(lowInt32(edge));
11001 break;
11002 default:
11003 values.append(lowJSValue(edge));
11004 break;
11005 }
11006 }
11007
11008 RegisteredStructureSet set = m_node->structureSet();
11009
11010 Vector<LBasicBlock, 1> blocks(set.size());
11011 for (unsigned i = set.size(); i--;)
11012 blocks[i] = m_out.newBlock();
11013 LBasicBlock dummyDefault = m_out.newBlock();
11014 LBasicBlock outerContinuation = m_out.newBlock();
11015
11016 Vector<SwitchCase, 1> cases(set.size());
11017 for (unsigned i = set.size(); i--;)
11018 cases[i] = SwitchCase(weakStructure(set.at(i)), blocks[i], Weight(1));
11019 m_out.switchInstruction(
11020 lowCell(m_graph.varArgChild(m_node, 0)), cases, dummyDefault, Weight(0));
11021
11022 LBasicBlock outerLastNext = m_out.m_nextBlock;
11023
11024 Vector<ValueFromBlock, 1> results;
11025
11026 for (unsigned i = set.size(); i--;) {
11027 m_out.appendTo(blocks[i], i + 1 < set.size() ? blocks[i + 1] : dummyDefault);
11028
11029 RegisteredStructure structure = set.at(i);
11030
11031 LValue object;
11032 LValue butterfly;
11033
11034 if (structure->outOfLineCapacity() || hasIndexedProperties(structure->indexingType())) {
11035 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
11036 Allocator cellAllocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
11037
11038 bool hasIndexingHeader = hasIndexedProperties(structure->indexingType());
11039 unsigned indexingHeaderSize = 0;
11040 LValue indexingPayloadSizeInBytes = m_out.intPtrZero;
11041 LValue vectorLength = m_out.int32Zero;
11042 LValue publicLength = m_out.int32Zero;
11043 if (hasIndexingHeader) {
11044 indexingHeaderSize = sizeof(IndexingHeader);
11045 for (unsigned i = data.m_properties.size(); i--;) {
11046 PromotedLocationDescriptor descriptor = data.m_properties[i];
11047 switch (descriptor.kind()) {
11048 case PublicLengthPLoc:
11049 publicLength = values[i];
11050 break;
11051 case VectorLengthPLoc:
11052 vectorLength = values[i];
11053 break;
11054 default:
11055 break;
11056 }
11057 }
11058 indexingPayloadSizeInBytes =
11059 m_out.mul(m_out.zeroExtPtr(vectorLength), m_out.intPtrEight);
11060 }
11061
11062 LValue butterflySize = m_out.add(
11063 m_out.constIntPtr(
11064 structure->outOfLineCapacity() * sizeof(JSValue) + indexingHeaderSize),
11065 indexingPayloadSizeInBytes);
11066
11067 LBasicBlock slowPath = m_out.newBlock();
11068 LBasicBlock continuation = m_out.newBlock();
11069
11070 LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
11071
11072 ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);
11073
11074 LValue startOfStorage = allocateHeapCell(
11075 allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, slowPath),
11076 slowPath);
11077
11078 LValue fastButterflyValue = m_out.add(
11079 startOfStorage,
11080 m_out.constIntPtr(
11081 structure->outOfLineCapacity() * sizeof(JSValue) + sizeof(IndexingHeader)));
11082
11083 ValueFromBlock haveButterfly = m_out.anchor(fastButterflyValue);
11084
11085 splatWords(
11086 fastButterflyValue,
11087 m_out.constInt32(-structure->outOfLineCapacity() - 1),
11088 m_out.constInt32(-1),
11089 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11090
11091 m_out.store32(vectorLength, fastButterflyValue, m_heaps.Butterfly_vectorLength);
11092
11093 LValue fastObjectValue = allocateObject(
11094 m_out.constIntPtr(cellAllocator.localAllocator()), structure, fastButterflyValue,
11095 slowPath);
11096
11097 ValueFromBlock fastObject = m_out.anchor(fastObjectValue);
11098 ValueFromBlock fastButterfly = m_out.anchor(fastButterflyValue);
11099 m_out.jump(continuation);
11100
11101 m_out.appendTo(slowPath, continuation);
11102
11103 LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);
11104
11105 VM& vm = this->vm();
11106 LValue slowObjectValue;
11107 if (hasIndexingHeader) {
11108 slowObjectValue = lazySlowPath(
11109 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11110 return createLazyCallGenerator(vm,
11111 operationNewObjectWithButterflyWithIndexingHeaderAndVectorLength,
11112 locations[0].directGPR(), CCallHelpers::TrustedImmPtr(structure.get()),
11113 locations[1].directGPR(), locations[2].directGPR());
11114 },
11115 vectorLength, butterflyValue);
11116 } else {
11117 slowObjectValue = lazySlowPath(
11118 [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
11119 return createLazyCallGenerator(vm,
11120 operationNewObjectWithButterfly, locations[0].directGPR(),
11121 CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR());
11122 },
11123 butterflyValue);
11124 }
11125 ValueFromBlock slowObject = m_out.anchor(slowObjectValue);
11126 ValueFromBlock slowButterfly = m_out.anchor(
11127 m_out.loadPtr(slowObjectValue, m_heaps.JSObject_butterfly));
11128
11129 m_out.jump(continuation);
11130
11131 m_out.appendTo(continuation, lastNext);
11132
11133 object = m_out.phi(pointerType(), fastObject, slowObject);
11134 butterfly = m_out.phi(pointerType(), fastButterfly, slowButterfly);
11135
11136 m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
11137
11138 initializeArrayElements(m_out.constInt32(structure->indexingType()), m_out.int32Zero, vectorLength, butterfly);
11139
11140 HashMap<int32_t, LValue, DefaultHash<int32_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<int32_t>> indexMap;
11141 Vector<int32_t> indices;
11142 for (unsigned i = data.m_properties.size(); i--;) {
11143 PromotedLocationDescriptor descriptor = data.m_properties[i];
11144 if (descriptor.kind() != IndexedPropertyPLoc)
11145 continue;
11146 int32_t index = static_cast<int32_t>(descriptor.info());
11147
11148 auto result = indexMap.add(index, values[i]);
11149 DFG_ASSERT(m_graph, m_node, result); // Duplicates are illegal.
11150
11151 indices.append(index);
11152 }
11153
11154 if (!indices.isEmpty()) {
11155 std::sort(indices.begin(), indices.end());
11156
11157 Vector<LBasicBlock> blocksWithStores(indices.size());
11158 Vector<LBasicBlock> blocksWithChecks(indices.size());
11159
11160 for (unsigned i = indices.size(); i--;) {
11161 blocksWithStores[i] = m_out.newBlock();
11162 blocksWithChecks[i] = m_out.newBlock(); // blocksWithChecks[0] is the continuation.
11163 }
11164
11165 LBasicBlock indexLastNext = m_out.m_nextBlock;
11166
11167 for (unsigned i = indices.size(); i--;) {
11168 int32_t index = indices[i];
11169 LValue value = indexMap.get(index);
11170
11171 m_out.branch(
11172 m_out.below(m_out.constInt32(index), publicLength),
11173 unsure(blocksWithStores[i]), unsure(blocksWithChecks[i]));
11174
11175 m_out.appendTo(blocksWithStores[i], blocksWithChecks[i]);
11176
11177 // This has to type-check and convert its inputs, but it cannot do so in a
11178 // way that updates AI. That's a bit annoying, but if you think about how
11179 // sinking works, it's actually not a bad thing. We are virtually guaranteed
11180 // that these type checks will not fail, since the type checks that guarded
11181 // the original stores to the array are still somewhere above this point.
11182 Output::StoreType storeType;
11183 IndexedAbstractHeap* heap;
11184 switch (structure->indexingType()) {
11185 case ALL_INT32_INDEXING_TYPES:
11186 // FIXME: This could use the proven type if we had the Edge for the
11187 // value. https://bugs.webkit.org/show_bug.cgi?id=155311
11188 speculate(BadType, noValue(), nullptr, isNotInt32(value));
11189 storeType = Output::Store64;
11190 heap = &m_heaps.indexedInt32Properties;
11191 break;
11192
11193 case ALL_DOUBLE_INDEXING_TYPES: {
11194 // FIXME: If the source is ValueRep, we should avoid emitting any
11195 // checks. We could also avoid emitting checks if we had the Edge of
11196 // this value. https://bugs.webkit.org/show_bug.cgi?id=155311
11197
11198 LBasicBlock intCase = m_out.newBlock();
11199 LBasicBlock doubleCase = m_out.newBlock();
11200 LBasicBlock continuation = m_out.newBlock();
11201
11202 m_out.branch(isInt32(value), unsure(intCase), unsure(doubleCase));
11203
11204 LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);
11205
11206 ValueFromBlock intResult =
11207 m_out.anchor(m_out.intToDouble(unboxInt32(value)));
11208 m_out.jump(continuation);
11209
11210 m_out.appendTo(doubleCase, continuation);
11211
11212 speculate(BadType, noValue(), nullptr, isNumber(value));
11213 ValueFromBlock doubleResult = m_out.anchor(unboxDouble(value));
11214 m_out.jump(continuation);
11215
11216 m_out.appendTo(continuation, lastNext);
11217 value = m_out.phi(Double, intResult, doubleResult);
11218 storeType = Output::StoreDouble;
11219 heap = &m_heaps.indexedDoubleProperties;
11220 break;
11221 }
11222
11223 case ALL_CONTIGUOUS_INDEXING_TYPES:
11224 storeType = Output::Store64;
11225 heap = &m_heaps.indexedContiguousProperties;
11226 break;
11227
11228 default:
11229 DFG_CRASH(m_graph, m_node, "Invalid indexing type");
11230 break;
11231 }
11232
11233 m_out.store(value, m_out.address(butterfly, heap->at(index)), storeType);
11234
11235 m_out.jump(blocksWithChecks[i]);
11236 m_out.appendTo(
11237 blocksWithChecks[i], i ? blocksWithStores[i - 1] : indexLastNext);
11238 }
11239 }
11240 } else {
11241 // In the easy case where we can do a one-shot allocation, we simply allocate the
11242 // object to directly have the desired structure.
11243 object = allocateObject(structure);
11244 butterfly = nullptr; // Don't have one, don't need one.
11245 }
11246
11247 BitVector setInlineOffsets;
11248 for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
11249 for (unsigned i = data.m_properties.size(); i--;) {
11250 PromotedLocationDescriptor descriptor = data.m_properties[i];
11251 if (descriptor.kind() != NamedPropertyPLoc)
11252 continue;
11253 if (m_graph.identifiers()[descriptor.info()] != entry.key)
11254 continue;
11255
11256 LValue base;
11257 if (isInlineOffset(entry.offset)) {
11258 setInlineOffsets.set(entry.offset);
11259 base = object;
11260 } else
11261 base = butterfly;
11262 storeProperty(values[i], base, descriptor.info(), entry.offset);
11263 break;
11264 }
11265 }
11266 for (unsigned i = structure->inlineCapacity(); i--;) {
11267 if (!setInlineOffsets.get(i))
11268 m_out.store64(m_out.int64Zero, m_out.address(m_heaps.properties.atAnyNumber(), object, offsetRelativeToBase(i)));
11269 }
11270
11271 results.append(m_out.anchor(object));
11272 m_out.jump(outerContinuation);
11273 }
11274
11275 m_out.appendTo(dummyDefault, outerContinuation);
11276 m_out.unreachable();
11277
11278 m_out.appendTo(outerContinuation, outerLastNext);
11279 setJSValue(m_out.phi(pointerType(), results));
11280 mutatorFence();
11281 }
11282
    void compileMaterializeCreateActivation()
    {
        // Materializes a lexical environment (activation) that allocation
        // sinking elided. Children: [0] = symbol table constant, [1] = scope,
        // [2..] = the closure-variable values to store.
        ObjectMaterializationData& data = m_node->objectMaterializationData();

        // Lower all field values up front so no value is created inside the
        // fast/slow control-flow diamond below.
        Vector<LValue, 8> values;
        for (unsigned i = 0; i < data.m_properties.size(); ++i)
            values.append(lowJSValue(m_graph.varArgChild(m_node, 2 + i)));

        LValue scope = lowCell(m_graph.varArgChild(m_node, 1));
        SymbolTable* table = m_node->castOperand<SymbolTable*>();
        ASSERT(table == m_graph.varArgChild(m_node, 0)->castConstant<SymbolTable*>(vm()));
        RegisteredStructure structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->activationStructure());

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline-allocate the JSLexicalEnvironment cell and set up
        // its scope chain and symbol table.
        LValue fastObject = allocateObject<JSLexicalEnvironment>(
            JSLexicalEnvironment::allocationSize(table), structure, m_out.intPtrZero, slowPath);

        m_out.storePtr(scope, fastObject, m_heaps.JSScope_next);
        m_out.storePtr(weakPointer(table), fastObject, m_heaps.JSSymbolTableObject_symbolTable);


        ValueFromBlock fastResult = m_out.anchor(fastObject);
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        // We ensure allocation sinking explictly sets bottom values for all field members.
        // Therefore, it doesn't matter what JSValue we pass in as the initialization value
        // because all fields will be overwritten.
        // FIXME: It may be worth creating an operation that calls a constructor on JSLexicalEnvironment that
        // doesn't initialize every slot because we are guaranteed to do that here.
        VM& vm = this->vm();
        LValue callResult = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationCreateActivationDirect, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()), locations[1].directGPR(),
                    CCallHelpers::TrustedImmPtr(table),
                    CCallHelpers::TrustedImm64(JSValue::encode(jsUndefined())));
            }, scope);
        ValueFromBlock slowResult = m_out.anchor(callResult);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue activation = m_out.phi(pointerType(), fastResult, slowResult);
        RELEASE_ASSERT(data.m_properties.size() == table->scopeSize());
        // Store each sunk closure-variable value into its variable slot.
        for (unsigned i = 0; i < data.m_properties.size(); ++i) {
            PromotedLocationDescriptor descriptor = data.m_properties[i];
            ASSERT(descriptor.kind() == ClosureVarPLoc);
            m_out.store64(
                values[i], activation,
                m_heaps.JSLexicalEnvironment_variables[descriptor.info()]);
        }

        if (validationEnabled()) {
            // Validate to make sure every slot in the scope has one value.
            ConcurrentJSLocker locker(table->m_lock);
            for (auto iter = table->begin(locker), end = table->end(locker); iter != end; ++iter) {
                bool found = false;
                for (unsigned i = 0; i < data.m_properties.size(); ++i) {
                    PromotedLocationDescriptor descriptor = data.m_properties[i];
                    ASSERT(descriptor.kind() == ClosureVarPLoc);
                    if (iter->value.scopeOffset().offset() == descriptor.info()) {
                        found = true;
                        break;
                    }
                }
                ASSERT_UNUSED(found, found);
            }
        }

        // Fence before publishing so a concurrent GC never sees a
        // half-initialized cell.
        mutatorFence();
        setJSValue(activation);
    }
11360
11361 void compileCheckTraps()
11362 {
11363 ASSERT(Options::usePollingTraps());
11364 LBasicBlock needTrapHandling = m_out.newBlock();
11365 LBasicBlock continuation = m_out.newBlock();
11366
11367 LValue state = m_out.load8ZeroExt32(m_out.absolute(vm().needTrapHandlingAddress()));
11368 m_out.branch(m_out.isZero32(state),
11369 usually(continuation), rarely(needTrapHandling));
11370
11371 LBasicBlock lastNext = m_out.appendTo(needTrapHandling, continuation);
11372
11373 VM& vm = this->vm();
11374 lazySlowPath(
11375 [=, &vm] (const Vector<Location>&) -> RefPtr<LazySlowPath::Generator> {
11376 return createLazyCallGenerator(vm, operationHandleTraps, InvalidGPRReg);
11377 });
11378 m_out.jump(continuation);
11379
11380 m_out.appendTo(continuation, lastNext);
11381 }
11382
11383 void compileRegExpExec()
11384 {
11385 LValue globalObject = lowCell(m_node->child1());
11386
11387 if (m_node->child2().useKind() == RegExpObjectUse) {
11388 LValue base = lowRegExpObject(m_node->child2());
11389
11390 if (m_node->child3().useKind() == StringUse) {
11391 LValue argument = lowString(m_node->child3());
11392 LValue result = vmCall(
11393 Int64, m_out.operation(operationRegExpExecString), m_callFrame, globalObject,
11394 base, argument);
11395 setJSValue(result);
11396 return;
11397 }
11398
11399 LValue argument = lowJSValue(m_node->child3());
11400 LValue result = vmCall(
11401 Int64, m_out.operation(operationRegExpExec), m_callFrame, globalObject, base,
11402 argument);
11403 setJSValue(result);
11404 return;
11405 }
11406
11407 LValue base = lowJSValue(m_node->child2());
11408 LValue argument = lowJSValue(m_node->child3());
11409 LValue result = vmCall(
11410 Int64, m_out.operation(operationRegExpExecGeneric), m_callFrame, globalObject, base,
11411 argument);
11412 setJSValue(result);
11413 }
11414
11415 void compileRegExpExecNonGlobalOrSticky()
11416 {
11417 LValue globalObject = lowCell(m_node->child1());
11418 LValue argument = lowString(m_node->child2());
11419 LValue result = vmCall(
11420 Int64, m_out.operation(operationRegExpExecNonGlobalOrSticky), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11421 setJSValue(result);
11422 }
11423
11424 void compileRegExpMatchFastGlobal()
11425 {
11426 LValue globalObject = lowCell(m_node->child1());
11427 LValue argument = lowString(m_node->child2());
11428 LValue result = vmCall(
11429 Int64, m_out.operation(operationRegExpMatchFastGlobalString), m_callFrame, globalObject, frozenPointer(m_node->cellOperand()), argument);
11430 setJSValue(result);
11431 }
11432
11433 void compileRegExpTest()
11434 {
11435 LValue globalObject = lowCell(m_node->child1());
11436
11437 if (m_node->child2().useKind() == RegExpObjectUse) {
11438 LValue base = lowRegExpObject(m_node->child2());
11439
11440 if (m_node->child3().useKind() == StringUse) {
11441 LValue argument = lowString(m_node->child3());
11442 LValue result = vmCall(
11443 Int32, m_out.operation(operationRegExpTestString), m_callFrame, globalObject,
11444 base, argument);
11445 setBoolean(result);
11446 return;
11447 }
11448
11449 LValue argument = lowJSValue(m_node->child3());
11450 LValue result = vmCall(
11451 Int32, m_out.operation(operationRegExpTest), m_callFrame, globalObject, base,
11452 argument);
11453 setBoolean(result);
11454 return;
11455 }
11456
11457 LValue base = lowJSValue(m_node->child2());
11458 LValue argument = lowJSValue(m_node->child3());
11459 LValue result = vmCall(
11460 Int32, m_out.operation(operationRegExpTestGeneric), m_callFrame, globalObject, base,
11461 argument);
11462 setBoolean(result);
11463 }
11464
11465 void compileRegExpMatchFast()
11466 {
11467 LValue globalObject = lowCell(m_node->child1());
11468 LValue base = lowRegExpObject(m_node->child2());
11469 LValue argument = lowString(m_node->child3());
11470 LValue result = vmCall(
11471 Int64, m_out.operation(operationRegExpMatchFastString), m_callFrame, globalObject,
11472 base, argument);
11473 setJSValue(result);
11474 }
11475
    void compileNewRegexp()
    {
        // Creates a RegExpObject for a constant RegExp, seeding lastIndex from
        // child1. Fast path inline-allocates; slow path calls the runtime.
        FrozenValue* regexp = m_node->cellOperand();
        LValue lastIndex = lowJSValue(m_node->child1());
        ASSERT(regexp->cell()->inherits<RegExp>(vm()));

        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowCase);

        // Fast path: inline-allocate the RegExpObject cell and initialize its
        // two fields. The RegExp-pointer field also carries the
        // lastIndex-is-not-writable flag in its low bits; storing the plain
        // frozen pointer leaves that flag clear (writable).
        auto structure = m_graph.registerStructure(m_graph.globalObjectFor(m_node->origin.semantic)->regExpStructure());
        LValue fastResultValue = allocateObject<RegExpObject>(structure, m_out.intPtrZero, slowCase);
        m_out.storePtr(frozenPointer(regexp), fastResultValue, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag);
        m_out.store64(lastIndex, fastResultValue, m_heaps.RegExpObject_lastIndex);
        // Fence before publishing so a concurrent GC never sees a
        // half-initialized cell.
        mutatorFence();
        ValueFromBlock fastResult = m_out.anchor(fastResultValue);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);
        VM& vm = this->vm();
        RegExp* regexpCell = regexp->cast<RegExp*>();
        // Slow path: allocate and initialize through the runtime.
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewRegexpWithLastIndex, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(regexpCell), locations[1].directGPR());
            }, lastIndex);
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
11510
11511 void compileSetFunctionName()
11512 {
11513 vmCall(Void, m_out.operation(operationSetFunctionName), m_callFrame,
11514 lowCell(m_node->child1()), lowJSValue(m_node->child2()));
11515 }
11516
11517 void compileStringReplace()
11518 {
11519 if (m_node->child1().useKind() == StringUse
11520 && m_node->child2().useKind() == RegExpObjectUse
11521 && m_node->child3().useKind() == StringUse) {
11522
11523 if (JSString* replace = m_node->child3()->dynamicCastConstant<JSString*>(vm())) {
11524 if (!replace->length()) {
11525 LValue string = lowString(m_node->child1());
11526 LValue regExp = lowRegExpObject(m_node->child2());
11527
11528 LValue result = vmCall(
11529 pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpEmptyStr),
11530 m_callFrame, string, regExp);
11531
11532 setJSValue(result);
11533 return;
11534 }
11535 }
11536
11537 LValue string = lowString(m_node->child1());
11538 LValue regExp = lowRegExpObject(m_node->child2());
11539 LValue replace = lowString(m_node->child3());
11540
11541 LValue result = vmCall(
11542 pointerType(), m_out.operation(operationStringProtoFuncReplaceRegExpString),
11543 m_callFrame, string, regExp, replace);
11544
11545 setJSValue(result);
11546 return;
11547 }
11548
11549 LValue search;
11550 if (m_node->child2().useKind() == StringUse)
11551 search = lowString(m_node->child2());
11552 else
11553 search = lowJSValue(m_node->child2());
11554
11555 LValue result = vmCall(
11556 pointerType(), m_out.operation(operationStringProtoFuncReplaceGeneric), m_callFrame,
11557 lowJSValue(m_node->child1()), search,
11558 lowJSValue(m_node->child3()));
11559
11560 setJSValue(result);
11561 }
11562
11563 void compileGetRegExpObjectLastIndex()
11564 {
11565 setJSValue(m_out.load64(lowRegExpObject(m_node->child1()), m_heaps.RegExpObject_lastIndex));
11566 }
11567
    void compileSetRegExpObjectLastIndex()
    {
        // Stores child2 into the lastIndex slot of the RegExpObject in child1.
        if (!m_node->ignoreLastIndexIsWritable()) {
            LValue regExp = lowRegExpObject(m_node->child1());
            LValue value = lowJSValue(m_node->child2());

            // lastIndex may have been made non-writable (e.g. via
            // Object.defineProperty). The flag lives in the low bits of the
            // combined regExp-pointer field, so OSR-exit if it is set.
            speculate(
                ExoticObjectMode, noValue(), nullptr,
                m_out.testNonZeroPtr(
                    m_out.loadPtr(regExp, m_heaps.RegExpObject_regExpAndLastIndexIsNotWritableFlag),
                    m_out.constIntPtr(RegExpObject::lastIndexIsNotWritableFlag)));

            m_out.store64(value, regExp, m_heaps.RegExpObject_lastIndex);
            return;
        }

        // Writability was proven earlier in the pipeline: store unconditionally.
        m_out.store64(lowJSValue(m_node->child2()), lowCell(m_node->child1()), m_heaps.RegExpObject_lastIndex);
    }
11586
    void compileLogShadowChickenPrologue()
    {
        // Appends a prologue packet to the ShadowChicken log so the debugger
        // can reconstruct the machine stack, including tail-deleted frames.
        LValue packet = ensureShadowChickenPacket();
        LValue scope = lowCell(m_node->child1());

        m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
        // Slot 0 of the current frame holds the caller-frame pointer.
        m_out.storePtr(m_out.loadPtr(addressFor(0)), packet, m_heaps.ShadowChicken_Packet_callerFrame);
        m_out.storePtr(m_out.loadPtr(payloadFor(CallFrameSlot::callee)), packet, m_heaps.ShadowChicken_Packet_callee);
        m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
    }
11597
    void compileLogShadowChickenTail()
    {
        // Appends a tail-call packet to the ShadowChicken log, recording enough
        // state (this, scope, code block, call site) to recover the frame that
        // the imminent tail call will delete.
        LValue packet = ensureShadowChickenPacket();
        LValue thisValue = lowJSValue(m_node->child1());
        LValue scope = lowCell(m_node->child2());
        CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(m_node->origin.semantic);

        m_out.storePtr(m_callFrame, packet, m_heaps.ShadowChicken_Packet_frame);
        // A sentinel in the callee field marks this as a tail packet.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(ShadowChicken::Packet::tailMarker())), packet, m_heaps.ShadowChicken_Packet_callee);
        m_out.store64(thisValue, packet, m_heaps.ShadowChicken_Packet_thisValue);
        m_out.storePtr(scope, packet, m_heaps.ShadowChicken_Packet_scope);
        // We don't want the CodeBlock to have a weak pointer to itself because
        // that would cause it to always get collected.
        m_out.storePtr(m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), packet, m_heaps.ShadowChicken_Packet_codeBlock);
        m_out.store32(m_out.constInt32(callSiteIndex.bits()), packet, m_heaps.ShadowChicken_Packet_callSiteIndex);
    }
11614
    void compileRecordRegExpCachedResult()
    {
        // Records the inputs and match range of the last RegExp match into the
        // global object's cached-result slots, and clears the "reified" flag so
        // the next access rebuilds the match array lazily.
        Edge globalObjectEdge = m_graph.varArgChild(m_node, 0);
        Edge regExpEdge = m_graph.varArgChild(m_node, 1);
        Edge stringEdge = m_graph.varArgChild(m_node, 2);
        Edge startEdge = m_graph.varArgChild(m_node, 3);
        Edge endEdge = m_graph.varArgChild(m_node, 4);

        LValue globalObject = lowCell(globalObjectEdge);
        LValue regExp = lowCell(regExpEdge);
        LValue string = lowCell(stringEdge);
        LValue start = lowInt32(startEdge);
        LValue end = lowInt32(endEdge);

        m_out.storePtr(regExp, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastRegExp);
        m_out.storePtr(string, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_lastInput);
        m_out.store32(start, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_start);
        m_out.store32(end, globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_result_end);
        // "reified" is a byte-wide flag, hence the 32-as-8 store of zero.
        m_out.store32As8(
            m_out.constInt32(0),
            m_out.address(globalObject, m_heaps.JSGlobalObject_regExpGlobalData_cachedResult_reified));
    }
11637
11638 struct ArgumentsLength {
11639 ArgumentsLength()
11640 : isKnown(false)
11641 , known(UINT_MAX)
11642 , value(nullptr)
11643 {
11644 }
11645
11646 bool isKnown;
11647 unsigned known;
11648 LValue value;
11649 };
11650 ArgumentsLength getArgumentsLength(InlineCallFrame* inlineCallFrame)
11651 {
11652 ArgumentsLength length;
11653
11654 if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
11655 length.known = inlineCallFrame->argumentCountIncludingThis - 1;
11656 length.isKnown = true;
11657 length.value = m_out.constInt32(length.known);
11658 } else {
11659 length.known = UINT_MAX;
11660 length.isKnown = false;
11661
11662 VirtualRegister argumentCountRegister;
11663 if (!inlineCallFrame)
11664 argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
11665 else
11666 argumentCountRegister = inlineCallFrame->argumentCountRegister;
11667 length.value = m_out.sub(m_out.load32(payloadFor(argumentCountRegister)), m_out.int32One);
11668 }
11669
11670 return length;
11671 }
11672
    // Convenience overload: argument count for the current node's semantic origin.
    ArgumentsLength getArgumentsLength()
    {
        return getArgumentsLength(m_node->origin.semantic.inlineCallFrame());
    }
11677
11678 LValue getCurrentCallee()
11679 {
11680 if (InlineCallFrame* frame = m_node->origin.semantic.inlineCallFrame()) {
11681 if (frame->isClosureCall)
11682 return m_out.loadPtr(addressFor(frame->calleeRecovery.virtualRegister()));
11683 return weakPointer(frame->calleeRecovery.constant().asCell());
11684 }
11685 return m_out.loadPtr(addressFor(CallFrameSlot::callee));
11686 }
11687
11688 LValue getArgumentsStart(InlineCallFrame* inlineCallFrame, unsigned offset = 0)
11689 {
11690 VirtualRegister start = AssemblyHelpers::argumentsStart(inlineCallFrame) + offset;
11691 return addressFor(start).value();
11692 }
11693
    // Convenience overload: arguments start for the current node's semantic origin.
    LValue getArgumentsStart()
    {
        return getArgumentsStart(m_node->origin.semantic.inlineCallFrame());
    }
11698
    // Speculates that structureDiscriminant matches one of the structures in
    // "set", OSR-exiting with "exitKind" otherwise. "weakStructureDiscriminant"
    // maps each RegisteredStructure to the LValue the discriminant is compared
    // against.
    template<typename Functor>
    void checkStructure(
        LValue structureDiscriminant, const FormattedValue& formattedValue, ExitKind exitKind,
        const RegisteredStructureSet& set, const Functor& weakStructureDiscriminant)
    {
        // An empty set means no structure can satisfy the speculation: the
        // remainder of this block is unreachable.
        if (set.isEmpty()) {
            terminate(exitKind);
            return;
        }

        // Single candidate: one compare-and-exit suffices.
        if (set.size() == 1) {
            speculate(
                exitKind, formattedValue, 0,
                m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set[0])));
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        // Compare against each candidate in turn; a match jumps straight to
        // the continuation, and the last candidate is checked via speculate.
        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
        for (unsigned i = 0; i < set.size() - 1; ++i) {
            LBasicBlock nextStructure = m_out.newBlock();
            m_out.branch(
                m_out.equal(structureDiscriminant, weakStructureDiscriminant(set[i])),
                unsure(continuation), unsure(nextStructure));
            m_out.appendTo(nextStructure);
        }

        speculate(
            exitKind, formattedValue, 0,
            m_out.notEqual(structureDiscriminant, weakStructureDiscriminant(set.last())));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
11734
    // Converts a JSValue that is proven to be a number (NumberUse) or not a
    // cell (NotCellUse) to an int32. For NotCellUse, boolean true maps to 1 and
    // the remaining non-number primitives map to 0.
    LValue numberOrNotCellToInt32(Edge edge, LValue value)
    {
        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock notIntCase = m_out.newBlock();
        // The double/not-number split is only needed for NotCellUse.
        LBasicBlock doubleCase = 0;
        LBasicBlock notNumberCase = 0;
        if (edge.useKind() == NotCellUse) {
            doubleCase = m_out.newBlock();
            notNumberCase = m_out.newBlock();
        }
        LBasicBlock continuation = m_out.newBlock();

        Vector<ValueFromBlock> results;

        m_out.branch(isNotInt32(value), unsure(notIntCase), unsure(intCase));

        // Int32: just unbox.
        LBasicBlock lastNext = m_out.appendTo(intCase, notIntCase);
        results.append(m_out.anchor(unboxInt32(value)));
        m_out.jump(continuation);

        if (edge.useKind() == NumberUse) {
            // NumberUse: a non-int32 must be a double; type-check and truncate.
            m_out.appendTo(notIntCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isCellOrMisc(value));
            results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
            m_out.jump(continuation);
        } else {
            // NotCellUse: split non-int32 values into doubles and non-numbers.
            m_out.appendTo(notIntCase, doubleCase);
            m_out.branch(
                isCellOrMisc(value, provenType(edge)), unsure(notNumberCase), unsure(doubleCase));

            m_out.appendTo(doubleCase, notNumberCase);
            results.append(m_out.anchor(doubleToInt32(unboxDouble(value))));
            m_out.jump(continuation);

            m_out.appendTo(notNumberCase, continuation);

            // Must not be a cell; then true -> 1 and everything else
            // (false, null, undefined) -> 0.
            FTL_TYPE_CHECK(jsValueValue(value), edge, ~SpecCellCheck, isCell(value));

            LValue specialResult = m_out.select(
                m_out.equal(value, m_out.constInt64(JSValue::encode(jsBoolean(true)))),
                m_out.int32One, m_out.int32Zero);
            results.append(m_out.anchor(specialResult));
            m_out.jump(continuation);
        }

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, results);
    }
11783
11784 LValue loadProperty(LValue storage, unsigned identifierNumber, PropertyOffset offset)
11785 {
11786 return m_out.load64(addressOfProperty(storage, identifierNumber, offset));
11787 }
11788
11789 void storeProperty(
11790 LValue value, LValue storage, unsigned identifierNumber, PropertyOffset offset)
11791 {
11792 m_out.store64(value, addressOfProperty(storage, identifierNumber, offset));
11793 }
11794
11795 TypedPointer addressOfProperty(
11796 LValue storage, unsigned identifierNumber, PropertyOffset offset)
11797 {
11798 return m_out.address(
11799 m_heaps.properties[identifierNumber], storage, offsetRelativeToBase(offset));
11800 }
11801
    // Returns the storage base to write a property to when transitioning from
    // previousStructure to nextStructure, (re)allocating out-of-line property
    // storage if the transition grows capacity.
    LValue storageForTransition(
        LValue object, PropertyOffset offset,
        Structure* previousStructure, Structure* nextStructure)
    {
        // Inline properties live directly in the object cell.
        if (isInlineOffset(offset))
            return object;

        // Capacity unchanged: reuse the existing butterfly.
        if (previousStructure->outOfLineCapacity() == nextStructure->outOfLineCapacity())
            return m_out.loadPtr(object, m_heaps.JSObject_butterfly);

        LValue result;
        if (!previousStructure->outOfLineCapacity())
            result = allocatePropertyStorage(object, previousStructure);
        else {
            result = reallocatePropertyStorage(
                object, m_out.loadPtr(object, m_heaps.JSObject_butterfly),
                previousStructure, nextStructure);
        }

        // Installing the new butterfly nukes the structure first so a
        // concurrent GC never sees a mismatched structure/butterfly pair.
        nukeStructureAndSetButterfly(result, object);
        return result;
    }
11824
    // Fills butterfly elements [begin, end) with the hole value appropriate for
    // the indexing type: PNaN for double arrays, the empty JSValue otherwise.
    void initializeArrayElements(LValue indexingType, LValue begin, LValue end, LValue butterfly)
    {

        // Statically empty range: nothing to do.
        if (begin == end)
            return;

        if (indexingType->hasInt32()) {
            // Indexing type is a compile-time constant: pick the hole statically.
            IndexingType rawIndexingType = static_cast<IndexingType>(indexingType->asInt32());
            if (hasUndecided(rawIndexingType))
                return;
            IndexedAbstractHeap* heap = m_heaps.forIndexingType(rawIndexingType);
            DFG_ASSERT(m_graph, m_node, heap);

            LValue hole;
            if (hasDouble(rawIndexingType))
                hole = m_out.constInt64(bitwise_cast<int64_t>(PNaN));
            else
                hole = m_out.constInt64(JSValue::encode(JSValue()));

            splatWords(butterfly, begin, end, hole, heap->atAnyIndex());
        } else {
            // Dynamic indexing type: select the hole value at run time based on
            // whether the shape is DoubleShape.
            LValue hole = m_out.select(
                m_out.equal(m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)), m_out.constInt32(DoubleShape)),
                m_out.constInt64(bitwise_cast<int64_t>(PNaN)),
                m_out.constInt64(JSValue::encode(JSValue())));
            splatWords(butterfly, begin, end, hole, m_heaps.root);
        }
    }
11853
    // Stores "value" into the 64-bit words base[begin .. end) (indices may be
    // negative, e.g. for out-of-line property storage). Short constant ranges
    // are unrolled into straight-line stores; otherwise a counting loop is
    // emitted.
    void splatWords(LValue base, LValue begin, LValue end, LValue value, const AbstractHeap& heap)
    {
        const uint64_t unrollingLimit = 10;
        if (begin->hasInt() && end->hasInt()) {
            uint64_t beginConst = static_cast<uint64_t>(begin->asInt());
            uint64_t endConst = static_cast<uint64_t>(end->asInt());

            // Unroll short constant ranges.
            if (endConst - beginConst <= unrollingLimit) {
                for (uint64_t i = beginConst; i < endConst; ++i) {
                    LValue pointer = m_out.add(base, m_out.constIntPtr(i * sizeof(uint64_t)));
                    m_out.store64(value, TypedPointer(heap, pointer));
                }
                return;
            }
        }

        LBasicBlock initLoop = m_out.newBlock();
        LBasicBlock initDone = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(initLoop);

        // The index counts down from "end" to "begin" (so exactly end - begin
        // iterations run), while the pointer walks upward from base + begin*8.
        ValueFromBlock originalIndex = m_out.anchor(end);
        ValueFromBlock originalPointer = m_out.anchor(
            m_out.add(base, m_out.shl(m_out.signExt32ToPtr(begin), m_out.constInt32(3))));
        m_out.branch(m_out.notEqual(end, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initLoop, initDone);
        LValue index = m_out.phi(Int32, originalIndex);
        LValue pointer = m_out.phi(pointerType(), originalPointer);

        m_out.store64(value, TypedPointer(heap, pointer));

        LValue nextIndex = m_out.sub(index, m_out.int32One);
        m_out.addIncomingToPhi(index, m_out.anchor(nextIndex));
        m_out.addIncomingToPhi(pointer, m_out.anchor(m_out.add(pointer, m_out.intPtrEight)));
        m_out.branch(
            m_out.notEqual(nextIndex, begin), unsure(initLoop), unsure(initDone));

        m_out.appendTo(initDone, lastNext);
    }
11894
11895 LValue allocatePropertyStorage(LValue object, Structure* previousStructure)
11896 {
11897 if (previousStructure->couldHaveIndexingHeader()) {
11898 return vmCall(
11899 pointerType(),
11900 m_out.operation(operationAllocateComplexPropertyStorageWithInitialCapacity),
11901 m_callFrame, object);
11902 }
11903
11904 LValue result = allocatePropertyStorageWithSizeImpl(initialOutOfLineCapacity);
11905
11906 splatWords(
11907 result,
11908 m_out.constInt32(-initialOutOfLineCapacity - 1), m_out.constInt32(-1),
11909 m_out.int64Zero, m_heaps.properties.atAnyNumber());
11910
11911 return result;
11912 }
11913
    // Grows out-of-line property storage from previous's capacity to next's
    // (oldSize * outOfLineGrowthFactor), copying existing values into the new
    // allocation and zeroing the newly added slots.
    LValue reallocatePropertyStorage(
        LValue object, LValue oldStorage, Structure* previous, Structure* next)
    {
        size_t oldSize = previous->outOfLineCapacity();
        size_t newSize = oldSize * outOfLineGrowthFactor;
        
        ASSERT_UNUSED(next, newSize == next->outOfLineCapacity());
        
        if (previous->couldHaveIndexingHeader()) {
            // Storage that might carry an indexing header must be reallocated by
            // the runtime, which knows how to preserve that header.
            LValue newAllocSize = m_out.constIntPtr(newSize);
            return vmCall(pointerType(), m_out.operation(operationAllocateComplexPropertyStorage), m_callFrame, object, newAllocSize);
        }
        
        LValue result = allocatePropertyStorageWithSizeImpl(newSize);
        
        // Out-of-line properties sit at negative byte offsets from the butterfly
        // pointer. Negating size_t wraps and converts to the intended negative
        // ptrdiff_t; headerSize is the offset of the first property word.
        ptrdiff_t headerSize = -sizeof(IndexingHeader) - sizeof(void*);
        ptrdiff_t endStorage = headerSize - static_cast<ptrdiff_t>(oldSize * sizeof(JSValue));
        
        // Copy the old out-of-line values, one pointer-sized word at a time,
        // walking downward from just below the header.
        for (ptrdiff_t offset = headerSize; offset > endStorage; offset -= sizeof(void*)) {
            LValue loaded = 
                m_out.loadPtr(m_out.address(m_heaps.properties.atAnyNumber(), oldStorage, offset));
            m_out.storePtr(loaded, m_out.address(m_heaps.properties.atAnyNumber(), result, offset));
        }
        
        // Zero the slots that exist in the new capacity but not the old one.
        splatWords(
            result,
            m_out.constInt32(-newSize - 1), m_out.constInt32(-oldSize - 1),
            m_out.int64Zero, m_heaps.properties.atAnyNumber());
        
        return result;
    }
11945
    // Allocates sizeInValues out-of-line property slots and returns the
    // butterfly pointer (which points just past the storage and its indexing
    // header, so properties are addressed at negative offsets). The fast path
    // bump-allocates from the JSValue gigacage auxiliary space; the slow path
    // is a lazily generated call into the runtime.
    LValue allocatePropertyStorageWithSizeImpl(size_t sizeInValues)
    {
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);
        
        size_t sizeInBytes = sizeInValues * sizeof(JSValue);
        Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(sizeInBytes, AllocatorForMode::AllocatorIfExists);
        LValue startOfStorage = allocateHeapCell(
            m_out.constIntPtr(allocator.localAllocator()), slowPath);
        ValueFromBlock fastButterfly = m_out.anchor(
            m_out.add(m_out.constIntPtr(sizeInBytes + sizeof(IndexingHeader)), startOfStorage));
        m_out.jump(continuation);
        
        m_out.appendTo(slowPath, continuation);
        
        LValue slowButterflyValue;
        VM& vm = this->vm();
        // The initial-capacity case has a dedicated operation that takes no size
        // argument; other sizes pass the value count explicitly.
        if (sizeInValues == initialOutOfLineCapacity) {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorageWithInitialCapacity,
                        locations[0].directGPR());
                });
        } else {
            slowButterflyValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationAllocateSimplePropertyStorage, locations[0].directGPR(),
                        CCallHelpers::TrustedImmPtr(sizeInValues));
                });
        }
        ValueFromBlock slowButterfly = m_out.anchor(slowButterflyValue);
        
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        
        return m_out.phi(pointerType(), fastButterfly, slowButterfly);
    }
11988
    // Emits a patchable inline cache for a GetById-style access on `base` and
    // returns the loaded JSValue. The fast path is generated by
    // JITGetByIdGenerator; the late (slow) path calls the optimizing operation
    // for `type`, which can repatch the IC.
    LValue getById(LValue base, AccessType type)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        // The IC body boxes/unboxes values, so keep the tag registers live.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        // FIXME: If this is a GetByIdFlush/GetByIdDirectFlush, we might get some performance boost if we claim that it
        // clobbers volatile registers late. It's not necessary for correctness, though, since the
        // IC code is super smart about saving registers.
        // https://bugs.webkit.org/show_bug.cgi?id=152848
        
        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // params[0] is the result JSValue, params[1] the base JSValue.
                auto generator = Box<JITGetByIdGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[1].gpr()),
                    JSValueRegs(params[0].gpr()), type);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJI optimizationFunction = appropriateOptimizingGetByIdFunction(type);

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12060
    // Like getById, but for GetByIdWithThis: the IC receives both the base and
    // the explicit `this` value, and the slow path calls
    // operationGetByIdWithThisOptimize.
    LValue getByIdWithThis(LValue base, LValue thisValue)
    {
        Node* node = m_node;
        UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        patchpoint->appendSomeRegister(thisValue);
        // The IC body boxes/unboxes values, so keep the tag registers live.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));

        patchpoint->clobber(RegisterSet::macroScratchRegisters());

        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);

        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                CallSiteIndex callSiteIndex =
                    state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);

                // This is the direct exit target for operation calls.
                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // This is the exit for call IC's created by the getById for getters. We don't have
                // to do anything weird other than call this, since it will associate the exit with
                // the callsite index.
                exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);

                // params[0] is the result, params[1] the base, params[2] `this`.
                auto generator = Box<JITGetByIdWithThisGenerator>::create(
                    jit.codeBlock(), node->origin.semantic, callSiteIndex,
                    params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis);

                generator->generateFastPath(jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        J_JITOperation_ESsiJJI optimizationFunction = operationGetByIdWithThisOptimize;

                        generator->slowPathJump().link(&jit);
                        CCallHelpers::Label slowPathBegin = jit.label();
                        CCallHelpers::Call slowPathCall = callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), optimizationFunction, params[0].gpr(),
                            CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
                            params[2].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
                        jit.jump().linkTo(done, &jit);

                        generator->reportSlowPathCall(slowPathBegin, slowPathCall);

                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                generator->finalize(linkBuffer, linkBuffer);
                            });
                    });
            });

        return patchpoint;
    }
12128
12129 LValue isFastTypedArray(LValue object)
12130 {
12131 return m_out.equal(
12132 m_out.load32(object, m_heaps.JSArrayBufferView_mode),
12133 m_out.constInt32(FastTypedArray));
12134 }
12135
12136 TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
12137 {
12138 return m_out.baseIndex(
12139 heap, storage, m_out.zeroExtPtr(index), provenValue(edge), offset);
12140 }
12141
    // Lowers a comparison node by dispatching on the operands' use kinds, most
    // specialized first: int32/int52 pairs use intFunctor, doubles use
    // doubleFunctor, atomized strings call stringIdentFunction, general strings
    // call stringFunction via vmCall, and anything else falls back to the
    // generic nonSpeculativeCompare.
    template<typename IntFunctor, typename DoubleFunctor>
    void compare(
        const IntFunctor& intFunctor, const DoubleFunctor& doubleFunctor,
        C_JITOperation_TT stringIdentFunction,
        C_JITOperation_B_EJssJss stringFunction,
        S_JITOperation_EJJ fallbackFunction)
    {
        if (m_node->isBinaryUseKind(Int32Use)) {
            LValue left = lowInt32(m_node->child1());
            LValue right = lowInt32(m_node->child2());
            setBoolean(intFunctor(left, right));
            return;
        }
        
        if (m_node->isBinaryUseKind(Int52RepUse)) {
            // Lower both children in whichever Int52 representation the first
            // one already has, so the comparison sees matching formats.
            Int52Kind kind;
            LValue left = lowWhicheverInt52(m_node->child1(), kind);
            LValue right = lowInt52(m_node->child2(), kind);
            setBoolean(intFunctor(left, right));
            return;
        }
        
        if (m_node->isBinaryUseKind(DoubleRepUse)) {
            LValue left = lowDouble(m_node->child1());
            LValue right = lowDouble(m_node->child2());
            setBoolean(doubleFunctor(left, right));
            return;
        }
        
        if (m_node->isBinaryUseKind(StringIdentUse)) {
            // Atomized strings: compare the StringImpls directly, no VM state
            // touched (hence callWithoutSideEffects).
            LValue left = lowStringIdent(m_node->child1());
            LValue right = lowStringIdent(m_node->child2());
            setBoolean(m_out.callWithoutSideEffects(Int32, stringIdentFunction, left, right));
            return;
        }
        
        if (m_node->isBinaryUseKind(StringUse)) {
            LValue left = lowCell(m_node->child1());
            LValue right = lowCell(m_node->child2());
            speculateString(m_node->child1(), left);
            speculateString(m_node->child2(), right);
            
            LValue result = vmCall(
                Int32, m_out.operation(stringFunction),
                m_callFrame, left, right);
            setBoolean(result);
            return;
        }
        
        DFG_ASSERT(m_graph, m_node, m_node->isBinaryUseKind(UntypedUse), m_node->child1().useKind(), m_node->child2().useKind());
        nonSpeculativeCompare(intFunctor, fallbackFunction);
    }
12194
    // Lowers StringSlice. Fast paths: a non-positive span yields the shared
    // empty string; a one-character slice is fetched from the VM's
    // single-character string table (falling back to operationSingleCharacterString
    // for code units above maxSingleCharacterString); longer slices call
    // operationStringSubstr with the clamped range. Rope strings take
    // operationStringSlice with the original start/end.
    void compileStringSlice()
    {
        LBasicBlock lengthCheckCase = m_out.newBlock();
        LBasicBlock emptyCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock oneCharCase = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock is16Bit = m_out.newBlock();
        LBasicBlock bitsContinuation = m_out.newBlock();
        LBasicBlock bigCharacter = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock ropeSlowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        LValue start = lowInt32(m_node->child2());
        LValue end = nullptr;
        if (m_node->child3())
            end = lowInt32(m_node->child3());
        else
            end = m_out.constInt32(std::numeric_limits<int32_t>::max());
        // Ropes have no contiguous characters to index; go straight to the
        // C++ slice operation.
        m_out.branch(isRopeString(string, m_node->child1()), rarely(ropeSlowCase), usually(lengthCheckCase));

        LBasicBlock lastNext = m_out.appendTo(lengthCheckCase, emptyCase);
        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32NonNegative(stringImpl, m_heaps.StringImpl_length);
        // Clamp start/end into [0, length], resolving negative indices.
        auto range = populateSliceRange(start, end, length);
        LValue from = range.first;
        LValue to = range.second;
        LValue span = m_out.sub(to, from);
        m_out.branch(m_out.lessThanOrEqual(span, m_out.int32Zero), unsure(emptyCase), unsure(notEmptyCase));

        Vector<ValueFromBlock, 5> results;

        m_out.appendTo(emptyCase, notEmptyCase);
        results.append(m_out.anchor(weakPointer(jsEmptyString(&vm()))));
        m_out.jump(continuation);

        m_out.appendTo(notEmptyCase, oneCharCase);
        m_out.branch(m_out.equal(span, m_out.int32One), unsure(oneCharCase), unsure(slowCase));

        m_out.appendTo(oneCharCase, is8Bit);
        LValue storage = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(is16Bit), unsure(is8Bit));

        m_out.appendTo(is8Bit, is16Bit);
        ValueFromBlock char8Bit = m_out.anchor(m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, storage, m_out.zeroExtPtr(from))));
        m_out.jump(bitsContinuation);

        m_out.appendTo(is16Bit, bigCharacter);
        LValue char16BitValue = m_out.load16ZeroExt32(m_out.baseIndex(m_heaps.characters16, storage, m_out.zeroExtPtr(from)));
        ValueFromBlock char16Bit = m_out.anchor(char16BitValue);
        // Characters beyond the single-character table need a runtime call.
        m_out.branch(
            m_out.above(char16BitValue, m_out.constInt32(maxSingleCharacterString)),
            rarely(bigCharacter), usually(bitsContinuation));

        m_out.appendTo(bigCharacter, bitsContinuation);
        results.append(m_out.anchor(vmCall(
            Int64, m_out.operation(operationSingleCharacterString),
            m_callFrame, char16BitValue)));
        m_out.jump(continuation);

        m_out.appendTo(bitsContinuation, slowCase);
        LValue character = m_out.phi(Int32, char8Bit, char16Bit);
        LValue smallStrings = m_out.constIntPtr(vm().smallStrings.singleCharacterStrings());
        results.append(m_out.anchor(m_out.loadPtr(m_out.baseIndex(
            m_heaps.singleCharacterStrings, smallStrings, m_out.zeroExtPtr(character)))));
        m_out.jump(continuation);

        m_out.appendTo(slowCase, ropeSlowCase);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSubstr), m_callFrame, string, from, span)));
        m_out.jump(continuation);

        m_out.appendTo(ropeSlowCase, continuation);
        results.append(m_out.anchor(vmCall(pointerType(), m_out.operation(operationStringSlice), m_callFrame, string, start, end)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), results));
    }
12279
    // Lowers ToLowerCase. Fast path: scan a non-rope 8-bit string; if every
    // byte is ASCII and already non-uppercase, the input string is returned
    // unchanged. On the first uppercase or non-ASCII byte (or for ropes and
    // 16-bit strings) fall back to operationToLowerCase, passing the index at
    // which the scan stopped so the runtime can skip the verified prefix.
    void compileToLowerCase()
    {
        LBasicBlock notRope = m_out.newBlock();
        LBasicBlock is8Bit = m_out.newBlock();
        LBasicBlock loopTop = m_out.newBlock();
        LBasicBlock loopBody = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue string = lowString(m_node->child1());
        ValueFromBlock startIndex = m_out.anchor(m_out.constInt32(0));
        ValueFromBlock startIndexForCall = m_out.anchor(m_out.constInt32(0));
        m_out.branch(isRopeString(string, m_node->child1()),
            unsure(slowPath), unsure(notRope));

        LBasicBlock lastNext = m_out.appendTo(notRope, is8Bit);
        LValue impl = m_out.loadPtr(string, m_heaps.JSString_value);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(impl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowPath), unsure(is8Bit));

        m_out.appendTo(is8Bit, loopTop);
        LValue length = m_out.load32(impl, m_heaps.StringImpl_length);
        LValue buffer = m_out.loadPtr(impl, m_heaps.StringImpl_data);
        ValueFromBlock fastResult = m_out.anchor(string);
        m_out.jump(loopTop);

        m_out.appendTo(loopTop, loopBody);
        LValue index = m_out.phi(Int32, startIndex);
        ValueFromBlock indexFromBlock = m_out.anchor(index);
        // Scanned the whole string without finding a bad byte: done.
        m_out.branch(m_out.below(index, length),
            unsure(loopBody), unsure(continuation));

        m_out.appendTo(loopBody, slowPath);

        // FIXME: Strings needs to be caged.
        // https://bugs.webkit.org/show_bug.cgi?id=174924
        LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index)));
        LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F));
        LValue isUpperCase = m_out.belowOrEqual(m_out.sub(byte, m_out.constInt32('A')), m_out.constInt32('Z' - 'A'));
        LValue isBadCharacter = m_out.bitOr(isInvalidAsciiRange, isUpperCase);
        m_out.addIncomingToPhi(index, m_out.anchor(m_out.add(index, m_out.int32One)));
        m_out.branch(isBadCharacter, unsure(slowPath), unsure(loopTop));

        m_out.appendTo(slowPath, continuation);
        // Index is 0 when entered from the rope/16-bit checks, or the scan
        // position when the loop body found a bad character.
        LValue slowPathIndex = m_out.phi(Int32, startIndexForCall, indexFromBlock);
        ValueFromBlock slowResult = m_out.anchor(vmCall(pointerType(), m_out.operation(operationToLowerCase), m_callFrame, string, slowPathIndex));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setJSValue(m_out.phi(pointerType(), fastResult, slowResult));
    }
12334
12335 void compileNumberToStringWithRadix()
12336 {
12337 bool validRadixIsGuaranteed = false;
12338 if (m_node->child2()->isInt32Constant()) {
12339 int32_t radix = m_node->child2()->asInt32();
12340 if (radix >= 2 && radix <= 36)
12341 validRadixIsGuaranteed = true;
12342 }
12343
12344 switch (m_node->child1().useKind()) {
12345 case Int32Use:
12346 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString), m_callFrame, lowInt32(m_node->child1()), lowInt32(m_node->child2())));
12347 break;
12348 case Int52RepUse:
12349 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString), m_callFrame, lowStrictInt52(m_node->child1()), lowInt32(m_node->child2())));
12350 break;
12351 case DoubleRepUse:
12352 setJSValue(vmCall(pointerType(), m_out.operation(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString), m_callFrame, lowDouble(m_node->child1()), lowInt32(m_node->child2())));
12353 break;
12354 default:
12355 RELEASE_ASSERT_NOT_REACHED();
12356 }
12357 }
12358
12359 void compileNumberToStringWithValidRadixConstant()
12360 {
12361 switch (m_node->child1().useKind()) {
12362 case Int32Use:
12363 setJSValue(vmCall(pointerType(), m_out.operation(operationInt32ToStringWithValidRadix), m_callFrame, lowInt32(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12364 break;
12365 case Int52RepUse:
12366 setJSValue(vmCall(pointerType(), m_out.operation(operationInt52ToStringWithValidRadix), m_callFrame, lowStrictInt52(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12367 break;
12368 case DoubleRepUse:
12369 setJSValue(vmCall(pointerType(), m_out.operation(operationDoubleToStringWithValidRadix), m_callFrame, lowDouble(m_node->child1()), m_out.constInt32(m_node->validRadixConstant())));
12370 break;
12371 default:
12372 RELEASE_ASSERT_NOT_REACHED();
12373 }
12374 }
12375
12376 void compileResolveScopeForHoistingFuncDeclInEval()
12377 {
12378 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12379 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScopeForHoistingFuncDeclInEval), m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12380 }
12381
12382 void compileResolveScope()
12383 {
12384 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12385 setJSValue(vmCall(pointerType(), m_out.operation(operationResolveScope),
12386 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid)));
12387 }
12388
12389 void compileGetDynamicVar()
12390 {
12391 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12392 setJSValue(vmCall(Int64, m_out.operation(operationGetDynamicVar),
12393 m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12394 }
12395
12396 void compilePutDynamicVar()
12397 {
12398 UniquedStringImpl* uid = m_graph.identifiers()[m_node->identifierNumber()];
12399 setJSValue(vmCall(Void, m_out.operation(m_graph.isStrictModeFor(m_node->origin.semantic) ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict),
12400 m_callFrame, lowCell(m_node->child1()), lowJSValue(m_node->child2()), m_out.constIntPtr(uid), m_out.constInt32(m_node->getPutInfo())));
12401 }
12402
12403 void compileUnreachable()
12404 {
12405 // It's so tempting to assert that AI has proved that this is unreachable. But that's
12406 // simply not a requirement of the Unreachable opcode at all. If you emit an opcode that
12407 // *you* know will not return, then it's fine to end the basic block with Unreachable
12408 // after that opcode. You don't have to also prove to AI that your opcode does not return.
12409 // Hence, there is nothing to do here but emit code that will crash, so that we catch
12410 // cases where you said Unreachable but you lied.
12411 //
12412 // It's also also worth noting that some clients emit this opcode because they're not 100% sure
12413 // if the code is unreachable, but they would really prefer if we crashed rather than kept going
12414 // if it did turn out to be reachable. Hence, this needs to deterministically crash.
12415
12416 crash();
12417 }
12418
12419 void compileCheckSubClass()
12420 {
12421 LValue cell = lowCell(m_node->child1());
12422
12423 const ClassInfo* classInfo = m_node->classInfo();
12424 if (!classInfo->checkSubClassSnippet) {
12425 LBasicBlock loop = m_out.newBlock();
12426 LBasicBlock parentClass = m_out.newBlock();
12427 LBasicBlock continuation = m_out.newBlock();
12428
12429 LValue structure = loadStructure(cell);
12430 LValue classInfo = m_out.loadPtr(structure, m_heaps.Structure_classInfo);
12431 ValueFromBlock otherAtStart = m_out.anchor(classInfo);
12432 m_out.jump(loop);
12433
12434 LBasicBlock lastNext = m_out.appendTo(loop, parentClass);
12435 LValue other = m_out.phi(pointerType(), otherAtStart);
12436 m_out.branch(m_out.equal(other, m_out.constIntPtr(classInfo)), unsure(continuation), unsure(parentClass));
12437
12438 m_out.appendTo(parentClass, continuation);
12439 LValue parent = m_out.loadPtr(other, m_heaps.ClassInfo_parentClass);
12440 speculate(BadType, jsValueValue(cell), m_node->child1().node(), m_out.isNull(parent));
12441 m_out.addIncomingToPhi(other, m_out.anchor(parent));
12442 m_out.jump(loop);
12443
12444 m_out.appendTo(continuation, lastNext);
12445 return;
12446 }
12447
12448 RefPtr<Snippet> domJIT = classInfo->checkSubClassSnippet();
12449 PatchpointValue* patchpoint = m_out.patchpoint(Void);
12450 patchpoint->appendSomeRegister(cell);
12451 patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
12452 patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
12453
12454 NodeOrigin origin = m_origin;
12455 unsigned osrExitArgumentOffset = patchpoint->numChildren();
12456 OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(jsValueValue(cell), m_node->child1().node());
12457 patchpoint->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, jsValueValue(cell)));
12458
12459 patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
12460 patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
12461 patchpoint->clobber(RegisterSet::macroScratchRegisters());
12462
12463 State* state = &m_ftlState;
12464 Node* node = m_node;
12465 JSValue child1Constant = m_state.forNode(m_node->child1()).value();
12466
12467 patchpoint->setGenerator(
12468 [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12469 AllowMacroScratchRegisterUsage allowScratch(jit);
12470
12471 Vector<GPRReg> gpScratch;
12472 Vector<FPRReg> fpScratch;
12473 Vector<SnippetParams::Value> regs;
12474
12475 regs.append(SnippetParams::Value(params[0].gpr(), child1Constant));
12476
12477 for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
12478 gpScratch.append(params.gpScratch(i));
12479
12480 for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
12481 fpScratch.append(params.fpScratch(i));
12482
12483 RefPtr<OSRExitHandle> handle = exitDescriptor->emitOSRExitLater(*state, BadType, origin, params, osrExitArgumentOffset);
12484
12485 SnippetParams domJITParams(*state, params, node, nullptr, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
12486 CCallHelpers::JumpList failureCases = domJIT->generator()->run(jit, domJITParams);
12487
12488 jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
12489 linkBuffer.link(failureCases, linkBuffer.locationOf<NoPtrTag>(handle->label));
12490 });
12491 });
12492 patchpoint->effects = Effects::forCheck();
12493 }
12494
    // Lowers CallDOM: lowers each child according to the DOMJIT signature's
    // declared argument classes, then calls the signature's unsafe function
    // through an arity-matched function-pointer cast.
    void compileCallDOM()
    {
        const DOMJIT::Signature* signature = m_node->signature();

        // FIXME: We should have a way to call functions with the vector of registers.
        // https://bugs.webkit.org/show_bug.cgi?id=163099
        Vector<LValue, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;

        // Child 0 is the `this` cell; the remaining children are lowered by the
        // argument class recorded in the signature.
        unsigned index = 0;
        DFG_NODE_DO_TO_CHILDREN(m_graph, m_node, [&](Node*, Edge edge) {
            if (!index)
                operands.append(lowCell(edge));
            else {
                switch (signature->arguments[index - 1]) {
                case SpecString:
                    operands.append(lowString(edge));
                    break;
                case SpecInt32Only:
                    operands.append(lowInt32(edge));
                    break;
                case SpecBoolean:
                    operands.append(lowBoolean(edge));
                    break;
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                    break;
                }
            }
            ++index;
        });

        unsigned argumentCountIncludingThis = signature->argumentCount + 1;
        LValue result;
        assertIsTaggedWith(reinterpret_cast<void*>(signature->unsafeFunction), CFunctionPtrTag);
        // Dispatch on arity: the operation typedefs differ only in parameter count.
        switch (argumentCountIncludingThis) {
        case 1:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EP>(signature->unsafeFunction)), m_callFrame, operands[0]);
            break;
        case 2:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1]);
            break;
        case 3:
            result = vmCall(Int64, m_out.operation(reinterpret_cast<J_JITOperation_EPPP>(signature->unsafeFunction)), m_callFrame, operands[0], operands[1], operands[2]);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        setJSValue(result);
    }
12546
    // Lowers CallDOMGetter. Without a snippet we call the custom accessor
    // getter directly (setting topCallFrame ourselves, since the getter is not
    // an operation); with a snippet we run the DOMJIT-generated access inside
    // a patchpoint.
    void compileCallDOMGetter()
    {
        DOMJIT::CallDOMGetterSnippet* domJIT = m_node->callDOMGetterData()->snippet;
        if (!domJIT) {
            // The following function is not an operation: we directly call a custom accessor getter.
            // Since the getter does not have code setting topCallFrame, As is the same to IC, we should set topCallFrame in caller side.
            m_out.storePtr(m_callFrame, m_out.absolute(&vm().topCallFrame));
            setJSValue(
                vmCall(Int64, m_out.operation(m_node->callDOMGetterData()->customAccessorGetter.retaggedExecutableAddress<CFunctionPtrTag>()),
                    m_callFrame, lowCell(m_node->child1()), m_out.constIntPtr(m_graph.identifiers()[m_node->callDOMGetterData()->identifierNumber])));
            return;
        }

        Edge& baseEdge = m_node->child1();
        LValue base = lowCell(baseEdge);
        // AI-proven constants (if any) are forwarded to the snippet.
        JSValue baseConstant = m_state.forNode(baseEdge).value();

        LValue globalObject;
        JSValue globalObjectConstant;
        if (domJIT->requireGlobalObject) {
            Edge& globalObjectEdge = m_node->child2();
            globalObject = lowCell(globalObjectEdge);
            globalObjectConstant = m_state.forNode(globalObjectEdge).value();
        }

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(base);
        if (domJIT->requireGlobalObject)
            patchpoint->appendSomeRegister(globalObject);
        patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle = preparePatchpointForExceptions(patchpoint);
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->numGPScratchRegisters = domJIT->numGPScratchRegisters;
        patchpoint->numFPScratchRegisters = domJIT->numFPScratchRegisters;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        State* state = &m_ftlState;
        Node* node = m_node;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Vector<GPRReg> gpScratch;
                Vector<FPRReg> fpScratch;
                Vector<SnippetParams::Value> regs;

                // params[0] is the result; params[1] the base; params[2] the
                // global object when the snippet requires one.
                regs.append(JSValueRegs(params[0].gpr()));
                regs.append(SnippetParams::Value(params[1].gpr(), baseConstant));
                if (domJIT->requireGlobalObject)
                    regs.append(SnippetParams::Value(params[2].gpr(), globalObjectConstant));

                for (unsigned i = 0; i < domJIT->numGPScratchRegisters; ++i)
                    gpScratch.append(params.gpScratch(i));

                for (unsigned i = 0; i < domJIT->numFPScratchRegisters; ++i)
                    fpScratch.append(params.fpScratch(i));

                Box<CCallHelpers::JumpList> exceptions = exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                SnippetParams domJITParams(*state, params, node, exceptions, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
                domJIT->generator()->run(jit, domJITParams);
            });
        patchpoint->effects = Effects::forCall();
        setJSValue(patchpoint);
    }
12613
    // Lowers FilterICStatus: forwards the node to the abstract interpreter so
    // it can narrow the IC status information it tracks for this code path.
    // Emits no B3 IR of its own.
    void compileFilterICStatus()
    {
        m_interpreter.filterICStatus(m_node);
    }
12618
12619 LValue byteSwap32(LValue value)
12620 {
12621 // FIXME: teach B3 byteswap
12622 // https://bugs.webkit.org/show_bug.cgi?id=188759
12623
12624 RELEASE_ASSERT(value->type() == Int32);
12625 PatchpointValue* patchpoint = m_out.patchpoint(Int32);
12626 patchpoint->appendSomeRegister(value);
12627 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12628 jit.move(params[1].gpr(), params[0].gpr());
12629 jit.byteSwap32(params[0].gpr());
12630 });
12631 patchpoint->effects = Effects::none();
12632 return patchpoint;
12633 }
12634
12635 LValue byteSwap64(LValue value)
12636 {
12637 // FIXME: teach B3 byteswap
12638 // https://bugs.webkit.org/show_bug.cgi?id=188759
12639
12640 RELEASE_ASSERT(value->type() == Int64);
12641 PatchpointValue* patchpoint = m_out.patchpoint(Int64);
12642 patchpoint->appendSomeRegister(value);
12643 patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
12644 jit.move(params[1].gpr(), params[0].gpr());
12645 jit.byteSwap64(params[0].gpr());
12646 });
12647 patchpoint->effects = Effects::none();
12648 return patchpoint;
12649 }
12650
    // Emits a runtime branch on |isLittleEndian| (bit 0 set => little endian)
    // and runs one emitter on each side. If the emitters produce values, both
    // must produce values of the same B3 type and the merged value is
    // returned through a phi; if neither produces a value (pure side-effect
    // emitters, e.g. stores), nullptr is returned.
    template <typename F1, typename F2>
    LValue emitCodeBasedOnEndiannessBranch(LValue isLittleEndian, const F1& emitLittleEndianCode, const F2& emitBigEndianCode)
    {
        LType type;

        LBasicBlock bigEndianCase = m_out.newBlock();
        LBasicBlock littleEndianCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Bit 0 clear means big endian.
        m_out.branch(m_out.testIsZero32(isLittleEndian, m_out.constInt32(1)),
            unsure(bigEndianCase), unsure(littleEndianCase));

        LBasicBlock lastNext = m_out.appendTo(bigEndianCase, littleEndianCase);
        LValue bigEndianValue = emitBigEndianCode();
        // The big-endian side runs first and fixes the expected result type.
        type = bigEndianValue ? bigEndianValue->type() : Void;
        ValueFromBlock bigEndianResult = bigEndianValue ? m_out.anchor(bigEndianValue) : ValueFromBlock();
        m_out.jump(continuation);

        m_out.appendTo(littleEndianCase, continuation);
        LValue littleEndianValue = emitLittleEndianCode();
        ValueFromBlock littleEndianResult = littleEndianValue ? m_out.anchor(littleEndianValue) : ValueFromBlock();
        // NOTE(review): this assert assumes both emitters agree on whether
        // they produce a value; if only the big-endian side produced one, the
        // type comparison would dereference a null littleEndianValue —
        // confirm all callers pair value-producing (or both-void) emitters.
        RELEASE_ASSERT((!littleEndianValue && !bigEndianValue) || type == littleEndianValue->type());
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        RELEASE_ASSERT(!!bigEndianResult == !!littleEndianResult);
        if (bigEndianResult)
            return m_out.phi(type, bigEndianResult, littleEndianResult);
        return nullptr;
    }
12681
    // Lowers DataViewGetInt / DataViewGetFloat: bounds-checks the access
    // against the view's length (accounting for multi-byte reads), loads from
    // the Gigacage-caged backing vector, and byte-swaps when the requested
    // endianness — known statically via the tri-state in DataViewData, or
    // only at runtime via the child3 boolean — differs from the byte order
    // the plain loads produce.
    void compileDataViewGet()
    {
        LValue dataView = lowDataViewObject(m_node->child1());
        LValue index = lowInt32(m_node->child2());
        LValue isLittleEndian = nullptr;
        if (m_node->child3())
            isLittleEndian = lowBoolean(m_node->child3());

        DataViewData data = m_node->dataViewData();

        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        // A byteSize-wide access touches bytes [index, index + byteSize - 1],
        // so bounds-check the last byte; the add is done on zero-extended
        // 64-bit values, so it cannot wrap for 32-bit indices.
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);

        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (m_node->op() == DataViewGetInt) {
            switch (data.byteSize) {
            case 1:
                // A single byte has no endianness; just pick sign vs. zero
                // extension.
                if (data.isSigned)
                    setInt32(m_out.load8SignExt32(pointer));
                else
                    setInt32(m_out.load8ZeroExt32(pointer));
                break;
            case 2: {
                auto emitLittleEndianLoad = [&] {
                    if (data.isSigned)
                        return m_out.load16SignExt32(pointer);
                    return m_out.load16ZeroExt32(pointer);
                };

                auto emitBigEndianLoad = [&] {
                    // Load as-is, then swap the two bytes in a patchpoint
                    // (B3 has no byteswap opcode); sign-extend afterwards if
                    // the JS-visible type is signed.
                    LValue val = m_out.load16ZeroExt32(pointer);

                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(val);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                        if (data.isSigned)
                            jit.signExtend16To32(params[0].gpr(), params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    return patchpoint;
                };

                if (data.isLittleEndian == FalseTriState)
                    setInt32(emitBigEndianLoad());
                else if (data.isLittleEndian == TrueTriState)
                    setInt32(emitLittleEndianLoad());
                else
                    setInt32(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianLoad, emitBigEndianLoad));

                break;
            }
            case 4: {
                LValue loadedValue = m_out.load32(pointer);

                if (data.isLittleEndian == FalseTriState)
                    loadedValue = byteSwap32(loadedValue);
                else if (data.isLittleEndian == MixedTriState) {
                    // Endianness only known at runtime: branch and swap on
                    // the big-endian side only.
                    auto emitLittleEndianCode = [&] {
                        return loadedValue;
                    };
                    auto emitBigEndianCode = [&] {
                        return byteSwap32(loadedValue);
                    };

                    loadedValue = emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                }

                // An unsigned 32-bit result may not fit in an int32 JSValue,
                // so represent it as a strict Int52 instead.
                if (data.isSigned)
                    setInt32(loadedValue);
                else
                    setStrictInt52(m_out.zeroExt(loadedValue, Int64));

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        } else {
            // DataViewGetFloat: results are always produced as doubles.
            switch (data.byteSize) {
            case 4: {
                auto emitLittleEndianCode = [&] {
                    return m_out.floatToDouble(m_out.loadFloat(pointer));
                };

                auto emitBigEndianCode = [&] {
                    // Swap the raw bits in a GPR scratch, then move into an
                    // FPR and widen float -> double.
                    LValue loadedValue = m_out.load32(pointer);
                    PatchpointValue* patchpoint = m_out.patchpoint(Double);
                    patchpoint->appendSomeRegister(loadedValue);
                    patchpoint->numGPScratchRegisters = 1;
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params.gpScratch(0));
                        jit.byteSwap32(params.gpScratch(0));
                        jit.move32ToFloat(params.gpScratch(0), params[0].fpr());
                        jit.convertFloatToDouble(params[0].fpr(), params[0].fpr());
                    });
                    patchpoint->effects = Effects::none();
                    return patchpoint;
                };

                if (data.isLittleEndian == TrueTriState)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == FalseTriState)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            case 8: {
                auto emitLittleEndianCode = [&] {
                    return m_out.loadDouble(pointer);
                };

                auto emitBigEndianCode = [&] {
                    // Swap all 8 bytes as an Int64, then reinterpret the bits
                    // as a double.
                    LValue loadedValue = m_out.load64(pointer);
                    loadedValue = byteSwap64(loadedValue);
                    return m_out.bitCast(loadedValue, Double);
                };

                if (data.isLittleEndian == TrueTriState)
                    setDouble(emitLittleEndianCode());
                else if (data.isLittleEndian == FalseTriState)
                    setDouble(emitBigEndianCode());
                else
                    setDouble(emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode));

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
12824
    // Lowers DataViewSet: bounds-checks the write against the view's length
    // (accounting for multi-byte stores), lowers the value operand according
    // to its use kind, and stores into the Gigacage-caged backing vector,
    // byte-swapping when the requested endianness (static tri-state, or the
    // runtime varArgChild(3) boolean) requires it. Var-arg children are:
    // 0 = dataView, 1 = index, 2 = value, 3 = optional isLittleEndian.
    void compileDataViewSet()
    {
        LValue dataView = lowDataViewObject(m_graph.varArgChild(m_node, 0));
        LValue index = lowInt32(m_graph.varArgChild(m_node, 1));
        LValue isLittleEndian = nullptr;
        if (m_graph.varArgChild(m_node, 3))
            isLittleEndian = lowBoolean(m_graph.varArgChild(m_node, 3));

        DataViewData data = m_node->dataViewData();

        LValue length = m_out.zeroExtPtr(m_out.load32NonNegative(dataView, m_heaps.JSArrayBufferView_length));
        LValue indexToCheck = m_out.zeroExtPtr(index);
        // Check the last byte touched by a byteSize-wide store; the add is on
        // zero-extended 64-bit values, so it cannot wrap for 32-bit indices.
        if (data.byteSize > 1)
            indexToCheck = m_out.add(indexToCheck, m_out.constInt64(data.byteSize - 1));
        speculate(OutOfBounds, noValue(), nullptr, m_out.aboveOrEqual(indexToCheck, length));

        Edge& valueEdge = m_graph.varArgChild(m_node, 2);
        LValue valueToStore;
        switch (valueEdge.useKind()) {
        case Int32Use:
            valueToStore = lowInt32(valueEdge);
            break;
        case DoubleRepUse:
            valueToStore = lowDouble(valueEdge);
            break;
        case Int52RepUse:
            valueToStore = lowStrictInt52(valueEdge);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        LValue vector = caged(Gigacage::Primitive, m_out.loadPtr(dataView, m_heaps.JSArrayBufferView_vector), dataView);
        TypedPointer pointer(m_heaps.typedArrayProperties, m_out.add(vector, m_out.zeroExtPtr(index)));

        if (data.isFloatingPoint) {
            if (data.byteSize == 4) {
                // Float32 store: narrow the double first.
                valueToStore = m_out.doubleToFloat(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeFloat(valueToStore, pointer);
                    return nullptr;
                };

                auto emitBigEndianCode = [&] () -> LValue {
                    // Move the float bits into a GPR, swap, and store the raw
                    // 32 bits.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.moveFloatTo32(params[1].fpr(), params[0].gpr());
                        jit.byteSwap32(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();
                    m_out.store32(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

            } else {
                RELEASE_ASSERT(data.byteSize == 8);
                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.storeDouble(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    // Reinterpret the double as Int64, swap, and store.
                    m_out.store64(byteSwap64(m_out.bitCast(valueToStore, Int64)), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
            }
        } else {
            switch (data.byteSize) {
            case 1:
                // A single byte has no endianness.
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
                m_out.store32As8(valueToStore, pointer);
                break;
            case 2: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32As16(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    // Swap the low 16 bits in a patchpoint, then store them.
                    PatchpointValue* patchpoint = m_out.patchpoint(Int32);
                    patchpoint->appendSomeRegister(valueToStore);
                    patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                        jit.move(params[1].gpr(), params[0].gpr());
                        jit.byteSwap16(params[0].gpr());
                    });
                    patchpoint->effects = Effects::none();

                    m_out.store32As16(patchpoint, pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);
                break;
            }
            case 4: {
                RELEASE_ASSERT(valueEdge.useKind() == Int32Use || valueEdge.useKind() == Int52RepUse);

                // setUint32 values arrive as Int52; truncate to the low 32
                // bits before storing.
                if (valueEdge.useKind() == Int52RepUse)
                    valueToStore = m_out.castToInt32(valueToStore);

                auto emitLittleEndianCode = [&] () -> LValue {
                    m_out.store32(valueToStore, pointer);
                    return nullptr;
                };
                auto emitBigEndianCode = [&] () -> LValue {
                    m_out.store32(byteSwap32(valueToStore), pointer);
                    return nullptr;
                };

                if (data.isLittleEndian == FalseTriState)
                    emitBigEndianCode();
                else if (data.isLittleEndian == TrueTriState)
                    emitLittleEndianCode();
                else
                    emitCodeBasedOnEndiannessBranch(isLittleEndian, emitLittleEndianCode, emitBigEndianCode);

                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }
    }
12969
12970 void emitSwitchForMultiByOffset(LValue base, bool structuresChecked, Vector<SwitchCase, 2>& cases, LBasicBlock exit)
12971 {
12972 if (cases.isEmpty()) {
12973 m_out.jump(exit);
12974 return;
12975 }
12976
12977 if (structuresChecked) {
12978 std::sort(
12979 cases.begin(), cases.end(),
12980 [&] (const SwitchCase& a, const SwitchCase& b) -> bool {
12981 return a.value()->asInt() < b.value()->asInt();
12982 });
12983 SwitchCase last = cases.takeLast();
12984 m_out.switchInstruction(
12985 m_out.load32(base, m_heaps.JSCell_structureID), cases, last.target(), Weight(0));
12986 return;
12987 }
12988
12989 m_out.switchInstruction(
12990 m_out.load32(base, m_heaps.JSCell_structureID), cases, exit, Weight(0));
12991 }
12992
    // Lowers (Object|Other) == Object. The right child is speculated to be a
    // truthy (non-masquerading) object cell. If the left value is a cell, it
    // is likewise speculated to be a truthy object and equality is pointer
    // identity; otherwise the left value is speculated to be Other
    // (null/undefined) and the comparison is false.
    void compareEqObjectOrOtherToObject(Edge leftChild, Edge rightChild)
    {
        LValue rightCell = lowCell(rightChild);
        LValue leftValue = lowJSValue(leftChild, ManualOperandSpeculation);

        speculateTruthyObject(rightChild, rightCell, SpecObject);

        LBasicBlock leftCellCase = m_out.newBlock();
        LBasicBlock leftNotCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            isCell(leftValue, provenType(leftChild)),
            unsure(leftCellCase), unsure(leftNotCellCase));

        LBasicBlock lastNext = m_out.appendTo(leftCellCase, leftNotCellCase);
        // The filter admits non-cells here because the non-cell path below
        // handles them; only cells reach this check.
        speculateTruthyObject(leftChild, leftValue, SpecObject | (~SpecCellCheck));
        ValueFromBlock cellResult = m_out.anchor(m_out.equal(rightCell, leftValue));
        m_out.jump(continuation);

        m_out.appendTo(leftNotCellCase, continuation);
        // Non-cell left operand must be Other (null/undefined); an object
        // never equals those, so the result is false.
        FTL_TYPE_CHECK(
            jsValueValue(leftValue), leftChild, SpecOther | SpecCellCheck, isNotOther(leftValue));
        ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, cellResult, notCellResult));
    }
13022
    // Speculates that |cell| is a truthy object: it must be an object, and —
    // unless the masquerades-as-undefined watchpoint is still valid — it must
    // not carry the MasqueradesAsUndefined type-info flag.
    void speculateTruthyObject(Edge edge, LValue cell, SpeculatedType filter)
    {
        if (masqueradesAsUndefinedWatchpointIsStillValid()) {
            // The watchpoint guarantees no masquerading object exists, so the
            // object check alone suffices.
            FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
            return;
        }

        FTL_TYPE_CHECK(jsValueValue(cell), edge, filter, isNotObject(cell));
        // Also bail out if the cell masquerades as undefined.
        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
13037
    // Lowers a generic (non-speculative) JSValue comparison. Fast path: when
    // both operands are boxed int32s, compare the unboxed values with
    // |intFunctor|. Otherwise fall back to the C++ |helperFunction|, whose
    // nonzero/zero result is turned into a boolean.
    template<typename IntFunctor>
    void nonSpeculativeCompare(const IntFunctor& intFunctor, S_JITOperation_EJJ helperFunction)
    {
        LValue left = lowJSValue(m_node->child1());
        LValue right = lowJSValue(m_node->child2());

        LBasicBlock leftIsInt = m_out.newBlock();
        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotInt32(left, provenType(m_node->child1())), rarely(slowPath), usually(leftIsInt));

        LBasicBlock lastNext = m_out.appendTo(leftIsInt, fastPath);
        m_out.branch(isNotInt32(right, provenType(m_node->child2())), rarely(slowPath), usually(fastPath));

        m_out.appendTo(fastPath, slowPath);
        ValueFromBlock fastResult = m_out.anchor(intFunctor(unboxInt32(left), unboxInt32(right)));
        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);
        // The helper's result is reduced to a boolean via a null check.
        ValueFromBlock slowResult = m_out.anchor(m_out.notNull(vmCall(
            pointerType(), m_out.operation(helperFunction), m_callFrame, left, right)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        setBoolean(m_out.phi(Int32, fastResult, slowResult));
    }
13066
    // Emits an inline equality test for two JSStrings, returning a boolean
    // LValue. The fast path requires both strings to be resolved (non-rope)
    // and 8-bit: unequal lengths are trivially unequal, empty strings are
    // trivially equal, and otherwise the characters are compared one byte at
    // a time from the last index down. Ropes and 16-bit strings fall back to
    // operationCompareStringEq. The optional edges are only used to refine
    // the rope checks with proven type information.
    LValue stringsEqual(LValue leftJSString, LValue rightJSString, Edge leftJSStringEdge = Edge(), Edge rightJSStringEdge = Edge())
    {
        LBasicBlock notTriviallyUnequalCase = m_out.newBlock();
        LBasicBlock notEmptyCase = m_out.newBlock();
        LBasicBlock leftReadyCase = m_out.newBlock();
        LBasicBlock rightReadyCase = m_out.newBlock();
        LBasicBlock left8BitCase = m_out.newBlock();
        LBasicBlock right8BitCase = m_out.newBlock();
        LBasicBlock loop = m_out.newBlock();
        LBasicBlock bytesEqual = m_out.newBlock();
        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock falseCase = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Ropes have no flat character buffer; defer them to the slow path.
        m_out.branch(isRopeString(leftJSString, leftJSStringEdge), rarely(slowCase), usually(leftReadyCase));

        LBasicBlock lastNext = m_out.appendTo(leftReadyCase, rightReadyCase);
        m_out.branch(isRopeString(rightJSString, rightJSStringEdge), rarely(slowCase), usually(rightReadyCase));

        m_out.appendTo(rightReadyCase, notTriviallyUnequalCase);
        LValue left = m_out.loadPtr(leftJSString, m_heaps.JSString_value);
        LValue right = m_out.loadPtr(rightJSString, m_heaps.JSString_value);
        LValue length = m_out.load32(left, m_heaps.StringImpl_length);
        // Different lengths can never be equal.
        m_out.branch(
            m_out.notEqual(length, m_out.load32(right, m_heaps.StringImpl_length)),
            unsure(falseCase), unsure(notTriviallyUnequalCase));

        m_out.appendTo(notTriviallyUnequalCase, notEmptyCase);
        // Two empty strings of equal length are equal.
        m_out.branch(m_out.isZero32(length), unsure(trueCase), unsure(notEmptyCase));

        m_out.appendTo(notEmptyCase, left8BitCase);
        // Only handle 8-bit (Latin-1) impls inline; 16-bit goes slow.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(left, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowCase), unsure(left8BitCase));

        m_out.appendTo(left8BitCase, right8BitCase);
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(right, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowCase), unsure(right8BitCase));

        m_out.appendTo(right8BitCase, loop);

        LValue leftData = m_out.loadPtr(left, m_heaps.StringImpl_data);
        LValue rightData = m_out.loadPtr(right, m_heaps.StringImpl_data);

        // The loop counter starts at |length| and is pre-decremented, so each
        // iteration compares index (counter - 1), walking from the end.
        ValueFromBlock indexAtStart = m_out.anchor(length);

        m_out.jump(loop);

        m_out.appendTo(loop, bytesEqual);

        LValue indexAtLoopTop = m_out.phi(Int32, indexAtStart);
        LValue indexInLoop = m_out.sub(indexAtLoopTop, m_out.int32One);

        LValue leftByte = m_out.load8ZeroExt32(
            m_out.baseIndex(m_heaps.characters8, leftData, m_out.zeroExtPtr(indexInLoop)));
        LValue rightByte = m_out.load8ZeroExt32(
            m_out.baseIndex(m_heaps.characters8, rightData, m_out.zeroExtPtr(indexInLoop)));

        m_out.branch(m_out.notEqual(leftByte, rightByte), unsure(falseCase), unsure(bytesEqual));

        m_out.appendTo(bytesEqual, trueCase);

        // Feed the decremented index back into the loop-top phi.
        ValueFromBlock indexForNextIteration = m_out.anchor(indexInLoop);
        m_out.addIncomingToPhi(indexAtLoopTop, indexForNextIteration);
        m_out.branch(m_out.notZero32(indexInLoop), unsure(loop), unsure(trueCase));

        m_out.appendTo(trueCase, falseCase);

        ValueFromBlock trueResult = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(falseCase, slowCase);

        ValueFromBlock falseResult = m_out.anchor(m_out.booleanFalse);
        m_out.jump(continuation);

        m_out.appendTo(slowCase, continuation);

        LValue slowResultValue = vmCall(
            Int64, m_out.operation(operationCompareStringEq), m_callFrame,
            leftJSString, rightJSString);
        ValueFromBlock slowResult = m_out.anchor(unboxBoolean(slowResultValue));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, trueResult, falseResult, slowResult);
    }
13160
    // Whether emitBinarySnippet must reserve an extra scratch FPR beyond the
    // two it always provides to the snippet generator.
    enum ScratchFPRUsage {
        DontNeedScratchFPR,
        NeedScratchFPR
    };
    // Lowers a binary arithmetic node using an inline JIT snippet generator.
    // The generator emits the fast path inline; its slow-path jumps (or the
    // whole operation, if no fast path was emitted) call |slowPathFunction|
    // on a late path. Operands and result are boxed JSValues held in GPRs.
    template<typename BinaryArithOpGenerator, ScratchFPRUsage scratchFPRUsage = DontNeedScratchFPR>
    void emitBinarySnippet(J_JITOperation_EJJ slowPathFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // The snippet boxes/unboxes JSValues, so it needs the tag registers
        // live across the patchpoint.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 2;
        if (scratchFPRUsage == NeedScratchFPR)
            patchpoint->numFPScratchRegisters++;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result; params[1]/params[2] are the
                // left/right operands.
                auto generator = Box<BinaryArithOpGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                    params.fpScratch(0), params.fpScratch(1), params.gpScratch(0),
                    scratchFPRUsage == NeedScratchFPR ? params.fpScratch(2) : InvalidFPRReg);

                generator->generateFastPath(jit);

                if (generator->didEmitFastPath()) {
                    generator->endJumpList().link(&jit);
                    CCallHelpers::Label done = jit.label();

                    // Slow path is emitted out-of-line and jumps back to
                    // |done| when finished.
                    params.addLatePath(
                        [=] (CCallHelpers& jit) {
                            AllowMacroScratchRegisterUsage allowScratch(jit);

                            generator->slowPathJumpList().link(&jit);
                            callOperation(
                                *state, params.unavailableRegisters(), jit, node->origin.semantic,
                                exceptions.get(), slowPathFunction, params[0].gpr(),
                                params[1].gpr(), params[2].gpr());
                            jit.jump().linkTo(done, &jit);
                        });
                } else {
                    // No fast path for these operand types: always call out.
                    callOperation(
                        *state, params.unavailableRegisters(), jit, node->origin.semantic,
                        exceptions.get(), slowPathFunction, params[0].gpr(), params[1].gpr(),
                        params[2].gpr());
                }
            });

        setJSValue(patchpoint);
    }
13230
    // Lowers a binary bitwise op using an inline JIT snippet generator. Like
    // emitBinarySnippet, but bit-op generators always emit a fast path, so
    // the slow-path call to |slowPathFunction| is unconditionally routed
    // through a late path.
    template<typename BinaryBitOpGenerator>
    void emitBinaryBitOpSnippet(J_JITOperation_EJJ slowPathFunction)
    {
        Node* node = m_node;

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // Tag registers are needed for JSValue boxing/unboxing in the snippet.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result; params[1]/params[2] are operands.
                auto generator = Box<BinaryBitOpGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), params.gpScratch(0));

                generator->generateFastPath(jit);
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);
                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            });

        setJSValue(patchpoint);
    }
13283
    // Lowers a right-shift (signed or unsigned, per |shiftType|) using the
    // JITRightShiftGenerator snippet, selecting the matching slow-path C++
    // operation for non-int operands.
    void emitRightShiftSnippet(JITRightShiftGenerator::ShiftType shiftType)
    {
        Node* node = m_node;

        // FIXME: Make this do exceptions.
        // https://bugs.webkit.org/show_bug.cgi?id=151686

        LValue left = lowJSValue(node->child1());
        LValue right = lowJSValue(node->child2());

        SnippetOperand leftOperand(m_state.forNode(node->child1()).resultType());
        SnippetOperand rightOperand(m_state.forNode(node->child2()).resultType());

        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
        patchpoint->appendSomeRegister(left);
        patchpoint->appendSomeRegister(right);
        // Tag registers are needed for JSValue boxing/unboxing in the snippet.
        patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
        patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(patchpoint);
        patchpoint->numGPScratchRegisters = 1;
        patchpoint->numFPScratchRegisters = 1;
        patchpoint->clobber(RegisterSet::macroScratchRegisters());
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;
        State* state = &m_ftlState;
        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsage allowScratch(jit);

                Box<CCallHelpers::JumpList> exceptions =
                    exceptionHandle->scheduleExitCreation(params)->jumps(jit);

                // params[0] is the result; params[1]/params[2] are operands.
                auto generator = Box<JITRightShiftGenerator>::create(
                    leftOperand, rightOperand, JSValueRegs(params[0].gpr()),
                    JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()),
                    params.fpScratch(0), params.gpScratch(0), InvalidFPRReg, shiftType);

                generator->generateFastPath(jit);
                generator->endJumpList().link(&jit);
                CCallHelpers::Label done = jit.label();

                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);

                        generator->slowPathJumpList().link(&jit);

                        // Pick the helper matching the shift's signedness.
                        J_JITOperation_EJJ slowPathFunction =
                            shiftType == JITRightShiftGenerator::SignedShift
                            ? operationValueBitRShift : operationValueBitURShift;

                        callOperation(
                            *state, params.unavailableRegisters(), jit, node->origin.semantic,
                            exceptions.get(), slowPathFunction, params[0].gpr(),
                            params[1].gpr(), params[2].gpr());
                        jit.jump().linkTo(done, &jit);
                    });
            });

        setJSValue(patchpoint);
    }
13345
    // Emits the inline (fast-path) heap allocation from |allocator|, which is
    // either a compile-time-constant LocalAllocator* or a runtime pointer
    // value. Branches to |slowPath| when the allocator is known/found to be
    // null or when the inline bump allocation fails; otherwise returns the
    // newly allocated, uninitialized cell pointer.
    LValue allocateHeapCell(LValue allocator, LBasicBlock slowPath)
    {
        JITAllocator actualAllocator;
        if (allocator->hasIntPtr())
            actualAllocator = JITAllocator::constant(Allocator(bitwise_cast<LocalAllocator*>(allocator->asIntPtr())));
        else
            actualAllocator = JITAllocator::variable();

        if (actualAllocator.isConstant()) {
            // A constant null allocator means this size class can never be
            // allocated inline: unconditionally take the slow path.
            if (!actualAllocator.allocator()) {
                LBasicBlock haveAllocator = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
                m_out.jump(slowPath);
                m_out.appendTo(haveAllocator, lastNext);
                return m_out.intPtrZero;
            }
        } else {
            // This means that either we know that the allocator is null or we don't know what the
            // allocator is. In either case, we need the null check.
            LBasicBlock haveAllocator = m_out.newBlock();
            LBasicBlock lastNext = m_out.insertNewBlocksBefore(haveAllocator);
            m_out.branch(
                m_out.notEqual(allocator, m_out.intPtrZero),
                usually(haveAllocator), rarely(slowPath));
            m_out.appendTo(haveAllocator, lastNext);
        }

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
        if (isARM64()) {
            // emitAllocateWithNonNullAllocator uses the scratch registers on ARM.
            patchpoint->clobber(RegisterSet::macroScratchRegisters());
        }
        patchpoint->effects.terminal = true;
        // Constant allocators get a second scratch register to hold the
        // allocator pointer (gpScratch(1) below); variable allocators pass it
        // in as params[1]. One scratch (gpScratch(0)) is always reserved for
        // the allocation itself.
        if (actualAllocator.isConstant())
            patchpoint->numGPScratchRegisters++;
        else
            patchpoint->appendSomeRegisterWithClobber(allocator);
        patchpoint->numGPScratchRegisters++;
        patchpoint->resultConstraint = ValueRep::SomeEarlyRegister;

        // Successor 0 = allocation succeeded, successor 1 = slow path.
        m_out.appendSuccessor(usually(continuation));
        m_out.appendSuccessor(rarely(slowPath));

        patchpoint->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                AllowMacroScratchRegisterUsageIf allowScratchIf(jit, isARM64());
                CCallHelpers::JumpList jumpToSlowPath;

                GPRReg allocatorGPR;
                if (actualAllocator.isConstant())
                    allocatorGPR = params.gpScratch(1);
                else
                    allocatorGPR = params[1].gpr();

                // We use a patchpoint to emit the allocation path because whenever we mess with
                // allocation paths, we already reason about them at the machine code level. We know
                // exactly what instruction sequence we want. We're confident that no compiler
                // optimization could make this code better. So, it's best to have the code in
                // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by
                // all of the compiler tiers.
                jit.emitAllocateWithNonNullAllocator(
                    params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
                    jumpToSlowPath);

                CCallHelpers::Jump jumpToSuccess;
                if (!params.fallsThroughToSuccessor(0))
                    jumpToSuccess = jit.jump();

                Vector<Box<CCallHelpers::Label>> labels = params.successorLabels();

                // Successor labels are only resolved late, so link both exits
                // on a late path.
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        jumpToSlowPath.linkTo(*labels[1], &jit);
                        if (jumpToSuccess.isSet())
                            jumpToSuccess.linkTo(*labels[0], &jit);
                    });
            });

        m_out.appendTo(continuation, lastNext);
        return patchpoint;
    }
13431
    // Initializes a cell's header from a compile-time-known structure: the
    // structure ID, then the combined indexing-type/type-info blob stored in
    // the cell header ("useful bytes").
    void storeStructure(LValue object, Structure* structure)
    {
        m_out.store32(m_out.constInt32(structure->id()), object, m_heaps.JSCell_structureID);
        m_out.store32(
            m_out.constInt32(structure->objectInitializationBlob()),
            object, m_heaps.JSCell_usefulBytes);
    }
13439
13440 void storeStructure(LValue object, LValue structure)
13441 {
13442 if (structure->hasIntPtr()) {
13443 storeStructure(object, bitwise_cast<Structure*>(structure->asIntPtr()));
13444 return;
13445 }
13446
13447 LValue id = m_out.load32(structure, m_heaps.Structure_structureID);
13448 m_out.store32(id, object, m_heaps.JSCell_structureID);
13449
13450 LValue blob = m_out.load32(structure, m_heaps.Structure_indexingModeIncludingHistory);
13451 m_out.store32(blob, object, m_heaps.JSCell_usefulBytes);
13452 }
13453
13454 template <typename StructureType>
13455 LValue allocateCell(LValue allocator, StructureType structure, LBasicBlock slowPath)
13456 {
13457 LValue result = allocateHeapCell(allocator, slowPath);
13458 storeStructure(result, structure);
13459 return result;
13460 }
13461
13462 LValue allocateObject(LValue allocator, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13463 {
13464 return allocateObject(allocator, weakStructure(structure), butterfly, slowPath);
13465 }
13466
13467 LValue allocateObject(LValue allocator, LValue structure, LValue butterfly, LBasicBlock slowPath)
13468 {
13469 LValue result = allocateCell(allocator, structure, slowPath);
13470 if (structure->hasIntPtr()) {
13471 splatWords(
13472 result,
13473 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
13474 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8 + bitwise_cast<Structure*>(structure->asIntPtr())->inlineCapacity()),
13475 m_out.int64Zero,
13476 m_heaps.properties.atAnyNumber());
13477 } else {
13478 LValue end = m_out.add(
13479 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
13480 m_out.load8ZeroExt32(structure, m_heaps.Structure_inlineCapacity));
13481 splatWords(
13482 result,
13483 m_out.constInt32(JSFinalObject::offsetOfInlineStorage() / 8),
13484 end,
13485 m_out.int64Zero,
13486 m_heaps.properties.atAnyNumber());
13487 }
13488
13489 m_out.storePtr(butterfly, result, m_heaps.JSObject_butterfly);
13490 return result;
13491 }
13492
13493 template<typename ClassType, typename StructureType>
13494 LValue allocateObject(
13495 size_t size, StructureType structure, LValue butterfly, LBasicBlock slowPath)
13496 {
13497 Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm(), size, AllocatorForMode::AllocatorIfExists);
13498 return allocateObject(
13499 m_out.constIntPtr(allocator.localAllocator()), structure, butterfly, slowPath);
13500 }
13501
13502 template<typename ClassType, typename StructureType>
13503 LValue allocateObject(StructureType structure, LValue butterfly, LBasicBlock slowPath)
13504 {
13505 return allocateObject<ClassType>(
13506 ClassType::allocationSize(0), structure, butterfly, slowPath);
13507 }
13508
    // Returns an LValue for the LocalAllocator that serves allocations of
    // |size| bytes out of |subspace|, branching to |slowPath| when there is
    // no suitable allocator (e.g. the size is above the large cutoff).
    LValue allocatorForSize(LValue subspace, LValue size, LBasicBlock slowPath)
    {
        static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");

        // Try to do some constant-folding here.
        if (subspace->hasIntPtr() && size->hasIntPtr()) {
            CompleteSubspace* actualSubspace = bitwise_cast<CompleteSubspace*>(subspace->asIntPtr());
            size_t actualSize = size->asIntPtr();

            Allocator actualAllocator = actualSubspace->allocatorForNonVirtual(actualSize, AllocatorForMode::AllocatorIfExists);
            if (!actualAllocator) {
                // No allocator exists for this size, so always take the slow
                // path. We still create a continuation block to keep the CFG
                // well-formed; the returned zero is dead because the jump to
                // slowPath is unconditional.
                LBasicBlock continuation = m_out.newBlock();
                LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
                m_out.jump(slowPath);
                m_out.appendTo(continuation, lastNext);
                return m_out.intPtrZero;
            }

            return m_out.constIntPtr(actualAllocator.localAllocator());
        }

        unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);

        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);

        // sizeClassIndex = (size + sizeStep - 1) >> log2(sizeStep), i.e. the
        // size rounded up to the next size step, in size-step units.
        LValue sizeClassIndex = m_out.lShr(
            m_out.add(size, m_out.constIntPtr(MarkedSpace::sizeStep - 1)),
            m_out.constInt32(stepShift));

        // Sizes beyond the large cutoff have no per-size-class allocator.
        m_out.branch(
            m_out.above(sizeClassIndex, m_out.constIntPtr(MarkedSpace::largeCutoff >> stepShift)),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(continuation, lastNext);

        // Index into the subspace's allocator-for-size-step table.
        return m_out.loadPtr(
            m_out.baseIndex(
                m_heaps.CompleteSubspace_allocatorForSizeStep,
                subspace, sizeClassIndex));
    }
13551
13552 LValue allocatorForSize(CompleteSubspace& subspace, LValue size, LBasicBlock slowPath)
13553 {
13554 return allocatorForSize(m_out.constIntPtr(&subspace), size, slowPath);
13555 }
13556
13557 template<typename ClassType>
13558 LValue allocateVariableSizedObject(
13559 LValue size, RegisteredStructure structure, LValue butterfly, LBasicBlock slowPath)
13560 {
13561 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13562 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13563 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13564 return allocateObject(allocator, structure, butterfly, slowPath);
13565 }
13566
13567 template<typename ClassType>
13568 LValue allocateVariableSizedCell(
13569 LValue size, Structure* structure, LBasicBlock slowPath)
13570 {
13571 CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm());
13572 RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
13573 LValue allocator = allocatorForSize(*subspace, size, slowPath);
13574 return allocateCell(allocator, structure, slowPath);
13575 }
13576
    // Allocates a JSFinalObject with the given structure: inline fast path
    // with a null butterfly, plus a lazy slow path calling operationNewObject.
    LValue allocateObject(RegisteredStructure structure)
    {
        size_t allocationSize = JSFinalObject::allocationSize(structure.get()->inlineCapacity());
        Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);

        // FIXME: If the allocator is null, we could simply emit a normal C call to the allocator
        // instead of putting it on the slow path.
        // https://bugs.webkit.org/show_bug.cgi?id=161062

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        // Fast path: inline allocation; the butterfly is null for a fresh
        // JSFinalObject.
        ValueFromBlock fastResult = m_out.anchor(allocateObject(
            m_out.constIntPtr(allocator.localAllocator()), structure, m_out.intPtrZero, slowPath));

        m_out.jump(continuation);

        m_out.appendTo(slowPath, continuation);

        // Slow path: runtime call. The generator is built through
        // lazySlowPath, so its machine code is produced on demand.
        VM& vm = this->vm();
        LValue slowResultValue = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationNewObject, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(structure.get()));
            });
        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(pointerType(), fastResult, slowResult);
    }
13611
13612 struct ArrayValues {
13613 ArrayValues()
13614 : array(0)
13615 , butterfly(0)
13616 {
13617 }
13618
13619 ArrayValues(LValue array, LValue butterfly)
13620 : array(array)
13621 , butterfly(butterfly)
13622 {
13623 }
13624
13625 LValue array;
13626 LValue butterfly;
13627 };
13628
    // Allocates a JSArray (cell + butterfly) with the given public length,
    // vector length, structure, and indexing type. The fast path allocates
    // the butterfly and object inline; large sizes (optionally) and
    // allocation failures fall back to a lazy runtime call. Returns both the
    // array and its butterfly.
    ArrayValues allocateJSArray(LValue publicLength, LValue vectorLength, LValue structure, LValue indexingType, bool shouldInitializeElements = true, bool shouldLargeArraySizeCreateArrayStorage = true)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        if (indexingType->hasInt32()) {
            // Debug-only sanity check: this helper handles the contiguous-ish
            // indexing shapes, not ArrayStorage.
            IndexingType type = static_cast<IndexingType>(indexingType->asInt32());
            ASSERT_UNUSED(type,
                hasUndecided(type)
                || hasInt32(type)
                || hasDouble(type)
                || hasContiguous(type));
        }

        LBasicBlock fastCase = m_out.newBlock();
        LBasicBlock largeCase = m_out.newBlock();
        LBasicBlock failCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        LBasicBlock slowCase = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastCase);

        // When lengths are compile-time constants, round the vector length up
        // to the optimal contiguous vector length now, and remember the
        // results so the slow path can decide whether a hint is needed.
        Optional<unsigned> staticVectorLength;
        Optional<unsigned> staticVectorLengthFromPublicLength;
        if (structure->hasIntPtr()) {
            if (publicLength->hasInt32()) {
                unsigned publicLengthConst = static_cast<unsigned>(publicLength->asInt32());
                if (publicLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    publicLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), publicLengthConst);
                    staticVectorLengthFromPublicLength = publicLengthConst;
                }

            }
            if (vectorLength->hasInt32()) {
                unsigned vectorLengthConst = static_cast<unsigned>(vectorLength->asInt32());
                if (vectorLengthConst <= MAX_STORAGE_VECTOR_LENGTH) {
                    vectorLengthConst = Butterfly::optimalContiguousVectorLength(
                        bitwise_cast<Structure*>(structure->asIntPtr())->outOfLineCapacity(), vectorLengthConst);
                    vectorLength = m_out.constInt32(vectorLengthConst);
                    staticVectorLength = vectorLengthConst;
                }
            }
        } else {
            // We don't compute the optimal vector length for new Array(blah) where blah is not
            // statically known, since the compute effort of doing it here is probably not worth it.
        }

        // The butterfly phi sees null when we reach the slow path without
        // having allocated a butterfly.
        ValueFromBlock noButterfly = m_out.anchor(m_out.intPtrZero);

        LValue predicate;
        if (shouldLargeArraySizeCreateArrayStorage)
            predicate = m_out.aboveOrEqual(publicLength, m_out.constInt32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
        else
            predicate = m_out.booleanFalse;

        m_out.branch(predicate, rarely(largeCase), usually(fastCase));

        m_out.appendTo(fastCase, largeCase);

        // Butterfly payload is vectorLength * 8 bytes, preceded by the
        // IndexingHeader.
        LValue payloadSize =
            m_out.shl(m_out.zeroExt(vectorLength, pointerType()), m_out.constIntPtr(3));

        LValue butterflySize = m_out.add(
            payloadSize, m_out.constIntPtr(sizeof(IndexingHeader)));

        LValue allocator = allocatorForSize(vm().jsValueGigacageAuxiliarySpace, butterflySize, failCase);
        LValue startOfStorage = allocateHeapCell(allocator, failCase);

        // The butterfly pointer points just past the indexing header.
        LValue butterfly = m_out.add(startOfStorage, m_out.constIntPtr(sizeof(IndexingHeader)));

        m_out.store32(publicLength, butterfly, m_heaps.Butterfly_publicLength);
        m_out.store32(vectorLength, butterfly, m_heaps.Butterfly_vectorLength);

        // Initialize elements from (shouldInitializeElements ? 0 : publicLength)
        // up to vectorLength.
        initializeArrayElements(
            indexingType,
            shouldInitializeElements ? m_out.int32Zero : publicLength, vectorLength,
            butterfly);

        ValueFromBlock haveButterfly = m_out.anchor(butterfly);

        LValue object = allocateObject<JSArray>(structure, butterfly, failCase);

        ValueFromBlock fastResult = m_out.anchor(object);
        ValueFromBlock fastButterfly = m_out.anchor(butterfly);
        m_out.jump(continuation);

        // Large arrays should get ArrayStorage: swap in the
        // ArrayWithArrayStorage structure before taking the slow path.
        m_out.appendTo(largeCase, failCase);
        ValueFromBlock largeStructure = m_out.anchor(
            weakStructure(m_graph.registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))));
        m_out.jump(slowCase);

        m_out.appendTo(failCase, slowCase);
        ValueFromBlock failStructure = m_out.anchor(structure);
        m_out.jump(slowCase);

        m_out.appendTo(slowCase, continuation);
        LValue structureValue = m_out.phi(pointerType(), largeStructure, failStructure);
        LValue butterflyValue = m_out.phi(pointerType(), noButterfly, haveButterfly);

        VM& vm = this->vm();
        LValue slowResultValue = nullptr;
        // If the vector length is the same value as the public length (same
        // LValue, or provably the same folded constant), the runtime needs no
        // separate vector-length hint.
        if (vectorLength == publicLength
            || (staticVectorLengthFromPublicLength && staticVectorLength && staticVectorLength.value() == staticVectorLengthFromPublicLength.value())) {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSize, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR());
                },
                structureValue, publicLength, butterflyValue);
        } else {
            slowResultValue = lazySlowPath(
                [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                    return createLazyCallGenerator(vm,
                        operationNewArrayWithSizeAndHint, locations[0].directGPR(),
                        locations[1].directGPR(), locations[2].directGPR(), locations[3].directGPR(), locations[4].directGPR());
                },
                structureValue, publicLength, vectorLength, butterflyValue);
        }

        ValueFromBlock slowResult = m_out.anchor(slowResultValue);
        ValueFromBlock slowButterfly = m_out.anchor(
            m_out.loadPtr(slowResultValue, m_heaps.JSObject_butterfly));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return ArrayValues(
            m_out.phi(pointerType(), fastResult, slowResult),
            m_out.phi(pointerType(), fastButterfly, slowButterfly));
    }
13758
13759 ArrayValues allocateUninitializedContiguousJSArrayInternal(LValue publicLength, LValue vectorLength, RegisteredStructure structure)
13760 {
13761 bool shouldInitializeElements = false;
13762 bool shouldLargeArraySizeCreateArrayStorage = false;
13763 return allocateJSArray(
13764 publicLength, vectorLength, weakStructure(structure), m_out.constInt32(structure->indexingType()), shouldInitializeElements,
13765 shouldLargeArraySizeCreateArrayStorage);
13766 }
13767
13768 ArrayValues allocateUninitializedContiguousJSArray(LValue publicLength, RegisteredStructure structure)
13769 {
13770 return allocateUninitializedContiguousJSArrayInternal(publicLength, publicLength, structure);
13771 }
13772
13773 ArrayValues allocateUninitializedContiguousJSArray(unsigned publicLength, unsigned vectorLength, RegisteredStructure structure)
13774 {
13775 ASSERT(vectorLength >= publicLength);
13776 return allocateUninitializedContiguousJSArrayInternal(m_out.constInt32(publicLength), m_out.constInt32(vectorLength), structure);
13777 }
13778
    // Returns a pointer to a free ShadowChicken log packet, calling the
    // runtime to process the log when the cursor has reached the end.
    // Advances the log cursor past the returned packet.
    LValue ensureShadowChickenPacket()
    {
        ShadowChicken* shadowChicken = vm().shadowChicken();
        RELEASE_ASSERT(shadowChicken);
        LBasicBlock slowCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        TypedPointer addressOfLogCursor = m_out.absolute(shadowChicken->addressOfLogCursor());
        LValue logCursor = m_out.loadPtr(addressOfLogCursor);

        ValueFromBlock fastResult = m_out.anchor(logCursor);

        // Fast path: there is still room before the end of the log.
        m_out.branch(
            m_out.below(logCursor, m_out.constIntPtr(shadowChicken->logEnd())),
            usually(continuation), rarely(slowCase));

        LBasicBlock lastNext = m_out.appendTo(slowCase, continuation);

        // Slow path: process the log, then re-load the cursor (presumably
        // reset by the runtime call — see operationProcessShadowChickenLog).
        vmCall(Void, m_out.operation(operationProcessShadowChickenLog), m_callFrame);

        ValueFromBlock slowResult = m_out.anchor(m_out.loadPtr(addressOfLogCursor));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        LValue result = m_out.phi(pointerType(), fastResult, slowResult);

        // Bump the cursor past the packet we are handing out.
        m_out.storePtr(
            m_out.add(result, m_out.constIntPtr(sizeof(ShadowChicken::Packet))),
            addressOfLogCursor);

        return result;
    }
13811
    // Lowers the edge's value to an Int32 boolean representing its JS
    // truthiness, specialized by use kind.
    LValue boolify(Edge edge)
    {
        switch (edge.useKind()) {
        case BooleanUse:
        case KnownBooleanUse:
            return lowBoolean(edge);
        case Int32Use:
            return m_out.notZero32(lowInt32(edge));
        case DoubleRepUse:
            // "Not equal and ordered" excludes NaN, so NaN is falsy.
            return m_out.doubleNotEqualAndOrdered(lowDouble(edge), m_out.doubleZero);
        case ObjectOrOtherUse:
            return m_out.logicalNot(
                equalNullOrUndefined(
                    edge, CellCaseSpeculatesObject, SpeculateNullOrUndefined,
                    ManualOperandSpeculation));
        case StringUse:
            // A string is truthy unless it is the empty string.
            return m_out.notEqual(lowString(edge), weakPointer(jsEmptyString(&m_graph.m_vm)));
        case StringOrOtherUse: {
            LValue value = lowJSValue(edge, ManualOperandSpeculation);

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            // Cell side: speculate string, then compare against the empty string.
            LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);
            FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));
            ValueFromBlock stringResult = m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm))));
            m_out.jump(continuation);

            // Non-cell side: speculate "other" (null/undefined), which is falsy.
            m_out.appendTo(notCellCase, continuation);
            FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            ValueFromBlock notCellResult = m_out.anchor(m_out.booleanFalse);
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, stringResult, notCellResult);
        }
        case UntypedUse: {
            LValue value = lowJSValue(edge);

            // Implements the following control flow structure:
            // if (value is cell) {
            //     if (value is string or value is BigInt)
            //         result = !!value->length
            //     else {
            //         do evil things for masquerades-as-undefined
            //         result = true
            //     }
            // } else if (value is int32) {
            //     result = !!unboxInt32(value)
            // } else if (value is number) {
            //     result = !!unboxDouble(value)
            // } else {
            //     result = value == jsTrue
            // }

            LBasicBlock cellCase = m_out.newBlock();
            LBasicBlock notStringCase = m_out.newBlock();
            LBasicBlock stringCase = m_out.newBlock();
            LBasicBlock bigIntCase = m_out.newBlock();
            LBasicBlock notStringOrBigIntCase = m_out.newBlock();
            LBasicBlock notCellCase = m_out.newBlock();
            LBasicBlock int32Case = m_out.newBlock();
            LBasicBlock notInt32Case = m_out.newBlock();
            LBasicBlock doubleCase = m_out.newBlock();
            LBasicBlock notDoubleCase = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            Vector<ValueFromBlock> results;

            m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

            LBasicBlock lastNext = m_out.appendTo(cellCase, notStringCase);
            m_out.branch(
                isString(value, provenType(edge) & SpecCell),
                unsure(stringCase), unsure(notStringCase));

            m_out.appendTo(notStringCase, stringCase);
            m_out.branch(
                isBigInt(value, provenType(edge) & (SpecCell - SpecString)),
                unsure(bigIntCase), unsure(notStringOrBigIntCase));

            // String: truthy unless it is the empty string.
            m_out.appendTo(stringCase, bigIntCase);
            results.append(m_out.anchor(m_out.notEqual(value, weakPointer(jsEmptyString(&m_graph.m_vm)))));
            m_out.jump(continuation);

            // BigInt: truthy iff its length is non-zero.
            m_out.appendTo(bigIntCase, notStringOrBigIntCase);
            LValue nonZeroBigInt = m_out.notZero32(
                m_out.load32NonNegative(value, m_heaps.JSBigInt_length));
            results.append(m_out.anchor(nonZeroBigInt));
            m_out.jump(continuation);

            // Other cells are truthy, except possibly objects that masquerade
            // as undefined (only when the watchpoint has been invalidated).
            m_out.appendTo(notStringOrBigIntCase, notCellCase);
            LValue isTruthyObject;
            if (masqueradesAsUndefinedWatchpointIsStillValid())
                isTruthyObject = m_out.booleanTrue;
            else {
                LBasicBlock masqueradesCase = m_out.newBlock();

                results.append(m_out.anchor(m_out.booleanTrue));

                m_out.branch(
                    m_out.testIsZero32(
                        m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                        m_out.constInt32(MasqueradesAsUndefined)),
                    usually(continuation), rarely(masqueradesCase));

                m_out.appendTo(masqueradesCase);

                // A masquerader is truthy only when its global object differs
                // from the one we are executing in.
                isTruthyObject = m_out.notEqual(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(loadStructure(value), m_heaps.Structure_globalObject));
            }
            results.append(m_out.anchor(isTruthyObject));
            m_out.jump(continuation);

            m_out.appendTo(notCellCase, int32Case);
            m_out.branch(
                isInt32(value, provenType(edge) & ~SpecCell),
                unsure(int32Case), unsure(notInt32Case));

            m_out.appendTo(int32Case, notInt32Case);
            results.append(m_out.anchor(m_out.notZero32(unboxInt32(value))));
            m_out.jump(continuation);

            m_out.appendTo(notInt32Case, doubleCase);
            m_out.branch(
                isNumber(value, provenType(edge) & ~SpecCell),
                unsure(doubleCase), unsure(notDoubleCase));

            m_out.appendTo(doubleCase, notDoubleCase);
            LValue doubleIsTruthy = m_out.doubleNotEqualAndOrdered(
                unboxDouble(value), m_out.constDouble(0));
            results.append(m_out.anchor(doubleIsTruthy));
            m_out.jump(continuation);

            // Remaining values (booleans, null, undefined): only true is truthy.
            m_out.appendTo(notDoubleCase, continuation);
            LValue miscIsTruthy = m_out.equal(
                value, m_out.constInt64(JSValue::encode(jsBoolean(true))));
            results.append(m_out.anchor(miscIsTruthy));
            m_out.jump(continuation);

            m_out.appendTo(continuation, lastNext);
            return m_out.phi(Int32, results);
        }
        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
            return 0;
        }
    }
13964
    // How equalNullOrUndefined() treats cell inputs:
    // - AllCellsAreFalse: cells compare false without any speculation
    //   (modulo masquerades-as-undefined handling).
    // - CellCaseSpeculatesObject: emits a type check speculating that the
    //   cell is an object.
    enum StringOrObjectMode {
        AllCellsAreFalse,
        CellCaseSpeculatesObject
    };
    // Which primitive comparison equalNullOrUndefined() performs:
    // - EqualNull: compare against the null encoding only.
    // - EqualUndefined: compare against the undefined encoding only.
    // - EqualNullOrUndefined: true for any "other" (null or undefined) value.
    // - SpeculateNullOrUndefined: type-check that the primitive is
    //   null/undefined, making the result trivially true.
    enum EqualNullOrUndefinedMode {
        EqualNull,
        EqualUndefined,
        EqualNullOrUndefined,
        SpeculateNullOrUndefined
    };
    // Produces an Int32 boolean for "value equals null/undefined" per
    // |primitiveMode|, with cell handling selected by |cellMode| (see the
    // enums above). Handles MasqueradesAsUndefined objects unless the
    // watchpoint proves none exist.
    LValue equalNullOrUndefined(
        Edge edge, StringOrObjectMode cellMode, EqualNullOrUndefinedMode primitiveMode,
        OperandSpeculationMode operandMode = AutomaticOperandSpeculation)
    {
        bool validWatchpoint = masqueradesAsUndefinedWatchpointIsStillValid();

        LValue value = lowJSValue(edge, operandMode);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        Vector<ValueFromBlock, 3> results;

        switch (cellMode) {
        case AllCellsAreFalse:
            break;
        case CellCaseSpeculatesObject:
            // Speculate that any cell reaching here is an object.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));
            break;
        }

        if (validWatchpoint) {
            // No object masquerades as undefined, so cells always compare false.
            results.append(m_out.anchor(m_out.booleanFalse));
            m_out.jump(continuation);
        } else {
            LBasicBlock masqueradesCase =
                m_out.newBlock();

            results.append(m_out.anchor(m_out.booleanFalse));

            // Check the MasqueradesAsUndefined type-info flag.
            m_out.branch(
                m_out.testNonZero32(
                    m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoFlags),
                    m_out.constInt32(MasqueradesAsUndefined)),
                rarely(masqueradesCase), usually(continuation));

            m_out.appendTo(masqueradesCase, primitiveCase);

            LValue structure = loadStructure(value);

            // A masquerader compares equal only when its global object is the
            // one we are executing in.
            results.append(m_out.anchor(
                m_out.equal(
                    weakPointer(m_graph.globalObjectFor(m_node->origin.semantic)),
                    m_out.loadPtr(structure, m_heaps.Structure_globalObject))));
            m_out.jump(continuation);
        }

        m_out.appendTo(primitiveCase, continuation);

        LValue primitiveResult;
        switch (primitiveMode) {
        case EqualNull:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueNull));
            break;
        case EqualUndefined:
            primitiveResult = m_out.equal(value, m_out.constInt64(ValueUndefined));
            break;
        case EqualNullOrUndefined:
            primitiveResult = isOther(value, provenType(edge));
            break;
        case SpeculateNullOrUndefined:
            // After the type check, the primitive must be null/undefined, so
            // the result is trivially true.
            FTL_TYPE_CHECK(
                jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
            primitiveResult = m_out.booleanTrue;
            break;
        }
        results.append(m_out.anchor(primitiveResult));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        return m_out.phi(Int32, results);
    }
14054
    // Emits the out-of-bounds handling for a put-by-val on contiguous
    // storage: grows the public length for stores into in-vector holes, and
    // diverts genuinely out-of-bounds stores to |slowPathFunction| (which
    // then jumps to |continuation|). On return the insertion point is a block
    // where the caller can perform the actual store. For in-bounds array
    // modes this emits nothing.
    template<typename FunctionType>
    void contiguousPutByValOutOfBounds(
        FunctionType slowPathFunction, LValue base, LValue storage, LValue index, LValue value,
        LBasicBlock continuation)
    {
        if (!m_node->arrayMode().isInBounds()) {
            LBasicBlock notInBoundsCase =
                m_out.newBlock();
            LBasicBlock performStore =
                m_out.newBlock();

            // index < publicLength is the plain in-bounds store.
            LValue isNotInBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_publicLength));
            m_out.branch(isNotInBounds, unsure(notInBoundsCase), unsure(performStore));

            LBasicBlock lastNext = m_out.appendTo(notInBoundsCase, performStore);

            LValue isOutOfBounds = m_out.aboveOrEqual(
                index, m_out.load32NonNegative(storage, m_heaps.Butterfly_vectorLength));

            if (!m_node->arrayMode().isOutOfBounds())
                speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
            else {
                LBasicBlock outOfBoundsCase =
                    m_out.newBlock();
                LBasicBlock holeCase =
                    m_out.newBlock();

                m_out.branch(isOutOfBounds, rarely(outOfBoundsCase), usually(holeCase));

                LBasicBlock innerLastNext = m_out.appendTo(outOfBoundsCase, holeCase);

                // Beyond the vector length: let the runtime perform the store.
                vmCall(
                    Void, m_out.operation(slowPathFunction),
                    m_callFrame, base, index, value);

                m_out.jump(continuation);

                m_out.appendTo(holeCase, innerLastNext);
            }

            // Storing into a hole within the vector: bump the public length
            // to index + 1.
            m_out.store32(
                m_out.add(index, m_out.int32One),
                storage, m_heaps.Butterfly_publicLength);

            m_out.jump(performStore);
            m_out.appendTo(performStore, lastNext);
        }
    }
14104
    // On ARM64E, untags (authenticates) a tagged array pointer via a
    // patchpoint, passing |size| to the untag check (see
    // AssemblyHelpers::untagArrayPtr). On other targets, returns |ptr|
    // unchanged.
    LValue untagArrayPtr(LValue ptr, LValue size)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        authenticate->append(size, B3::ValueRep(B3::ValueRep::SomeLateRegister));
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // params[0] = result, params[1] = ptr, params[2] = size.
            jit.move(params[1].gpr(), params[0].gpr());
            jit.untagArrayPtr(params[2].gpr(), params[0].gpr());
        });
        return authenticate;
#else
        UNUSED_PARAM(size);
        return ptr;
#endif
    }
14121
    // On ARM64E, strips the tag bits from an array pointer via a patchpoint;
    // elsewhere this is the identity.
    LValue removeArrayPtrTag(LValue ptr)
    {
#if CPU(ARM64E)
        PatchpointValue* authenticate = m_out.patchpoint(pointerType());
        authenticate->appendSomeRegister(ptr);
        authenticate->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
            // params[0] = result, params[1] = ptr.
            jit.move(params[1].gpr(), params[0].gpr());
            jit.removeArrayPtrTag(params[0].gpr());
        });
        return authenticate;
#endif
        // On ARM64E the return above makes this unreachable.
        return ptr;
    }
14135
    // Returns a caged (or, on ARM64E, authenticated) version of |ptr| for the
    // given Gigacage kind. |base| is the owning object; it is only used on
    // ARM64E to load the length for untagging.
    LValue caged(Gigacage::Kind kind, LValue ptr, LValue base)
    {
#if GIGACAGE_ENABLED
        UNUSED_PARAM(base);
        if (!Gigacage::isEnabled(kind))
            return ptr;

        // The primitive gigacage can be disabled at runtime. If it is still
        // enabled, register a lazy watchpoint on that state; otherwise skip
        // caging.
        if (kind == Gigacage::Primitive && Gigacage::canPrimitiveGigacageBeDisabled()) {
            if (vm().primitiveGigacageEnabled().isStillValid())
                m_graph.watchpoints().addLazily(vm().primitiveGigacageEnabled());
            else
                return ptr;
        }

        // cagedPtr = (ptr & mask) + basePtr
        LValue basePtr = m_out.constIntPtr(Gigacage::basePtr(kind));
        LValue mask = m_out.constIntPtr(Gigacage::mask(kind));

        LValue masked = m_out.bitAnd(ptr, mask);
        LValue result = m_out.add(masked, basePtr);

        // Make sure that B3 doesn't try to do smart reassociation of these pointer bits.
        // FIXME: In an ideal world, B3 would not do harmful reassociations, and if it did, it would be able
        // to undo them during constant hoisting and regalloc. As it stands, if you remove this then Octane
        // gets 1.6% slower and Kraken gets 5% slower. It's all because the basePtr, which is a constant,
        // gets reassociated out of the add above and into the address arithmetic. This disables hoisting of
        // the basePtr constant. Hoisting that constant is worth a lot more perf than the reassociation. One
        // way to make this all work happily is to combine offset legalization with constant hoisting, and
        // then teach it reassociation. So, Add(Add(a, b), const) where a is loop-invariant while b isn't
        // will turn into Add(Add(a, const), b) by the constant hoister. We would have to teach B3 to do this
        // and possibly other smart things if we want to be able to remove this opaque.
        // https://bugs.webkit.org/show_bug.cgi?id=175493
        return m_out.opaque(result);
#elif CPU(ARM64E)
        if (kind == Gigacage::Primitive) {
            LValue size = m_out.load32(base, m_heaps.JSArrayBufferView_length);
            return untagArrayPtr(ptr, size);
        }

        return ptr;
#else
        UNUSED_PARAM(kind);
        UNUSED_PARAM(base);
        return ptr;
#endif
    }
14181
14182 void buildSwitch(SwitchData* data, LType type, LValue switchValue)
14183 {
14184 ASSERT(type == pointerType() || type == Int32);
14185
14186 Vector<SwitchCase> cases;
14187 for (unsigned i = 0; i < data->cases.size(); ++i) {
14188 SwitchCase newCase;
14189
14190 if (type == pointerType()) {
14191 newCase = SwitchCase(m_out.constIntPtr(data->cases[i].value.switchLookupValue(data->kind)),
14192 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14193 } else if (type == Int32) {
14194 newCase = SwitchCase(m_out.constInt32(data->cases[i].value.switchLookupValue(data->kind)),
14195 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count));
14196 } else
14197 CRASH();
14198
14199 cases.append(newCase);
14200 }
14201
14202 m_out.switchInstruction(
14203 switchValue, cases,
14204 lowBlock(data->fallThrough.block), Weight(data->fallThrough.count));
14205 }
14206
    // Lowers a string switch. Emits a binary string switch over the 8-bit
    // case strings when all cases are small enough; otherwise (or for rope /
    // non-8-bit inputs) falls back to switchStringSlow.
    void switchString(SwitchData* data, LValue string, Edge& edge)
    {
        // A binary switch is only worthwhile when every case string is 8-bit
        // and short, and the combined length is bounded.
        bool canDoBinarySwitch = true;
        unsigned totalLength = 0;

        for (DFG::SwitchCase myCase : data->cases) {
            // NOTE: shadows the |string| parameter.
            StringImpl* string = myCase.value.stringImpl();
            if (!string->is8Bit()) {
                canDoBinarySwitch = false;
                break;
            }
            if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
                canDoBinarySwitch = false;
                break;
            }
            totalLength += string->length();
        }

        if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
            switchStringSlow(data, string);
            return;
        }

        LBasicBlock hasImplBlock = m_out.newBlock();
        LBasicBlock is8BitBlock = m_out.newBlock();
        LBasicBlock slowBlock = m_out.newBlock();

        // Rope strings take the slow path.
        m_out.branch(isRopeString(string, edge), unsure(slowBlock), unsure(hasImplBlock));

        LBasicBlock lastNext = m_out.appendTo(hasImplBlock, is8BitBlock);

        LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
        LValue length = m_out.load32(stringImpl, m_heaps.StringImpl_length);

        // The input must itself be 8-bit for a byte-wise comparison.
        m_out.branch(
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIs8Bit())),
            unsure(slowBlock), unsure(is8BitBlock));

        m_out.appendTo(is8BitBlock, slowBlock);

        LValue buffer = m_out.loadPtr(stringImpl, m_heaps.StringImpl_data);

        // FIXME: We should propagate branch weight data to the cases of this switch.
        // https://bugs.webkit.org/show_bug.cgi?id=144368

        // Sort the cases so the recursion can binary-search them.
        Vector<StringSwitchCase> cases;
        for (DFG::SwitchCase myCase : data->cases)
            cases.append(StringSwitchCase(myCase.value.stringImpl(), lowBlock(myCase.target.block)));
        std::sort(cases.begin(), cases.end());
        switchStringRecurse(data, buffer, length, cases, 0, 0, cases.size(), 0, false);

        m_out.appendTo(slowBlock, lastNext);
        switchStringSlow(data, string);
    }
14263
14264 // The code for string switching is based closely on the same code in the DFG backend. While it
14265 // would be nice to reduce the amount of similar-looking code, it seems like this is one of
14266 // those algorithms where factoring out the common bits would result in more code than just
14267 // duplicating.
14268
14269 struct StringSwitchCase {
14270 StringSwitchCase() { }
14271
14272 StringSwitchCase(StringImpl* string, LBasicBlock target)
14273 : string(string)
14274 , target(target)
14275 {
14276 }
14277
14278 bool operator<(const StringSwitchCase& other) const
14279 {
14280 return stringLessThan(*string, *other.string);
14281 }
14282
14283 StringImpl* string;
14284 LBasicBlock target;
14285 };
14286
14287 struct CharacterCase {
14288 CharacterCase()
14289 : character(0)
14290 , begin(0)
14291 , end(0)
14292 {
14293 }
14294
14295 CharacterCase(LChar character, unsigned begin, unsigned end)
14296 : character(character)
14297 , begin(begin)
14298 , end(end)
14299 {
14300 }
14301
14302 bool operator<(const CharacterCase& other) const
14303 {
14304 return character < other.character;
14305 }
14306
14307 LChar character;
14308 unsigned begin;
14309 unsigned end;
14310 };
14311
    // Recursively emits the inline binary switch over the sorted cases[begin, end).
    // Invariants on entry:
    // - The first numChecked characters of the input are known to equal the
    //   corresponding characters of cases[begin] (all cases in the range share
    //   that prefix, since the vector is sorted).
    // - The input's length has already been checked to be >= alreadyCheckedLength;
    //   if checkedExactLength is true, it is known to be exactly that length.
    // Jumps to data->fallThrough when no case matches.
    void switchStringRecurse(
        SwitchData* data, LValue buffer, LValue length, const Vector<StringSwitchCase>& cases,
        unsigned numChecked, unsigned begin, unsigned end, unsigned alreadyCheckedLength,
        unsigned checkedExactLength)
    {
        LBasicBlock fallThrough = lowBlock(data->fallThrough.block);
        
        // Empty range: nothing can match.
        if (begin == end) {
            m_out.jump(fallThrough);
            return;
        }
        
        // Compute, across the range, the shortest case length, the length of the
        // common prefix (from numChecked onward), and whether all lengths agree.
        unsigned minLength = cases[begin].string->length();
        unsigned commonChars = minLength;
        bool allLengthsEqual = true;
        for (unsigned i = begin + 1; i < end; ++i) {
            unsigned myCommonChars = numChecked;
            unsigned limit = std::min(cases[begin].string->length(), cases[i].string->length());
            for (unsigned j = numChecked; j < limit; ++j) {
                if (cases[begin].string->at(j) != cases[i].string->at(j))
                    break;
                myCommonChars++;
            }
            commonChars = std::min(commonChars, myCommonChars);
            if (minLength != cases[i].string->length())
                allLengthsEqual = false;
            minLength = std::min(minLength, cases[i].string->length());
        }
        
        if (checkedExactLength) {
            DFG_ASSERT(m_graph, m_node, alreadyCheckedLength == minLength, alreadyCheckedLength, minLength);
            DFG_ASSERT(m_graph, m_node, allLengthsEqual);
        }
        
        DFG_ASSERT(m_graph, m_node, minLength >= commonChars, minLength, commonChars);
        
        // Establish a length bound (or exact length) before reading characters,
        // unless a sufficient check was already emitted by the caller.
        if (!allLengthsEqual && alreadyCheckedLength < minLength)
            m_out.check(m_out.below(length, m_out.constInt32(minLength)), unsure(fallThrough));
        if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
            m_out.check(m_out.notEqual(length, m_out.constInt32(minLength)), unsure(fallThrough));
        
        // Verify the shared prefix characters that haven't been checked yet.
        for (unsigned i = numChecked; i < commonChars; ++i) {
            m_out.check(
                m_out.notEqual(
                    m_out.load8ZeroExt32(buffer, m_heaps.characters8[i]),
                    m_out.constInt32(static_cast<uint16_t>(cases[begin].string->at(i)))),
                unsure(fallThrough));
        }
        
        if (minLength == commonChars) {
            // This is the case where one of the cases is a prefix of all of the other cases.
            // We've already checked that the input string is a prefix of all of the cases,
            // so we just check length to jump to that case.
            
            DFG_ASSERT(m_graph, m_node, cases[begin].string->length() == commonChars, cases[begin].string->length(), commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                DFG_ASSERT(m_graph, m_node, cases[i].string->length() > commonChars, cases[i].string->length(), commonChars);
            
            if (allLengthsEqual) {
                DFG_ASSERT(m_graph, m_node, end == begin + 1, end, begin);
                m_out.jump(cases[begin].target);
                return;
            }
            
            m_out.check(
                m_out.equal(length, m_out.constInt32(commonChars)),
                unsure(cases[begin].target));
            
            // We've checked if the length is >= minLength, and then we checked if the length is
            // == commonChars. We get to this point if it is >= minLength but not == commonChars.
            // Hence we know that it now must be > minLength, i.e. that it's >= minLength + 1.
            switchStringRecurse(
                data, buffer, length, cases, commonChars, begin + 1, end, minLength + 1, false);
            return;
        }
        
        // At this point we know that the string is longer than commonChars, and we've only verified
        // commonChars. Use a binary switch on the next unchecked character, i.e.
        // string[commonChars].
        
        DFG_ASSERT(m_graph, m_node, end >= begin + 2, end, begin);
        
        LValue uncheckedChar = m_out.load8ZeroExt32(buffer, m_heaps.characters8[commonChars]);
        
        // Partition the (sorted) range by the next unchecked character: each run
        // of cases sharing that character becomes one CharacterCase.
        Vector<CharacterCase> characterCases;
        CharacterCase currentCase(cases[begin].string->at(commonChars), begin, begin + 1);
        for (unsigned i = begin + 1; i < end; ++i) {
            LChar currentChar = cases[i].string->at(commonChars);
            if (currentChar != currentCase.character) {
                currentCase.end = i;
                characterCases.append(currentCase);
                currentCase = CharacterCase(currentChar, i, i + 1);
            } else
                currentCase.end = i + 1;
        }
        characterCases.append(currentCase);
        
        Vector<LBasicBlock> characterBlocks;
        for (unsigned i = characterCases.size(); i--;)
            characterBlocks.append(m_out.newBlock());
        
        Vector<SwitchCase> switchCases;
        for (unsigned i = 0; i < characterCases.size(); ++i) {
            if (i)
                DFG_ASSERT(m_graph, m_node, characterCases[i - 1].character < characterCases[i].character);
            switchCases.append(SwitchCase(
                m_out.constInt32(characterCases[i].character), characterBlocks[i], Weight()));
        }
        m_out.switchInstruction(uncheckedChar, switchCases, fallThrough, Weight());
        
        // Recurse into each partition, with one more character known-checked.
        LBasicBlock lastNext = m_out.m_nextBlock;
        characterBlocks.append(lastNext); // Makes it convenient to set nextBlock.
        for (unsigned i = 0; i < characterCases.size(); ++i) {
            m_out.appendTo(characterBlocks[i], characterBlocks[i + 1]);
            switchStringRecurse(
                data, buffer, length, cases, commonChars + 1,
                characterCases[i].begin, characterCases[i].end, minLength, allLengthsEqual);
        }
        
        DFG_ASSERT(m_graph, m_node, m_out.m_nextBlock == lastNext);
    }
14433
    // Out-of-line string switch: calls the runtime to look up the branch offset
    // in the bytecode's string jump table, then emits a B3 switch on that offset.
    void switchStringSlow(SwitchData* data, LValue string)
    {
        // FIXME: We ought to be able to use computed gotos here. We would save the labels of the
        // blocks we want to jump to, and then request their addresses after compilation completes.
        // https://bugs.webkit.org/show_bug.cgi?id=144369
        
        LValue branchOffset = vmCall(
            Int32, m_out.operation(operationSwitchStringAndGetBranchOffset),
            m_callFrame, m_out.constIntPtr(data->switchTableIndex), string);
        
        StringJumpTable& table = codeBlock()->stringSwitchJumpTable(data->switchTableIndex);
        
        Vector<SwitchCase> cases;
        // These may be negative, or zero, or probably other stuff, too. We don't want to mess with HashSet's corner cases and we don't really care about throughput here.
        StdUnorderedSet<int32_t> alreadyHandled;
        for (unsigned i = 0; i < data->cases.size(); ++i) {
            // FIXME: The fact that we're using the bytecode's switch table means that the
            // following DFG IR transformation would be invalid.
            //
            // Original code:
            //     switch (v) {
            //     case "foo":
            //     case "bar":
            //         things();
            //         break;
            //     default:
            //         break;
            //     }
            //
            // New code:
            //     switch (v) {
            //     case "foo":
            //         instrumentFoo();
            //         goto _things;
            //     case "bar":
            //         instrumentBar();
            //     _things:
            //         things();
            //         break;
            //     default:
            //         break;
            //     }
            //
            // Luckily, we don't currently do any such transformation. But it's kind of silly that
            // this is an issue.
            // https://bugs.webkit.org/show_bug.cgi?id=144635
            
            DFG::SwitchCase myCase = data->cases[i];
            StringJumpTable::StringOffsetTable::iterator iter =
                table.offsetTable.find(myCase.value.stringImpl());
            DFG_ASSERT(m_graph, m_node, iter != table.offsetTable.end());
            
            // Multiple DFG cases can share a branch offset; B3 requires each
            // switch value to appear at most once, so only emit the first.
            if (!alreadyHandled.insert(iter->value.branchOffset).second)
                continue;
            
            cases.append(SwitchCase(
                m_out.constInt32(iter->value.branchOffset),
                lowBlock(myCase.target.block), Weight(myCase.target.count)));
        }
        
        m_out.switchInstruction(
            branchOffset, cases, lowBlock(data->fallThrough.block),
            Weight(data->fallThrough.count));
    }
14498
14499 // Calls the functor at the point of code generation where we know what the result type is.
14500 // You can emit whatever code you like at that point. Expects you to terminate the basic block.
14501 // When buildTypeOf() returns, it will have terminated all basic blocks that it created. So, if
14502 // you aren't using this as the terminator of a high-level block, you should create your own
    // continuation and set it as the nextBlock (m_out.insertNewBlocksBefore(continuation)) before
14504 // calling this. For example:
14505 //
14506 // LBasicBlock continuation = m_out.newBlock();
14507 // LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
14508 // buildTypeOf(
14509 // child, value,
14510 // [&] (TypeofType type) {
14511 // do things;
14512 // m_out.jump(continuation);
14513 // });
14514 // m_out.appendTo(continuation, lastNext);
    // Emits the typeof-dispatch tree for `value` and invokes `functor` once per
    // reachable TypeofType, each time inside a block where that classification
    // holds. The functor must terminate the basic block it fills in.
    template<typename Functor>
    void buildTypeOf(Edge child, LValue value, const Functor& functor)
    {
        JSGlobalObject* globalObject = m_graph.globalObjectFor(m_node->origin.semantic);
        
        // Implements the following branching structure:
        //
        // if (is cell) {
        //     if (is object) {
        //         if (is function) {
        //             return function;
        //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
        //             return object
        //         } else {
        //             return slowPath();
        //         }
        //     } else if (is string) {
        //         return string
        //     } else if (is bigint) {
        //         return bigint
        //     } else {
        //         return symbol
        //     }
        // } else if (is number) {
        //     return number
        // } else if (is null) {
        //     return object
        // } else if (is boolean) {
        //     return boolean
        // } else {
        //     return undefined
        // }
        //
        // FIXME: typeof Symbol should be more frequently seen than BigInt.
        // We should change the order of type detection based on this frequency.
        // https://bugs.webkit.org/show_bug.cgi?id=192650
        
        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock objectCase = m_out.newBlock();
        LBasicBlock functionCase = m_out.newBlock();
        LBasicBlock notFunctionCase = m_out.newBlock();
        LBasicBlock reallyObjectCase = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock unreachable = m_out.newBlock();
        LBasicBlock notObjectCase = m_out.newBlock();
        LBasicBlock stringCase = m_out.newBlock();
        LBasicBlock notStringCase = m_out.newBlock();
        LBasicBlock bigIntCase = m_out.newBlock();
        LBasicBlock symbolCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock numberCase = m_out.newBlock();
        LBasicBlock notNumberCase = m_out.newBlock();
        LBasicBlock notNullCase = m_out.newBlock();
        LBasicBlock booleanCase = m_out.newBlock();
        LBasicBlock undefinedCase = m_out.newBlock();
        
        m_out.branch(isCell(value, provenType(child)), unsure(cellCase), unsure(notCellCase));
        
        LBasicBlock lastNext = m_out.appendTo(cellCase, objectCase);
        m_out.branch(isObject(value, provenType(child)), unsure(objectCase), unsure(notObjectCase));
        
        m_out.appendTo(objectCase, functionCase);
        m_out.branch(
            isFunction(value, provenType(child) & SpecObject),
            unsure(functionCase), unsure(notFunctionCase));
        
        m_out.appendTo(functionCase, notFunctionCase);
        functor(TypeofType::Function);
        
        // Objects with typeof-exotic behavior (call traps, masquerades-as-undefined)
        // need the runtime to classify them.
        m_out.appendTo(notFunctionCase, reallyObjectCase);
        m_out.branch(
            isExoticForTypeof(value, provenType(child) & (SpecObject - SpecFunction)),
            rarely(slowPath), usually(reallyObjectCase));
        
        m_out.appendTo(reallyObjectCase, slowPath);
        functor(TypeofType::Object);
        
        // Slow path: call operationTypeOfObjectAsTypeofType, then re-dispatch into
        // the already-built case block for the TypeofType it returns.
        m_out.appendTo(slowPath, unreachable);
        VM& vm = this->vm();
        LValue result = lazySlowPath(
            [=, &vm] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
                return createLazyCallGenerator(vm,
                    operationTypeOfObjectAsTypeofType, locations[0].directGPR(),
                    CCallHelpers::TrustedImmPtr(globalObject), locations[1].directGPR());
            }, value);
        Vector<SwitchCase, 3> cases;
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Undefined)), undefinedCase));
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Object)), reallyObjectCase));
        cases.append(SwitchCase(m_out.constInt32(static_cast<int32_t>(TypeofType::Function)), functionCase));
        m_out.switchInstruction(m_out.castToInt32(result), cases, unreachable, Weight());
        
        m_out.appendTo(unreachable, notObjectCase);
        m_out.unreachable();
        
        m_out.appendTo(notObjectCase, stringCase);
        m_out.branch(
            isString(value, provenType(child) & (SpecCell - SpecObject)),
            unsure(stringCase), unsure(notStringCase));
        
        m_out.appendTo(stringCase, notStringCase);
        functor(TypeofType::String);
        
        m_out.appendTo(notStringCase, bigIntCase);
        m_out.branch(
            isBigInt(value, provenType(child) & (SpecCell - SpecObject - SpecString)),
            unsure(bigIntCase), unsure(symbolCase));
        
        m_out.appendTo(bigIntCase, symbolCase);
        functor(TypeofType::BigInt);
        
        m_out.appendTo(symbolCase, notCellCase);
        functor(TypeofType::Symbol);
        
        m_out.appendTo(notCellCase, numberCase);
        m_out.branch(
            isNumber(value, provenType(child) & ~SpecCell),
            unsure(numberCase), unsure(notNumberCase));
        
        m_out.appendTo(numberCase, notNumberCase);
        functor(TypeofType::Number);
        
        // typeof null is "object", so null routes to reallyObjectCase. The null
        // check is skipped entirely if the proven type excludes "other" values.
        m_out.appendTo(notNumberCase, notNullCase);
        LValue isNull;
        if (provenType(child) & SpecOther)
            isNull = m_out.equal(value, m_out.constInt64(ValueNull));
        else
            isNull = m_out.booleanFalse;
        m_out.branch(isNull, unsure(reallyObjectCase), unsure(notNullCase));
        
        m_out.appendTo(notNullCase, booleanCase);
        m_out.branch(
            isBoolean(value, provenType(child) & ~(SpecCell | SpecFullNumber)),
            unsure(booleanCase), unsure(undefinedCase));
        
        m_out.appendTo(booleanCase, undefinedCase);
        functor(TypeofType::Boolean);
        
        m_out.appendTo(undefinedCase, lastNext);
        functor(TypeofType::Undefined);
    }
14655
14656 TypedPointer pointerIntoTypedArray(LValue storage, LValue index, TypedArrayType type)
14657 {
14658 LValue offset = m_out.shl(m_out.zeroExtPtr(index), m_out.constIntPtr(logElementSize(type)));
14659
14660 return TypedPointer(
14661 m_heaps.typedArrayProperties,
14662 m_out.add(
14663 storage,
14664 offset
14665 ));
14666 }
14667
14668 LValue loadFromIntTypedArray(TypedPointer pointer, TypedArrayType type)
14669 {
14670 switch (elementSize(type)) {
14671 case 1:
14672 return isSigned(type) ? m_out.load8SignExt32(pointer) : m_out.load8ZeroExt32(pointer);
14673 case 2:
14674 return isSigned(type) ? m_out.load16SignExt32(pointer) : m_out.load16ZeroExt32(pointer);
14675 case 4:
14676 return m_out.load32(pointer);
14677 default:
14678 DFG_CRASH(m_graph, m_node, "Bad element size");
14679 }
14680 }
14681
14682 Output::StoreType storeType(TypedArrayType type)
14683 {
14684 if (isInt(type)) {
14685 switch (elementSize(type)) {
14686 case 1:
14687 return Output::Store32As8;
14688 case 2:
14689 return Output::Store32As16;
14690 case 4:
14691 return Output::Store32;
14692 default:
14693 DFG_CRASH(m_graph, m_node, "Bad element size");
14694 return Output::Store32;
14695 }
14696 }
14697 switch (type) {
14698 case TypeFloat32:
14699 return Output::StoreFloat;
14700 case TypeFloat64:
14701 return Output::StoreDouble;
14702 default:
14703 DFG_CRASH(m_graph, m_node, "Bad typed array type");
14704 }
14705 }
14706
14707 void setIntTypedArrayLoadResult(LValue result, TypedArrayType type, bool canSpeculate = false)
14708 {
14709 if (elementSize(type) < 4 || isSigned(type)) {
14710 setInt32(result);
14711 return;
14712 }
14713
14714 if (m_node->shouldSpeculateInt32() && canSpeculate) {
14715 speculate(
14716 Overflow, noValue(), 0, m_out.lessThan(result, m_out.int32Zero));
14717 setInt32(result);
14718 return;
14719 }
14720
14721 if (m_node->shouldSpeculateInt52()) {
14722 setStrictInt52(m_out.zeroExt(result, Int64));
14723 return;
14724 }
14725
14726 setDouble(m_out.unsignedToDouble(result));
14727 }
14728
    // Produces the Int32 value to store into an integer typed array from the
    // given edge. For Uint8Clamped arrays (isClamped), the value is clamped to
    // [0, 255] — integers by comparison, doubles with NaN mapping to 0.
    LValue getIntTypedArrayStoreOperand(Edge edge, bool isClamped = false)
    {
        LValue intValue;
        switch (edge.useKind()) {
        case Int52RepUse:
        case Int32Use: {
            if (edge.useKind() == Int32Use)
                intValue = lowInt32(edge);
            else
                intValue = m_out.castToInt32(lowStrictInt52(edge));
            
            if (isClamped) {
                LBasicBlock atLeastZero = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();
                
                // Negative values clamp to 0; nonnegative values are capped at 255.
                Vector<ValueFromBlock, 2> intValues;
                intValues.append(m_out.anchor(m_out.int32Zero));
                m_out.branch(
                    m_out.lessThan(intValue, m_out.int32Zero),
                    unsure(continuation), unsure(atLeastZero));
                
                LBasicBlock lastNext = m_out.appendTo(atLeastZero, continuation);
                
                intValues.append(m_out.anchor(m_out.select(
                    m_out.greaterThan(intValue, m_out.constInt32(255)),
                    m_out.constInt32(255),
                    intValue)));
                m_out.jump(continuation);
                
                m_out.appendTo(continuation, lastNext);
                intValue = m_out.phi(Int32, intValues);
            }
            break;
        }
        
        case DoubleRepUse: {
            LValue doubleValue = lowDouble(edge);
            
            if (isClamped) {
                LBasicBlock atLeastZero = m_out.newBlock();
                LBasicBlock withinRange = m_out.newBlock();
                LBasicBlock continuation = m_out.newBlock();
                
                // lessThanOrUnordered catches NaN as well, clamping it to 0.
                Vector<ValueFromBlock, 3> intValues;
                intValues.append(m_out.anchor(m_out.int32Zero));
                m_out.branch(
                    m_out.doubleLessThanOrUnordered(doubleValue, m_out.doubleZero),
                    unsure(continuation), unsure(atLeastZero));
                
                LBasicBlock lastNext = m_out.appendTo(atLeastZero, withinRange);
                intValues.append(m_out.anchor(m_out.constInt32(255)));
                m_out.branch(
                    m_out.doubleGreaterThan(doubleValue, m_out.constDouble(255)),
                    unsure(continuation), unsure(withinRange));
                
                // In [0, 255]: a direct conversion suffices.
                m_out.appendTo(withinRange, continuation);
                intValues.append(m_out.anchor(m_out.doubleToInt(doubleValue)));
                m_out.jump(continuation);
                
                m_out.appendTo(continuation, lastNext);
                intValue = m_out.phi(Int32, intValues);
            } else
                intValue = doubleToInt32(doubleValue);
            break;
        }
        
        default:
            DFG_CRASH(m_graph, m_node, "Bad use kind");
        }
        
        return intValue;
    }
14801
    // Converts a double to an Int32 (or, when isSigned is false, via an
    // unsigned conversion whose bits are reinterpreted as Int32). Values inside
    // [low, high] are converted with a single machine conversion; anything else
    // (including NaN) falls back to the operationToInt32 runtime helper.
    LValue doubleToInt32(LValue doubleValue, double low, double high, bool isSigned = true)
    {
        LBasicBlock greatEnough = m_out.newBlock();
        LBasicBlock withinRange = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        
        Vector<ValueFromBlock, 2> results;
        
        // Note: a NaN input fails this comparison and takes the slow path.
        m_out.branch(
            m_out.doubleGreaterThanOrEqual(doubleValue, m_out.constDouble(low)),
            unsure(greatEnough), unsure(slowPath));
        
        LBasicBlock lastNext = m_out.appendTo(greatEnough, withinRange);
        m_out.branch(
            m_out.doubleLessThanOrEqual(doubleValue, m_out.constDouble(high)),
            unsure(withinRange), unsure(slowPath));
        
        m_out.appendTo(withinRange, slowPath);
        LValue fastResult;
        if (isSigned)
            fastResult = m_out.doubleToInt(doubleValue);
        else
            fastResult = m_out.doubleToUInt(doubleValue);
        results.append(m_out.anchor(fastResult));
        m_out.jump(continuation);
        
        m_out.appendTo(slowPath, continuation);
        results.append(m_out.anchor(m_out.call(Int32, m_out.operation(operationToInt32), doubleValue)));
        m_out.jump(continuation);
        
        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, results);
    }
14836
    // Converts a double to an Int32, picking the best available strategy:
    // a single JS-semantics conversion instruction on ARM64 cores that support
    // it, the sentinel-based "sensible" conversion where available, or a
    // range-checked conversion with a runtime fallback as the last resort.
    LValue doubleToInt32(LValue doubleValue)
    {
#if CPU(ARM64)
        if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics()) {
            PatchpointValue* patchpoint = m_out.patchpoint(Int32);
            patchpoint->append(ConstrainedValue(doubleValue, B3::ValueRep::SomeRegister));
            patchpoint->setGenerator([=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                jit.convertDoubleToInt32UsingJavaScriptSemantics(params[1].fpr(), params[0].gpr());
            });
            // Pure conversion: no memory effects, so B3 may move or CSE it.
            patchpoint->effects = Effects::none();
            return patchpoint;
        }
#endif
        
        if (hasSensibleDoubleToInt())
            return sensibleDoubleToInt32(doubleValue);
        
        // Inline conversion is valid only within +/-(2^31 - 1); anything outside
        // that range (or NaN) goes through the runtime helper.
        double limit = pow(2, 31) - 1;
        return doubleToInt32(doubleValue, -limit, limit);
    }
14857
14858 LValue sensibleDoubleToInt32(LValue doubleValue)
14859 {
14860 LBasicBlock slowPath = m_out.newBlock();
14861 LBasicBlock continuation = m_out.newBlock();
14862
14863 LValue fastResultValue = m_out.doubleToInt(doubleValue);
14864 ValueFromBlock fastResult = m_out.anchor(fastResultValue);
14865 m_out.branch(
14866 m_out.equal(fastResultValue, m_out.constInt32(0x80000000)),
14867 rarely(slowPath), usually(continuation));
14868
14869 LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
14870 ValueFromBlock slowResult = m_out.anchor(
14871 m_out.call(Int32, m_out.operation(operationToInt32SensibleSlow), doubleValue));
14872 m_out.jump(continuation);
14873
14874 m_out.appendTo(continuation, lastNext);
14875 return m_out.phi(Int32, fastResult, slowResult);
14876 }
14877
14878 // This is a mechanism for creating a code generator that fills in a gap in the code using our
14879 // own MacroAssembler. This is useful for slow paths that involve a lot of code and we don't want
14880 // to pay the price of B3 optimizing it. A lazy slow path will only be generated if it actually
14881 // executes. On the other hand, a lazy slow path always incurs the cost of two additional jumps.
14882 // Also, the lazy slow path's register allocation state is slaved to whatever B3 did, so you
14883 // have to use a ScratchRegisterAllocator to try to use some unused registers and you may have
14884 // to spill to top of stack if there aren't enough registers available.
14885 //
14886 // Lazy slow paths involve three different stages of execution. Each stage has unique
14887 // capabilities and knowledge. The stages are:
14888 //
14889 // 1) DFG->B3 lowering, i.e. code that runs in this phase. Lowering is the last time you will
14890 // have access to LValues. If there is an LValue that needs to be fed as input to a lazy slow
14891 // path, then you must pass it as an argument here (as one of the varargs arguments after the
14892 // functor). But, lowering doesn't know which registers will be used for those LValues. Hence
14893 // you pass a lambda to lazySlowPath() and that lambda will run during stage (2):
14894 //
14895 // 2) FTLCompile.cpp's fixFunctionBasedOnStackMaps. This code is the only stage at which we know
14896 // the mapping from arguments passed to this method in (1) and the registers that B3
14897 // selected for those arguments. You don't actually want to generate any code here, since then
14898 // the slow path wouldn't actually be lazily generated. Instead, you want to save the
14899 // registers being used for the arguments and defer code generation to stage (3) by creating
14900 // and returning a LazySlowPath::Generator:
14901 //
14902 // 3) LazySlowPath's generate() method. This code runs in response to the lazy slow path
14903 // executing for the first time. It will call the generator you created in stage (2).
14904 //
14905 // Note that each time you invoke stage (1), stage (2) may be invoked zero, one, or many times.
14906 // Stage (2) will usually be invoked once for stage (1). But, B3 may kill the code, in which
14907 // case stage (2) won't run. B3 may duplicate the code (for example via tail duplication),
14908 // leading to many calls to your stage (2) lambda. Stage (3) may be called zero or once for each
14909 // stage (2). It will be called zero times if the slow path never runs. This is what you hope for
14910 // whenever you use the lazySlowPath() mechanism.
14911 //
14912 // A typical use of lazySlowPath() will look like the example below, which just creates a slow
14913 // path that adds some value to the input and returns it.
14914 //
14915 // // Stage (1) is here. This is your last chance to figure out which LValues to use as inputs.
14916 // // Notice how we pass "input" as an argument to lazySlowPath().
14917 // LValue input = ...;
14918 // int addend = ...;
14919 // LValue output = lazySlowPath(
14920 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14921 // // Stage (2) is here. This is your last chance to figure out which registers are used
14922 // // for which values. Location zero is always the return value. You can ignore it if
14923 // // you don't want to return anything. Location 1 is the register for the first
14924 // // argument to the lazySlowPath(), i.e. "input". Note that the Location object could
14925 // // also hold an FPR, if you are passing a double.
14926 // GPRReg outputGPR = locations[0].directGPR();
14927 // GPRReg inputGPR = locations[1].directGPR();
14928 // return LazySlowPath::createGenerator(
14929 // [=] (CCallHelpers& jit, LazySlowPath::GenerationParams& params) {
14930 // // Stage (3) is here. This is when you generate code. You have access to the
14931 // // registers you collected in stage (2) because this lambda closes over those
14932 // // variables (outputGPR and inputGPR). You also have access to whatever extra
14933 // // data you collected in stage (1), such as the addend in this case.
14934 // jit.add32(TrustedImm32(addend), inputGPR, outputGPR);
14935 // // You have to end by jumping to done. There is nothing to fall through to.
14936 // // You can also jump to the exception handler (see LazySlowPath.h for more
14937 // // info). Note that currently you cannot OSR exit.
14938 // params.doneJumps.append(jit.jump());
14939 // });
14940 // },
14941 // input);
14942 //
14943 // You can basically pass as many inputs as you like, either using this varargs form, or by
14944 // passing a Vector of LValues.
14945 //
14946 // Note that if your slow path is only doing a call, you can use the createLazyCallGenerator()
14947 // helper. For example:
14948 //
14949 // LValue input = ...;
14950 // LValue output = lazySlowPath(
14951 // [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
14952 // return createLazyCallGenerator(
14953 // operationDoThings, locations[0].directGPR(), locations[1].directGPR());
14954 // }, input);
14955 //
14956 // Finally, note that all of the lambdas - both the stage (2) lambda and the stage (3) lambda -
14957 // run after the function that created them returns. Hence, you should not use by-reference
14958 // capture (i.e. [&]) in any of these lambdas.
14959 template<typename Functor, typename... ArgumentTypes>
14960 PatchpointValue* lazySlowPath(const Functor& functor, ArgumentTypes... arguments)
14961 {
14962 return lazySlowPath(functor, Vector<LValue>{ arguments... });
14963 }
14964
    // Emits a patchpoint whose out-of-line code is generated lazily, the first
    // time it executes (see the stage (1)/(2)/(3) overview comment above this
    // function in the original file). `functor` runs at stackmap-generation
    // time, receives the final register locations of the patchpoint (location 0
    // is the Int64 result, subsequent locations map to userArguments), and
    // returns the LazySlowPath::Generator that will emit the real code.
    template<typename Functor>
    PatchpointValue* lazySlowPath(const Functor& functor, const Vector<LValue>& userArguments)
    {
        CodeOrigin origin = m_node->origin.semantic;
        
        PatchpointValue* result = m_out.patchpoint(B3::Int64);
        for (LValue arg : userArguments)
            result->append(ConstrainedValue(arg, B3::ValueRep::SomeRegister));
        
        RefPtr<PatchpointExceptionHandle> exceptionHandle =
            preparePatchpointForExceptions(result);
        
        result->clobber(RegisterSet::macroScratchRegisters());
        State* state = &m_ftlState;
        
        result->setGenerator(
            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
                // Stage (2): capture the concrete register locations and build
                // the deferred generator.
                Vector<Location> locations;
                for (const B3::ValueRep& rep : params)
                    locations.append(Location::forValueRep(rep));
                
                RefPtr<LazySlowPath::Generator> generator = functor(locations);
                
                // The patchable jump is the hook that gets repointed once the
                // slow path is generated; `done` is where it rejoins main-line code.
                CCallHelpers::PatchableJump patchableJump = jit.patchableJump();
                CCallHelpers::Label done = jit.label();
                
                RegisterSet usedRegisters = params.unavailableRegisters();
                
                RefPtr<ExceptionTarget> exceptionTarget =
                    exceptionHandle->scheduleExitCreation(params);
                
                // FIXME: As part of handling exceptions, we need to create a concrete OSRExit here.
                // Doing so should automagically register late paths that emit exit thunks.
                
                params.addLatePath(
                    [=] (CCallHelpers& jit) {
                        AllowMacroScratchRegisterUsage allowScratch(jit);
                        // Reserve a LazySlowPath slot, push its index for the
                        // generation thunk, and jump to the thunk; the thunk
                        // runs generate() on first execution.
                        patchableJump.m_jump.link(&jit);
                        unsigned index = state->jitCode->lazySlowPaths.size();
                        state->jitCode->lazySlowPaths.append(nullptr);
                        jit.pushToSaveImmediateWithoutTouchingRegisters(
                            CCallHelpers::TrustedImm32(index));
                        CCallHelpers::Jump generatorJump = jit.jump();
                        
                        // Note that so long as we're here, we don't really know if our late path
                        // runs before or after any other late paths that we might depend on, like
                        // the exception thunk.
                        
                        RefPtr<JITCode> jitCode = state->jitCode;
                        VM* vm = &state->graph.m_vm;
                        
                        jit.addLinkTask(
                            [=] (LinkBuffer& linkBuffer) {
                                // Link time: resolve final code locations and
                                // install the LazySlowPath into its slot.
                                linkBuffer.link(generatorJump,
                                    CodeLocationLabel<JITThunkPtrTag>(vm->getCTIStub(lazySlowPathGenerationThunkGenerator).code()));
                                
                                std::unique_ptr<LazySlowPath> lazySlowPath = std::make_unique<LazySlowPath>();
                                
                                auto linkedPatchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(patchableJump));
                                
                                CodeLocationLabel<JSInternalPtrTag> linkedDone = linkBuffer.locationOf<JSInternalPtrTag>(done);
                                
                                CallSiteIndex callSiteIndex =
                                    jitCode->common.addUniqueCallSiteIndex(origin);
                                
                                lazySlowPath->initialize(
                                    linkedPatchableJump, linkedDone,
                                    exceptionTarget->label(linkBuffer), usedRegisters,
                                    callSiteIndex, generator);
                                
                                jitCode->lazySlowPaths[index] = WTFMove(lazySlowPath);
                            });
                    });
            });
        return result;
    }
15041
    // Emits an OSR exit of the given kind, taken when failCondition is true at
    // runtime. `lowValue`/`highValue` describe the value being speculated on
    // for exit-state reconstruction.
    void speculate(
        ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, highValue, failCondition, m_origin);
    }
15047
    // Variant of speculate() that identifies the speculated-on value through a
    // MethodOfGettingAValueProfile instead of a Node*.
    void speculate(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition)
    {
        appendOSRExit(kind, lowValue, profile, failCondition, m_origin);
    }
15053
    // Emits an unconditional OSR exit (the fail condition is constant true) and
    // records that execution cannot continue past this point.
    void terminate(ExitKind kind)
    {
        speculate(kind, noValue(), nullptr, m_out.booleanTrue);
        didAlreadyTerminate();
    }
15059
    // Invalidates the abstract state, marking the rest of the current block as
    // unreachable after an unconditional exit has been emitted.
    void didAlreadyTerminate()
    {
        m_state.setIsValid(false);
    }
15064
    // Narrows the abstract type of highValue as if a type check for
    // typesPassedThrough had been emitted, without emitting any code.
    void simulatedTypeCheck(Edge highValue, SpeculatedType typesPassedThrough)
    {
        m_interpreter.filter(highValue, typesPassedThrough);
    }
15069
    // Public entry point for emitting a type check that OSR exits (with the
    // given exitKind) when failCondition holds; forwards to appendTypeCheck().
    void typeCheck(
        FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
        LValue failCondition, ExitKind exitKind = BadType)
    {
        appendTypeCheck(lowValue, highValue, typesPassedThrough, failCondition, exitKind);
    }
15076
// Emits the OSR exit for a type check and then filters the abstract
// type. Skips everything when the abstract interpreter already proves the
// check redundant. The exit must be appended before the filter so the
// exit sees the pre-narrowed state.
void appendTypeCheck(
    FormattedValue lowValue, Edge highValue, SpeculatedType typesPassedThrough,
    LValue failCondition, ExitKind exitKind)
{
    if (!m_interpreter.needsTypeCheck(highValue, typesPassedThrough))
        return;
    ASSERT(mayHaveTypeCheck(highValue.useKind()));
    appendOSRExit(exitKind, lowValue, highValue.node(), failCondition, m_origin);
    m_interpreter.filter(highValue, typesPassedThrough);
}
15087
// Lowers |edge| to an unboxed Int32. Tries, in order: constant folding,
// an already-lowered int32, strict-int52/int52 representations (with a
// range check), and a boxed JSValue (with an is-int32 check). If none of
// these can produce an int32, the path is terminated.
LValue lowInt32(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
{
    ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));

    if (edge->hasConstant()) {
        JSValue value = edge->asJSValue();
        simulatedTypeCheck(edge, SpecInt32Only);
        if (!value.isInt32()) {
            // A non-int32 constant can never pass the speculation; emit a
            // terminal exit and return a placeholder.
            if (mayHaveTypeCheck(edge.useKind()))
                terminate(Uncountable);
            return m_out.int32Zero;
        }
        LValue result = m_out.constInt32(value.asInt32());
        result->setOrigin(B3::Origin(edge.node()));
        return result;
    }

    LoweredNodeValue value = m_int32Values.get(edge.node());
    if (isValid(value)) {
        simulatedTypeCheck(edge, SpecInt32Only);
        return value.value();
    }

    value = m_strictInt52Values.get(edge.node());
    if (isValid(value))
        return strictInt52ToInt32(edge, value.value());

    value = m_int52Values.get(edge.node());
    if (isValid(value))
        return strictInt52ToInt32(edge, int52ToStrictInt52(value.value()));

    value = m_jsValueValues.get(edge.node());
    if (isValid(value)) {
        LValue boxedResult = value.value();
        FTL_TYPE_CHECK(
            jsValueValue(boxedResult), edge, SpecInt32Only, isNotInt32(boxedResult));
        LValue result = unboxInt32(boxedResult);
        // Cache the unboxed value so later uses don't re-unbox.
        setInt32(edge.node(), result);
        return result;
    }

    // No representation was available; this is only reachable when the
    // proven type excludes int32 entirely.
    DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecInt32Only), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.int32Zero;
}
15134
// Int52 values have two machine representations: StrictInt52 holds the
// integer directly in an Int64; Int52 holds it shifted left by
// JSValue::int52ShiftAmount (useful for overflow-checked arithmetic).
enum Int52Kind { StrictInt52, Int52 };
// Lowers |edge| to the requested Int52 representation, converting from
// the other representation if that is what was previously lowered.
LValue lowInt52(Edge edge, Int52Kind kind)
{
    DFG_ASSERT(m_graph, m_node, edge.useKind() == Int52RepUse, edge.useKind());

    LoweredNodeValue value;

    switch (kind) {
    case Int52:
        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return value.value();

        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return strictInt52ToInt52(value.value());
        break;

    case StrictInt52:
        value = m_strictInt52Values.get(edge.node());
        if (isValid(value))
            return value.value();

        value = m_int52Values.get(edge.node());
        if (isValid(value))
            return int52ToStrictInt52(value.value());
        break;
    }

    // Neither representation exists; only reachable when nothing was proven
    // about the edge.
    DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.int64Zero;
}
15169
// Convenience overload: lowers |edge| in the shifted Int52 representation.
LValue lowInt52(Edge edge)
{
    return lowInt52(edge, Int52);
}
15174
// Convenience overload: lowers |edge| in the unshifted StrictInt52 form.
LValue lowStrictInt52(Edge edge)
{
    return lowInt52(edge, StrictInt52);
}
15179
// Returns true when StrictInt52 is the cheaper representation for |node|,
// i.e. when no shifted Int52 value has already been lowered for it.
bool betterUseStrictInt52(Node* node)
{
    return !isValid(m_int52Values.get(node));
}
// Edge convenience wrapper for the Node* overload above.
bool betterUseStrictInt52(Edge edge)
{
    return betterUseStrictInt52(edge.node());
}
// Picks the Int52 representation that avoids a conversion for |node|
// (works for both Node* and Edge via the overloads above).
template<typename T>
Int52Kind bestInt52Kind(T node)
{
    return betterUseStrictInt52(node) ? StrictInt52 : Int52;
}
15193 Int52Kind opposite(Int52Kind kind)
15194 {
15195 switch (kind) {
15196 case Int52:
15197 return StrictInt52;
15198 case StrictInt52:
15199 return Int52;
15200 }
15201 DFG_CRASH(m_graph, m_node, "Bad use kind");
15202 return Int52;
15203 }
15204
// Lowers |edge| in whichever Int52 representation is cheapest, reporting
// the chosen representation through the out-parameter |kind|.
LValue lowWhicheverInt52(Edge edge, Int52Kind& kind)
{
    kind = bestInt52Kind(edge);
    return lowInt52(edge, kind);
}
15210
// Lowers |edge| to a cell pointer. Constants are materialized from their
// frozen value (terminating if the constant is not a cell); otherwise the
// boxed JSValue representation is used with an is-cell type check.
LValue lowCell(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
{
    DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || DFG::isCell(edge.useKind()), edge.useKind());

    if (edge->op() == JSConstant) {
        FrozenValue* value = edge->constant();
        simulatedTypeCheck(edge, SpecCellCheck);
        if (!value->value().isCell()) {
            // A non-cell constant can never pass; emit a terminal exit.
            if (mayHaveTypeCheck(edge.useKind()))
                terminate(Uncountable);
            return m_out.intPtrZero;
        }
        LValue result = frozenPointer(value);
        result->setOrigin(B3::Origin(edge.node()));
        return result;
    }

    LoweredNodeValue value = m_jsValueValues.get(edge.node());
    if (isValid(value)) {
        LValue uncheckedValue = value.value();
        FTL_TYPE_CHECK(
            jsValueValue(uncheckedValue), edge, SpecCellCheck, isNotCell(uncheckedValue));
        // On 64-bit, a checked cell JSValue is already the cell pointer.
        return uncheckedValue;
    }

    // No usable representation; only reachable when the proven type
    // excludes cells entirely.
    DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecCellCheck), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.intPtrZero;
}
15241
15242 LValue lowObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15243 {
15244 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
15245
15246 LValue result = lowCell(edge, mode);
15247 speculateObject(edge, result);
15248 return result;
15249 }
15250
15251 LValue lowRegExpObject(Edge edge)
15252 {
15253 LValue result = lowCell(edge);
15254 speculateRegExpObject(edge, result);
15255 return result;
15256 }
15257
15258 LValue lowMapObject(Edge edge)
15259 {
15260 LValue result = lowCell(edge);
15261 speculateMapObject(edge, result);
15262 return result;
15263 }
15264
15265 LValue lowSetObject(Edge edge)
15266 {
15267 LValue result = lowCell(edge);
15268 speculateSetObject(edge, result);
15269 return result;
15270 }
15271
15272 LValue lowWeakMapObject(Edge edge)
15273 {
15274 LValue result = lowCell(edge);
15275 speculateWeakMapObject(edge, result);
15276 return result;
15277 }
15278
15279 LValue lowWeakSetObject(Edge edge)
15280 {
15281 LValue result = lowCell(edge);
15282 speculateWeakSetObject(edge, result);
15283 return result;
15284 }
15285
15286 LValue lowDataViewObject(Edge edge)
15287 {
15288 LValue result = lowCell(edge);
15289 speculateDataViewObject(edge, result);
15290 return result;
15291 }
15292
15293 LValue lowString(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15294 {
15295 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringUse || edge.useKind() == KnownStringUse || edge.useKind() == StringIdentUse);
15296
15297 LValue result = lowCell(edge, mode);
15298 speculateString(edge, result);
15299 return result;
15300 }
15301
15302 LValue lowStringIdent(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15303 {
15304 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == StringIdentUse);
15305
15306 LValue string = lowString(edge, mode);
15307 LValue stringImpl = m_out.loadPtr(string, m_heaps.JSString_value);
15308 speculateStringIdent(edge, string, stringImpl);
15309 return stringImpl;
15310 }
15311
15312 LValue lowSymbol(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15313 {
15314 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == SymbolUse);
15315
15316 LValue result = lowCell(edge, mode);
15317 speculateSymbol(edge, result);
15318 return result;
15319 }
15320
15321 LValue lowBigInt(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15322 {
15323 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BigIntUse);
15324
15325 LValue result = lowCell(edge, mode);
15326 speculateBigInt(edge, result);
15327 return result;
15328 }
15329
15330 LValue lowNonNullObject(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15331 {
15332 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == ObjectUse);
15333
15334 LValue result = lowCell(edge, mode);
15335 speculateNonNullObject(edge, result);
15336 return result;
15337 }
15338
// Lowers |edge| to an unboxed boolean. Tries constant folding, an
// already-lowered boolean, then a boxed JSValue with an is-boolean check;
// terminates the path if nothing can produce a boolean.
LValue lowBoolean(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
{
    ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse);

    if (edge->hasConstant()) {
        JSValue value = edge->asJSValue();
        simulatedTypeCheck(edge, SpecBoolean);
        if (!value.isBoolean()) {
            // A non-boolean constant can never pass; emit a terminal exit.
            if (mayHaveTypeCheck(edge.useKind()))
                terminate(Uncountable);
            return m_out.booleanFalse;
        }
        LValue result = m_out.constBool(value.asBoolean());
        result->setOrigin(B3::Origin(edge.node()));
        return result;
    }

    LoweredNodeValue value = m_booleanValues.get(edge.node());
    if (isValid(value)) {
        simulatedTypeCheck(edge, SpecBoolean);
        return value.value();
    }

    value = m_jsValueValues.get(edge.node());
    if (isValid(value)) {
        LValue unboxedResult = value.value();
        FTL_TYPE_CHECK(
            jsValueValue(unboxedResult), edge, SpecBoolean, isNotBoolean(unboxedResult));
        LValue result = unboxBoolean(unboxedResult);
        // Cache the unboxed value for later uses of this node.
        setBoolean(edge.node(), result);
        return result;
    }

    // No representation available; only reachable when the proven type
    // excludes booleans entirely.
    DFG_ASSERT(m_graph, m_node, !(provenType(edge) & SpecBoolean), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.booleanFalse;
}
15377
// Lowers |edge| to an unboxed double. Only the double representation is
// consulted; DoubleRep edges are always produced in double form upstream.
LValue lowDouble(Edge edge)
{
    DFG_ASSERT(m_graph, m_node, isDouble(edge.useKind()), edge.useKind());

    LoweredNodeValue value = m_doubleValues.get(edge.node());
    if (isValid(value))
        return value.value();
    // No double was lowered; only reachable when nothing was proven.
    DFG_ASSERT(m_graph, m_node, !provenType(edge), provenType(edge));
    if (mayHaveTypeCheck(edge.useKind()))
        terminate(Uncountable);
    return m_out.doubleZero;
}
15390
15391 LValue lowJSValue(Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
15392 {
15393 DFG_ASSERT(m_graph, m_node, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse, m_node->op(), edge.useKind());
15394 DFG_ASSERT(m_graph, m_node, !isDouble(edge.useKind()), m_node->op(), edge.useKind());
15395 DFG_ASSERT(m_graph, m_node, edge.useKind() != Int52RepUse, m_node->op(), edge.useKind());
15396
15397 if (edge->hasConstant()) {
15398 LValue result = m_out.constInt64(JSValue::encode(edge->asJSValue()));
15399 result->setOrigin(B3::Origin(edge.node()));
15400 return result;
15401 }
15402
15403 LoweredNodeValue value = m_jsValueValues.get(edge.node());
15404 if (isValid(value))
15405 return value.value();
15406
15407 value = m_int32Values.get(edge.node());
15408 if (isValid(value)) {
15409 LValue result = boxInt32(value.value());
15410 setJSValue(edge.node(), result);
15411 return result;
15412 }
15413
15414 value = m_booleanValues.get(edge.node());
15415 if (isValid(value)) {
15416 LValue result = boxBoolean(value.value());
15417 setJSValue(edge.node(), result);
15418 return result;
15419 }
15420
15421 DFG_CRASH(m_graph, m_node, makeString("Value not defined: ", String::number(edge.node()->index())).ascii().data());
15422 return 0;
15423 }
15424
15425 LValue lowNotCell(Edge edge)
15426 {
15427 LValue result = lowJSValue(edge, ManualOperandSpeculation);
15428 FTL_TYPE_CHECK(jsValueValue(result), edge, ~SpecCellCheck, isCell(result));
15429 return result;
15430 }
15431
15432 LValue lowStorage(Edge edge)
15433 {
15434 LoweredNodeValue value = m_storageValues.get(edge.node());
15435 if (isValid(value))
15436 return value.value();
15437
15438 LValue result = lowCell(edge);
15439 setStorage(edge.node(), result);
15440 return result;
15441 }
15442
// Truncates a StrictInt52 to Int32, exiting if the value does not fit:
// sign-extending the truncated result must reproduce the original. Caches
// the int32 for the edge's node.
LValue strictInt52ToInt32(Edge edge, LValue value)
{
    LValue result = m_out.castToInt32(value);
    FTL_TYPE_CHECK(
        noValue(), edge, SpecInt32Only,
        m_out.notEqual(m_out.signExt32To64(result), value));
    setInt32(edge.node(), result);
    return result;
}
15452
// Converts a StrictInt52 to a double. Always exact: 52-bit integers are
// representable in a double's 53-bit mantissa.
LValue strictInt52ToDouble(LValue value)
{
    return m_out.intToDouble(value);
}
15457
// Boxes a StrictInt52 as a JSValue: values that fit in int32 are boxed as
// int32 (canonical form); the rest are boxed as doubles.
LValue strictInt52ToJSValue(LValue value)
{
    LBasicBlock isInt32 = m_out.newBlock();
    LBasicBlock isDouble = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    Vector<ValueFromBlock, 2> results;

    // Fits in int32 iff sign-extending the truncation round-trips.
    LValue int32Value = m_out.castToInt32(value);
    m_out.branch(
        m_out.equal(m_out.signExt32To64(int32Value), value),
        unsure(isInt32), unsure(isDouble));

    LBasicBlock lastNext = m_out.appendTo(isInt32, isDouble);

    results.append(m_out.anchor(boxInt32(int32Value)));
    m_out.jump(continuation);

    m_out.appendTo(isDouble, continuation);

    results.append(m_out.anchor(boxDouble(m_out.intToDouble(value))));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
    return m_out.phi(Int64, results);
}
15484
// Converts StrictInt52 to the shifted Int52 representation.
LValue strictInt52ToInt52(LValue value)
{
    return m_out.shl(value, m_out.constInt64(JSValue::int52ShiftAmount));
}
15489
// Converts shifted Int52 back to StrictInt52 (arithmetic shift preserves
// the sign).
LValue int52ToStrictInt52(LValue value)
{
    return m_out.aShr(value, m_out.constInt64(JSValue::int52ShiftAmount));
}
15494
// Tests whether the boxed |jsValue| is an int32. Folds to a constant when
// the abstract |type| already decides the answer. In the 64-bit encoding,
// boxed int32s are the values at or above the tag-type-number constant.
LValue isInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecInt32Only))
        return proven;
    return m_out.aboveOrEqual(jsValue, m_tagTypeNumber);
}
// Inverse of isInt32 above; folds to a constant when |type| decides it.
LValue isNotInt32(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~SpecInt32Only))
        return proven;
    return m_out.below(jsValue, m_tagTypeNumber);
}
// Unboxes an int32 JSValue: the payload is the low 32 bits.
LValue unboxInt32(LValue jsValue)
{
    return m_out.castToInt32(jsValue);
}
// Boxes an int32 as a JSValue by adding the tag-type-number constant to
// the zero-extended payload.
LValue boxInt32(LValue value)
{
    return m_out.add(m_out.zeroExt(value, Int64), m_tagTypeNumber);
}
15515
// Tests whether |jsValue| is a cell or a "misc" immediate (i.e. not a
// number): such values have none of the number-tag bits set.
LValue isCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecCellCheck | SpecMisc))
        return proven;
    return m_out.testIsZero64(jsValue, m_tagTypeNumber);
}
// Inverse of isCellOrMisc: true when any number-tag bit is set.
LValue isNotCellOrMisc(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~(SpecCellCheck | SpecMisc)))
        return proven;
    return m_out.testNonZero64(jsValue, m_tagTypeNumber);
}
15528
15529 LValue unboxDouble(LValue jsValue, LValue* unboxedAsInt = nullptr)
15530 {
15531 LValue asInt = m_out.add(jsValue, m_tagTypeNumber);
15532 if (unboxedAsInt)
15533 *unboxedAsInt = asInt;
15534 return m_out.bitCast(asInt, Double);
15535 }
// Boxes a double as a JSValue by subtracting the tag-type-number constant
// from its raw bit pattern (inverse of unboxDouble).
LValue boxDouble(LValue doubleValue)
{
    return m_out.sub(m_out.bitCast(doubleValue, Int64), m_tagTypeNumber);
}
15540
// Converts a boxed JSValue to StrictInt52. Int32s are sign-extended
// inline; everything else goes through the slow runtime conversion, which
// signals failure (non-int52-representable double) via JSValue::notInt52,
// triggering an exit.
LValue jsValueToStrictInt52(Edge edge, LValue boxedValue)
{
    LBasicBlock intCase = m_out.newBlock();
    LBasicBlock doubleCase = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    // Fold the int32 test when the abstract interpreter already knows.
    LValue isNotInt32;
    if (!m_interpreter.needsTypeCheck(edge, SpecInt32Only))
        isNotInt32 = m_out.booleanFalse;
    else if (!m_interpreter.needsTypeCheck(edge, ~SpecInt32Only))
        isNotInt32 = m_out.booleanTrue;
    else
        isNotInt32 = this->isNotInt32(boxedValue);
    m_out.branch(isNotInt32, unsure(doubleCase), unsure(intCase));

    LBasicBlock lastNext = m_out.appendTo(intCase, doubleCase);

    ValueFromBlock intToInt52 = m_out.anchor(
        m_out.signExt32To64(unboxInt32(boxedValue)));
    m_out.jump(continuation);

    m_out.appendTo(doubleCase, continuation);

    LValue possibleResult = m_out.call(
        Int64, m_out.operation(operationConvertBoxedDoubleToInt52), boxedValue);
    FTL_TYPE_CHECK(
        jsValueValue(boxedValue), edge, SpecInt32Only | SpecAnyIntAsDouble,
        m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));

    ValueFromBlock doubleToInt52 = m_out.anchor(possibleResult);
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);

    return m_out.phi(Int64, intToInt52, doubleToInt52);
}
15577
// Converts an unboxed double to StrictInt52 via the runtime helper,
// exiting with Int52Overflow when the helper reports JSValue::notInt52.
LValue doubleToStrictInt52(Edge edge, LValue value)
{
    LValue possibleResult = m_out.call(
        Int64, m_out.operation(operationConvertDoubleToInt52), value);
    FTL_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
        doubleValue(value), edge, SpecAnyIntAsDouble,
        m_out.equal(possibleResult, m_out.constInt64(JSValue::notInt52)));

    return possibleResult;
}
15588
// Converts a double to int32, exiting with Overflow when the double is
// not exactly representable as an int32 (checked by round-tripping). When
// requested, also exits with NegativeZero on -0.0, which truncates to 0
// but must be distinguished from +0.0.
LValue convertDoubleToInt32(LValue value, bool shouldCheckNegativeZero)
{
    LValue integerValue = m_out.doubleToInt(value);
    LValue integerValueConvertedToDouble = m_out.intToDouble(integerValue);
    LValue valueNotConvertibleToInteger = m_out.doubleNotEqualOrUnordered(value, integerValueConvertedToDouble);
    speculate(Overflow, FormattedValue(DataFormatDouble, value), m_node, valueNotConvertibleToInteger);

    if (shouldCheckNegativeZero) {
        LBasicBlock valueIsZero = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();
        m_out.branch(m_out.isZero32(integerValue), unsure(valueIsZero), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(valueIsZero, continuation);

        // -0.0 is the zero with the sign bit set in its bit pattern.
        LValue doubleBitcastToInt64 = m_out.bitCast(value, Int64);
        LValue signBitSet = m_out.lessThan(doubleBitcastToInt64, m_out.constInt64(0));

        speculate(NegativeZero, FormattedValue(DataFormatDouble, value), m_node, signBitSet);
        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
    return integerValue;
}
15612
// Tests whether |jsValue| is a number (int32 or double): exactly the
// values that are neither cells nor misc immediates.
LValue isNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecFullNumber))
        return proven;
    return isNotCellOrMisc(jsValue);
}
// Inverse of isNumber above.
LValue isNotNumber(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~SpecFullNumber))
        return proven;
    return isCellOrMisc(jsValue);
}
15625
// Tests whether |jsValue| is not a cell: any tag-mask bit set means a
// non-cell encoding.
LValue isNotCell(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~SpecCellCheck))
        return proven;
    return m_out.testNonZero64(jsValue, m_tagMask);
}
15632
// Tests whether |jsValue| is a cell: all tag-mask bits clear.
LValue isCell(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecCellCheck))
        return proven;
    return m_out.testIsZero64(jsValue, m_tagMask);
}
15639
// Tests whether |value| is not a misc immediate (null/undefined/bool/
// empty): misc encodings are all at or below the combined tag constant.
LValue isNotMisc(LValue value, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~SpecMisc))
        return proven;
    return m_out.above(value, m_out.constInt64(TagBitTypeOther | TagBitBool | TagBitUndefined));
}
15646
// Inverse of isNotMisc above.
LValue isMisc(LValue value, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecMisc))
        return proven;
    return m_out.logicalNot(isNotMisc(value));
}
15653
// Tests whether |jsValue| is not a boolean: XOR with the "false" encoding
// must leave only the low bit (true differs from false only in bit 0).
LValue isNotBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~SpecBoolean))
        return proven;
    return m_out.testNonZero64(
        m_out.bitXor(jsValue, m_out.constInt64(ValueFalse)),
        m_out.constInt64(~1));
}
// Inverse of isNotBoolean above.
LValue isBoolean(LValue jsValue, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecBoolean))
        return proven;
    return m_out.logicalNot(isNotBoolean(jsValue));
}
// Unboxes a boolean JSValue: bit 0 distinguishes true from false.
LValue unboxBoolean(LValue jsValue)
{
    // We want to use a cast that guarantees that B3 knows that even the integer
    // value is just 0 or 1. But for now we do it the dumb way.
    return m_out.notZero64(m_out.bitAnd(jsValue, m_out.constInt64(1)));
}
// Boxes a boolean as a JSValue by selecting the encoded true/false.
LValue boxBoolean(LValue value)
{
    return m_out.select(
        value, m_out.constInt64(ValueTrue), m_out.constInt64(ValueFalse));
}
15679
// Tests whether |value| is neither null nor undefined: masking off the
// undefined bit maps both to the null encoding, so compare against that.
LValue isNotOther(LValue value, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, ~SpecOther))
        return proven;
    return m_out.notEqual(
        m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
        m_out.constInt64(ValueNull));
}
// Tests whether |value| is null or undefined (inverse of isNotOther).
LValue isOther(LValue value, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type, SpecOther))
        return proven;
    return m_out.equal(
        m_out.bitAnd(value, m_out.constInt64(~TagBitUndefined)),
        m_out.constInt64(ValueNull));
}
15696
// Folds a type test to a constant when the proven type decides it:
// true if provenType is a subset of wantedType, false if the two are
// disjoint, nullptr (emit a runtime check) otherwise. Note the order:
// an empty provenType (unreachable value) folds to true.
LValue isProvenValue(SpeculatedType provenType, SpeculatedType wantedType)
{
    if (!(provenType & ~wantedType))
        return m_out.booleanTrue;
    if (!(provenType & wantedType))
        return m_out.booleanFalse;
    return nullptr;
}
15705
// Emits the type check implied by the edge's use kind. UntypedUse checks
// nothing; Known*/Rep use kinds assert that no check is needed (the check
// was proven or performed upstream); every other use kind dispatches to
// its dedicated speculateFoo helper.
void speculate(Edge edge)
{
    switch (edge.useKind()) {
    case UntypedUse:
        break;
    case KnownInt32Use:
    case KnownStringUse:
    case KnownPrimitiveUse:
    case KnownOtherUse:
    case DoubleRepUse:
    case Int52RepUse:
    case KnownCellUse:
    case KnownBooleanUse:
        ASSERT(!m_interpreter.needsTypeCheck(edge));
        break;
    case Int32Use:
        speculateInt32(edge);
        break;
    case CellUse:
        speculateCell(edge);
        break;
    case CellOrOtherUse:
        speculateCellOrOther(edge);
        break;
    case AnyIntUse:
        speculateAnyInt(edge);
        break;
    case ObjectUse:
        speculateObject(edge);
        break;
    case ArrayUse:
        speculateArray(edge);
        break;
    case FunctionUse:
        speculateFunction(edge);
        break;
    case ObjectOrOtherUse:
        speculateObjectOrOther(edge);
        break;
    case FinalObjectUse:
        speculateFinalObject(edge);
        break;
    case RegExpObjectUse:
        speculateRegExpObject(edge);
        break;
    case ProxyObjectUse:
        speculateProxyObject(edge);
        break;
    case DerivedArrayUse:
        speculateDerivedArray(edge);
        break;
    case MapObjectUse:
        speculateMapObject(edge);
        break;
    case SetObjectUse:
        speculateSetObject(edge);
        break;
    case WeakMapObjectUse:
        speculateWeakMapObject(edge);
        break;
    case WeakSetObjectUse:
        speculateWeakSetObject(edge);
        break;
    case DataViewObjectUse:
        speculateDataViewObject(edge);
        break;
    case StringUse:
        speculateString(edge);
        break;
    case StringOrOtherUse:
        speculateStringOrOther(edge);
        break;
    case StringIdentUse:
        speculateStringIdent(edge);
        break;
    case SymbolUse:
        speculateSymbol(edge);
        break;
    case StringObjectUse:
        speculateStringObject(edge);
        break;
    case StringOrStringObjectUse:
        speculateStringOrStringObject(edge);
        break;
    case NumberUse:
        speculateNumber(edge);
        break;
    case RealNumberUse:
        speculateRealNumber(edge);
        break;
    case DoubleRepRealUse:
        speculateDoubleRepReal(edge);
        break;
    case DoubleRepAnyIntUse:
        speculateDoubleRepAnyInt(edge);
        break;
    case BooleanUse:
        speculateBoolean(edge);
        break;
    case BigIntUse:
        speculateBigInt(edge);
        break;
    case NotStringVarUse:
        speculateNotStringVar(edge);
        break;
    case NotSymbolUse:
        speculateNotSymbol(edge);
        break;
    case NotCellUse:
        speculateNotCell(edge);
        break;
    case OtherUse:
        speculateOther(edge);
        break;
    case MiscUse:
        speculateMisc(edge);
        break;
    default:
        DFG_CRASH(m_graph, m_node, "Unsupported speculation use kind");
    }
}
15827
// Overload matching the signature expected by DFG edge-iteration helpers;
// the node argument is unused.
void speculate(Node*, Edge edge)
{
    speculate(edge);
}
15832
// Speculates int32 by lowering the edge (lowInt32 emits the check).
void speculateInt32(Edge edge)
{
    lowInt32(edge);
}
15837
// Speculates cell by lowering the edge (lowCell emits the check).
void speculateCell(Edge edge)
{
    lowCell(edge);
}
15842
// Speculates not-cell; skips the lowering entirely when the abstract
// interpreter already proves it.
void speculateNotCell(Edge edge)
{
    if (!m_interpreter.needsTypeCheck(edge))
        return;
    lowNotCell(edge);
}
15849
// Speculates that the edge is a cell, null, or undefined. Cells pass
// directly; non-cells are additionally checked to be "other".
void speculateCellOrOther(Edge edge)
{
    if (shouldNotHaveTypeCheck(edge.useKind()))
        return;

    LValue value = lowJSValue(edge, ManualOperandSpeculation);

    LBasicBlock isNotCell = m_out.newBlock();
    LBasicBlock continuation = m_out.newBlock();

    m_out.branch(isCell(value, provenType(edge)), unsure(continuation), unsure(isNotCell));

    LBasicBlock lastNext = m_out.appendTo(isNotCell, continuation);
    FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));
    m_out.jump(continuation);

    m_out.appendTo(continuation, lastNext);
}
15868
// Speculates that the edge is any-int (int32 or int52-representable
// double); the conversion helper emits the checks. The converted value is
// discarded — only the side-effecting check matters here.
void speculateAnyInt(Edge edge)
{
    if (!m_interpreter.needsTypeCheck(edge))
        return;

    jsValueToStrictInt52(edge, lowJSValue(edge, ManualOperandSpeculation));
}
15876
// Tests whether |cell| has the exact JSType |queriedType| by comparing
// the cell's type-info byte. |speculatedTypeForQuery| is the speculated-
// type equivalent used for constant folding against the proven |type|.
LValue isCellWithType(LValue cell, JSType queriedType, SpeculatedType speculatedTypeForQuery, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, speculatedTypeForQuery))
        return proven;
    return m_out.equal(
        m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
        m_out.constInt32(queriedType));
}
15885
// Tests whether |cell| is a typed-array view (excluding DataView) using
// an unsigned range check over the contiguous typed-array JSType range.
LValue isTypedArrayView(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, SpecTypedArrayView))
        return proven;
    LValue jsType = m_out.sub(
        m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
        m_out.constInt32(FirstTypedArrayType));
    return m_out.below(
        jsType,
        m_out.constInt32(NumberOfTypedArrayTypesExcludingDataView));
}
15897
// Tests whether |cell| is an object: object JSTypes are all >= ObjectType.
LValue isObject(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, SpecObject))
        return proven;
    return m_out.aboveOrEqual(
        m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
        m_out.constInt32(ObjectType));
}
15906
// Inverse of isObject above.
LValue isNotObject(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, ~SpecObject))
        return proven;
    return m_out.below(
        m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
        m_out.constInt32(ObjectType));
}
15915
// Tests whether |cell| is not a string by comparing its structure ID
// against the VM's shared string structure.
LValue isNotString(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, ~SpecString))
        return proven;
    return m_out.notEqual(
        m_out.load32(cell, m_heaps.JSCell_structureID),
        m_out.constInt32(vm().stringStructure->id()));
}
15924
// Inverse of isNotString above.
LValue isString(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, SpecString))
        return proven;
    return m_out.equal(
        m_out.load32(cell, m_heaps.JSCell_structureID),
        m_out.constInt32(vm().stringStructure->id()));
}
15933
// Tests whether |string| is a rope (unresolved concatenation). When an
// edge is supplied, folds to false whenever static information proves the
// string is already resolved (identifier type, known non-rope constant,
// or a statically-known string value).
LValue isRopeString(LValue string, Edge edge = Edge())
{
    if (edge) {
        if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
            return m_out.booleanFalse;
        if (JSValue value = provenValue(edge)) {
            if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                return m_out.booleanFalse;
        }
        String value = edge->tryGetString(m_graph);
        if (!value.isNull()) {
            // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
            return m_out.booleanFalse;
        }
    }

    // Rope strings are marked by a tag bit in the fiber/value pointer.
    return m_out.testNonZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
}
15952
// Inverse of isRopeString; the same static folds apply, producing true.
LValue isNotRopeString(LValue string, Edge edge = Edge())
{
    if (edge) {
        if (!((provenType(edge) & SpecString) & ~SpecStringIdent))
            return m_out.booleanTrue;
        if (JSValue value = provenValue(edge)) {
            if (value.isCell() && value.asCell()->type() == StringType && !asString(value)->isRope())
                return m_out.booleanTrue;
        }
        String value = edge->tryGetString(m_graph);
        if (!value.isNull()) {
            // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
            return m_out.booleanTrue;
        }
    }

    return m_out.testIsZeroPtr(m_out.loadPtr(string, m_heaps.JSString_value), m_out.constIntPtr(JSString::isRopeInPointer));
}
15971
// Tests whether |cell| is not a Symbol via the VM's symbol structure ID.
LValue isNotSymbol(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, ~SpecSymbol))
        return proven;
    return m_out.notEqual(
        m_out.load32(cell, m_heaps.JSCell_structureID),
        m_out.constInt32(vm().symbolStructure->id()));
}
15980
// Inverse of isNotSymbol above.
LValue isSymbol(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, SpecSymbol))
        return proven;
    return m_out.equal(
        m_out.load32(cell, m_heaps.JSCell_structureID),
        m_out.constInt32(vm().symbolStructure->id()));
}
15989
// Tests whether |cell| is not a BigInt via the VM's bigint structure ID.
LValue isNotBigInt(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, ~SpecBigInt))
        return proven;
    return m_out.notEqual(
        m_out.load32(cell, m_heaps.JSCell_structureID),
        m_out.constInt32(vm().bigIntStructure->id()));
}
15998
// Inverse of isNotBigInt above.
LValue isBigInt(LValue cell, SpeculatedType type = SpecFullTop)
{
    if (LValue proven = isProvenValue(type & SpecCell, SpecBigInt))
        return proven;
    return m_out.equal(
        m_out.load32(cell, m_heaps.JSCell_structureID),
        m_out.constInt32(vm().bigIntStructure->id()));
}
16007
// Builds the predicate Arrayify uses to decide whether |cell| already has
// the indexing type demanded by |arrayMode|, by masking and comparing the
// cell's indexing-type byte. For the single-shape modes this is one
// compare; SlowPutArrayStorage accepts a two-shape range and then checks
// the array-class constraint in a second block.
LValue isArrayTypeForArrayify(LValue cell, ArrayMode arrayMode)
{
    switch (arrayMode.type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage: {
        // Writes must also reject copy-on-write butterflies, so include
        // the CopyOnWrite bit in the mask.
        IndexingType indexingModeMask = IsArray | IndexingShapeMask;
        if (arrayMode.action() == Array::Write)
            indexingModeMask |= CopyOnWrite;

        IndexingType shape = arrayMode.shapeMask();
        LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

        switch (arrayMode.arrayClass()) {
        case Array::OriginalArray:
        case Array::OriginalCopyOnWriteArray:
            // Original-array modes never reach Arrayify.
            DFG_CRASH(m_graph, m_node, "Unexpected original array");
            return nullptr;

        case Array::Array:
            return m_out.equal(
                m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                m_out.constInt32(IsArray | shape));

        case Array::NonArray:
        case Array::OriginalNonArray:
            return m_out.equal(
                m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask)),
                m_out.constInt32(shape));

        case Array::PossiblyArray:
            // Don't care whether IsArray is set; drop it from the mask.
            return m_out.equal(
                m_out.bitAnd(indexingType, m_out.constInt32(indexingModeMask & ~IsArray)),
                m_out.constInt32(shape));
        }
        break;
    }

    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        LValue indexingType = m_out.load8ZeroExt32(cell, m_heaps.JSCell_indexingTypeAndMisc);

        LBasicBlock trueCase = m_out.newBlock();
        LBasicBlock checkCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Accept both ArrayStorageShape and SlowPutArrayStorageShape via a
        // single unsigned range check.
        ValueFromBlock falseValue = m_out.anchor(m_out.booleanFalse);
        LValue isAnArrayStorageShape = m_out.belowOrEqual(
            m_out.sub(
                m_out.bitAnd(indexingType, m_out.constInt32(IndexingShapeMask)),
                m_out.constInt32(ArrayStorageShape)),
            m_out.constInt32(SlowPutArrayStorageShape - ArrayStorageShape));
        m_out.branch(isAnArrayStorageShape, unsure(checkCase), unsure(continuation));

        // Shape matched; now verify the array-class constraint.
        LBasicBlock lastNext = m_out.appendTo(checkCase, trueCase);
        switch (arrayMode.arrayClass()) {
        case Array::OriginalArray:
        case Array::OriginalCopyOnWriteArray:
            DFG_CRASH(m_graph, m_node, "Unexpected original array");
            return nullptr;

        case Array::Array:
            m_out.branch(
                m_out.testNonZero32(indexingType, m_out.constInt32(IsArray)),
                unsure(trueCase), unsure(continuation));
            break;

        case Array::NonArray:
        case Array::OriginalNonArray:
            m_out.branch(
                m_out.testIsZero32(indexingType, m_out.constInt32(IsArray)),
                unsure(trueCase), unsure(continuation));
            break;

        case Array::PossiblyArray:
            m_out.jump(trueCase);
            break;
        }

        m_out.appendTo(trueCase, continuation);
        ValueFromBlock trueValue = m_out.anchor(m_out.booleanTrue);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        return m_out.phi(Int32, falseValue, trueValue);
    }

    default:
        break;
    }
    DFG_CRASH(m_graph, m_node, "Corrupt array class");
}
16102
    // Builds the boolean check used by CheckArray. Indexed-storage modes defer to
    // isArrayTypeForArrayify(); arguments objects are matched by JSType; every
    // remaining mode is assumed to name a typed array and is matched by its JSType.
    LValue isArrayTypeForCheckArray(LValue cell, ArrayMode arrayMode)
    {
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::Undecided:
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage:
            return isArrayTypeForArrayify(cell, arrayMode);

        case Array::DirectArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(DirectArgumentsType));

        case Array::ScopedArguments:
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(ScopedArgumentsType));

        default:
            // Typed-array modes: compare against the JSType for the mode's
            // typed array type.
            return m_out.equal(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
                m_out.constInt32(typeForTypedArrayType(arrayMode.typedArrayType())));
        }
    }
16130
16131 LValue isFunction(LValue cell, SpeculatedType type = SpecFullTop)
16132 {
16133 if (LValue proven = isProvenValue(type & SpecCell, SpecFunction))
16134 return proven;
16135 return isType(cell, JSFunctionType);
16136 }
16137 LValue isNotFunction(LValue cell, SpeculatedType type = SpecFullTop)
16138 {
16139 if (LValue proven = isProvenValue(type & SpecCell, ~SpecFunction))
16140 return proven;
16141 return isNotType(cell, JSFunctionType);
16142 }
16143
16144 LValue isExoticForTypeof(LValue cell, SpeculatedType type = SpecFullTop)
16145 {
16146 if (!(type & SpecObjectOther))
16147 return m_out.booleanFalse;
16148 return m_out.testNonZero32(
16149 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
16150 m_out.constInt32(MasqueradesAsUndefined | OverridesGetCallData));
16151 }
16152
16153 LValue isType(LValue cell, JSType type)
16154 {
16155 return m_out.equal(
16156 m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType),
16157 m_out.constInt32(type));
16158 }
16159
16160 LValue isNotType(LValue cell, JSType type)
16161 {
16162 return m_out.logicalNot(isType(cell, type));
16163 }
16164
    // Speculates that the already-lowered cell is an object; OSR exits otherwise.
    void speculateObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
    }
16169
16170 void speculateObject(Edge edge)
16171 {
16172 speculateObject(edge, lowCell(edge));
16173 }
16174
    // Speculates that the already-lowered cell is a JSArray (exact ArrayType).
    void speculateArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecArray, isNotType(cell, ArrayType));
    }
16180
16181 void speculateArray(Edge edge)
16182 {
16183 speculateArray(edge, lowCell(edge));
16184 }
16185
    // Speculates that the already-lowered cell is a JSFunction.
    void speculateFunction(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecFunction, isNotFunction(cell));
    }
16190
16191 void speculateFunction(Edge edge)
16192 {
16193 speculateFunction(edge, lowCell(edge));
16194 }
16195
    // Speculates that the edge is either an object or "other" (null/undefined).
    // Splits on cell-ness and applies the appropriate type filter on each path.
    void speculateObjectOrOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock primitiveCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isNotCell(value, provenType(edge)), unsure(primitiveCase), unsure(cellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, primitiveCase);

        // Cell path: any cell must be an object.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, (~SpecCellCheck) | SpecObject, isNotObject(value));

        m_out.jump(continuation);

        m_out.appendTo(primitiveCase, continuation);

        // Non-cell path: must be "other", i.e. null or undefined.
        FTL_TYPE_CHECK(
            jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16225
    // Speculates that the already-lowered cell is a plain final object.
    void speculateFinalObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecFinalObject, isNotType(cell, FinalObjectType));
    }
16231
16232 void speculateFinalObject(Edge edge)
16233 {
16234 speculateFinalObject(edge, lowCell(edge));
16235 }
16236
    // Speculates that the already-lowered cell is a RegExpObject.
    void speculateRegExpObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecRegExpObject, isNotType(cell, RegExpObjectType));
    }
16242
16243 void speculateRegExpObject(Edge edge)
16244 {
16245 speculateRegExpObject(edge, lowCell(edge));
16246 }
16247
    // Speculates that the already-lowered cell is a ProxyObject.
    void speculateProxyObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecProxyObject, isNotType(cell, ProxyObjectType));
    }
16253
16254 void speculateProxyObject(Edge edge)
16255 {
16256 speculateProxyObject(edge, lowCell(edge));
16257 }
16258
    // Speculates that the already-lowered cell is a derived (subclassed) array.
    void speculateDerivedArray(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDerivedArray, isNotType(cell, DerivedArrayType));
    }
16264
16265 void speculateDerivedArray(Edge edge)
16266 {
16267 speculateDerivedArray(edge, lowCell(edge));
16268 }
16269
    // Speculates that the already-lowered cell is a JSMap.
    void speculateMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecMapObject, isNotType(cell, JSMapType));
    }
16275
16276 void speculateMapObject(Edge edge)
16277 {
16278 speculateMapObject(edge, lowCell(edge));
16279 }
16280
    // Speculates that the already-lowered cell is a JSSet.
    void speculateSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecSetObject, isNotType(cell, JSSetType));
    }
16286
16287 void speculateSetObject(Edge edge)
16288 {
16289 speculateSetObject(edge, lowCell(edge));
16290 }
16291
    // Speculates that the already-lowered cell is a JSWeakMap.
    void speculateWeakMapObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakMapObject, isNotType(cell, JSWeakMapType));
    }
16297
16298 void speculateWeakMapObject(Edge edge)
16299 {
16300 speculateWeakMapObject(edge, lowCell(edge));
16301 }
16302
    // Speculates that the already-lowered cell is a JSWeakSet.
    void speculateWeakSetObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecWeakSetObject, isNotType(cell, JSWeakSetType));
    }
16308
16309 void speculateWeakSetObject(Edge edge)
16310 {
16311 speculateWeakSetObject(edge, lowCell(edge));
16312 }
16313
    // Speculates that the already-lowered cell is a DataView.
    void speculateDataViewObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(
            jsValueValue(cell), edge, SpecDataViewObject, isNotType(cell, DataViewType));
    }
16319
16320 void speculateDataViewObject(Edge edge)
16321 {
16322 speculateDataViewObject(edge, lowCell(edge));
16323 }
16324
    // Speculates that the already-lowered cell is a JSString.
    void speculateString(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecString, isNotString(cell));
    }
16329
16330 void speculateString(Edge edge)
16331 {
16332 speculateString(edge, lowCell(edge));
16333 }
16334
    // Speculates that the value is either a JSString or "other" (null/undefined).
    // Splits on cell-ness and applies the appropriate type filter on each path.
    void speculateStringOrOther(Edge edge, LValue value)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LBasicBlock cellCase = m_out.newBlock();
        LBasicBlock notCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(cellCase), unsure(notCellCase));

        LBasicBlock lastNext = m_out.appendTo(cellCase, notCellCase);

        // Cell path: any cell must be a string.
        FTL_TYPE_CHECK(jsValueValue(value), edge, (~SpecCellCheck) | SpecString, isNotString(value));

        m_out.jump(continuation);
        m_out.appendTo(notCellCase, continuation);

        // Non-cell path: must be "other", i.e. null or undefined.
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecCellCheck | SpecOther, isNotOther(value));

        m_out.jump(continuation);
        m_out.appendTo(continuation, lastNext);
    }
16358
16359 void speculateStringOrOther(Edge edge)
16360 {
16361 speculateStringOrOther(edge, lowJSValue(edge, ManualOperandSpeculation));
16362 }
16363
    // Speculates that the string is an "ident": a non-rope string whose StringImpl
    // is atomic. The caller supplies both the JSString and its loaded StringImpl.
    void speculateStringIdent(Edge edge, LValue string, LValue stringImpl)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringIdent | ~SpecString))
            return;

        // Rope strings have no resolved StringImpl, so they can never be idents.
        speculate(BadType, jsValueValue(string), edge.node(), isRopeString(string));
        // OSR exit unless the StringImpl's flag word marks it as atomic.
        speculate(
            BadType, jsValueValue(string), edge.node(),
            m_out.testIsZero32(
                m_out.load32(stringImpl, m_heaps.StringImpl_hashAndFlags),
                m_out.constInt32(StringImpl::flagIsAtomic())));
        m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
    }
16377
    // Convenience overload: lowStringIdent() performs the ident speculation itself.
    void speculateStringIdent(Edge edge)
    {
        lowStringIdent(edge);
    }
16382
    // Speculates that the edge is a StringObject (wrapper object, not a primitive
    // string).
    void speculateStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        speculateStringObjectForCell(edge, lowCell(edge));
    }
16390
    // Speculates that the edge is either a primitive JSString or a StringObject,
    // by comparing the cell's JSType against both candidates.
    void speculateStringOrStringObject(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        // lowCell() may itself filter the edge, so re-check before emitting the
        // branchy type test.
        LValue cellBase = lowCell(edge);
        if (!m_interpreter.needsTypeCheck(edge, SpecString | SpecStringObject))
            return;

        LBasicBlock notString = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue type = m_out.load8ZeroExt32(cellBase, m_heaps.JSCell_typeInfoType);
        m_out.branch(
            m_out.equal(type, m_out.constInt32(StringType)),
            unsure(continuation), unsure(notString));

        // Not a string: it must be a StringObject, else OSR exit.
        LBasicBlock lastNext = m_out.appendTo(notString, continuation);
        speculate(
            BadType, jsValueValue(cellBase), edge.node(),
            m_out.notEqual(type, m_out.constInt32(StringObjectType)));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
        m_interpreter.filter(edge, SpecString | SpecStringObject);
    }
16417
    // Speculates that the already-lowered cell is a StringObject, by JSType compare.
    void speculateStringObjectForCell(Edge edge, LValue cell)
    {
        if (!m_interpreter.needsTypeCheck(edge, SpecStringObject))
            return;

        LValue type = m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoType);
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecStringObject, m_out.notEqual(type, m_out.constInt32(StringObjectType)));
    }
16426
    // Speculates that the already-lowered cell is a Symbol.
    void speculateSymbol(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecSymbol, isNotSymbol(cell));
    }
16431
16432 void speculateSymbol(Edge edge)
16433 {
16434 speculateSymbol(edge, lowCell(edge));
16435 }
16436
    // Speculates that the already-lowered cell is a BigInt.
    void speculateBigInt(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecBigInt, isNotBigInt(cell));
    }
16441
16442 void speculateBigInt(Edge edge)
16443 {
16444 speculateBigInt(edge, lowCell(edge));
16445 }
16446
    // Speculates that the cell is an object and, when the masquerades-as-undefined
    // watchpoint is no longer valid, additionally that it does not masquerade as
    // undefined (i.e. it is not "null-like").
    void speculateNonNullObject(Edge edge, LValue cell)
    {
        FTL_TYPE_CHECK(jsValueValue(cell), edge, SpecObject, isNotObject(cell));
        // If the watchpoint still holds, no object in this world masquerades as
        // undefined, so the flags check below can be skipped entirely.
        if (masqueradesAsUndefinedWatchpointIsStillValid())
            return;

        speculate(
            BadType, jsValueValue(cell), edge.node(),
            m_out.testNonZero32(
                m_out.load8ZeroExt32(cell, m_heaps.JSCell_typeInfoFlags),
                m_out.constInt32(MasqueradesAsUndefined)));
    }
16459
    // Speculates that the edge is a bytecode number (int32 or boxed double).
    void speculateNumber(Edge edge)
    {
        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        FTL_TYPE_CHECK(jsValueValue(value), edge, SpecBytecodeNumber, isNotNumber(value));
    }
16465
    // Speculates that the edge is a real (non-NaN) number. The common case — the
    // unboxed double compares equal to itself — needs no check; only the NaN-ish
    // path falls through to require an Int32.
    void speculateRealNumber(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        LValue doubleValue = unboxDouble(value);

        LBasicBlock intCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // doubleEqual(x, x) is false only for NaN — including the case where
        // `value` was not a boxed double at all and unboxing produced garbage.
        m_out.branch(
            m_out.doubleEqual(doubleValue, doubleValue),
            usually(continuation), rarely(intCase));

        LBasicBlock lastNext = m_out.appendTo(intCase, continuation);

        // NOTE(review): this uses m_node->child1() rather than `edge`; presumably
        // callers only ever pass child1 here — confirm before reusing elsewhere.
        typeCheck(
            jsValueValue(value), m_node->child1(), SpecBytecodeRealNumber,
            isNotInt32(value, provenType(m_node->child1()) & ~SpecFullDouble));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16491
    // Speculates that a double-rep edge holds a real (non-NaN) double.
    // doubleNotEqualOrUnordered(x, x) is true exactly when x is NaN.
    void speculateDoubleRepReal(Edge edge)
    {
        // Do an early return here because lowDouble() can create a lot of control flow.
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowDouble(edge);
        FTL_TYPE_CHECK(
            doubleValue(value), edge, SpecDoubleReal,
            m_out.doubleNotEqualOrUnordered(value, value));
    }
16503
    // Speculates that a double-rep edge holds a value representable as a strict
    // Int52; doubleToStrictInt52() emits the range/exactness checks itself.
    void speculateDoubleRepAnyInt(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        doubleToStrictInt52(edge, lowDouble(edge));
    }
16511
    // Speculates that the edge is a boolean; lowBoolean() performs the check.
    void speculateBoolean(Edge edge)
    {
        lowBoolean(edge);
    }
16516
    // Speculates that the edge is not a "string variable": anything is allowed
    // except a non-ident string. Strings that are idents (atomic, non-rope) pass.
    void speculateNotStringVar(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecStringVar))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock isStringCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        // Non-cells trivially pass.
        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        // Non-string cells trivially pass too.
        LBasicBlock lastNext = m_out.appendTo(isCellCase, isStringCase);
        m_out.branch(isString(value, provenType(edge)), unsure(isStringCase), unsure(continuation));

        // Strings must be idents; speculateStringIdent() exits otherwise.
        m_out.appendTo(isStringCase, continuation);
        speculateStringIdent(edge, value, m_out.loadPtr(value, m_heaps.JSString_value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16539
    // Speculates that the edge is not a Symbol: non-cells pass immediately,
    // cells OSR exit if their type is Symbol.
    void speculateNotSymbol(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge, ~SpecSymbol))
            return;

        ASSERT(mayHaveTypeCheck(edge.useKind()));
        LValue value = lowJSValue(edge, ManualOperandSpeculation);

        LBasicBlock isCellCase = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(isCell(value, provenType(edge)), unsure(isCellCase), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isCellCase, continuation);
        speculate(BadType, jsValueValue(value), edge.node(), isSymbol(value));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);

        // Record the narrowed type with the abstract interpreter.
        m_interpreter.filter(edge, ~SpecSymbol);
    }
16561
    // Speculates that the edge is "other" (null or undefined).
    void speculateOther(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecOther, isNotOther(value));
    }
16570
    // Speculates that the edge is "misc" (boolean, null, or undefined).
    void speculateMisc(Edge edge)
    {
        if (!m_interpreter.needsTypeCheck(edge))
            return;

        LValue value = lowJSValue(edge, ManualOperandSpeculation);
        typeCheck(jsValueValue(value), edge, SpecMisc, isNotMisc(value));
    }
16579
    // Speculates that the typed-array view has not been neutered (its buffer
    // detached). Only views in WastefulTypedArray mode are checked; other modes
    // skip straight to the continuation.
    void speculateTypedArrayIsNotNeutered(LValue base)
    {
        LBasicBlock isWasteful = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LValue mode = m_out.load32(base, m_heaps.JSArrayBufferView_mode);
        m_out.branch(m_out.equal(mode, m_out.constInt32(WastefulTypedArray)),
            unsure(isWasteful), unsure(continuation));

        LBasicBlock lastNext = m_out.appendTo(isWasteful, continuation);
        LValue vector = m_out.loadPtr(base, m_heaps.JSArrayBufferView_vector);
        // FIXME: We could probably make this a mask.
        // https://bugs.webkit.org/show_bug.cgi?id=197701
        // Strip the pointer tag before the null test; a null vector triggers the exit.
        vector = removeArrayPtrTag(vector);
        speculate(Uncountable, jsValueValue(vector), m_node, m_out.isZero64(vector));
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16599
16600 bool masqueradesAsUndefinedWatchpointIsStillValid()
16601 {
16602 return m_graph.masqueradesAsUndefinedWatchpointIsStillValid(m_node->origin.semantic);
16603 }
16604
    // Loads the cell's GC state byte (zero-extended to 32 bits); this byte drives
    // the store-barrier fast path below.
    LValue loadCellState(LValue base)
    {
        return m_out.load8ZeroExt32(base, m_heaps.JSCell_cellState);
    }
16609
    // Emits a GC store barrier on `base`. The fast path skips the slow call when
    // the cell state is above the threshold. When `isFenced`, the threshold is
    // read from the heap and a fence plus re-check guards against racing with the
    // concurrent collector before taking the slow path.
    void emitStoreBarrier(LValue base, bool isFenced)
    {
        LBasicBlock recheckPath = nullptr;
        if (isFenced)
            recheckPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(isFenced ? recheckPath : slowPath);

        // Fenced: the threshold is dynamic (heap-controlled); unfenced: a constant.
        LValue threshold;
        if (isFenced)
            threshold = m_out.load32(m_out.absolute(vm().heap.addressOfBarrierThreshold()));
        else
            threshold = m_out.constInt32(blackThreshold);

        m_out.branch(
            m_out.above(loadCellState(base), threshold),
            usually(continuation), rarely(isFenced ? recheckPath : slowPath));

        if (isFenced) {
            m_out.appendTo(recheckPath, slowPath);

            // Fence, then re-read the cell state against the constant threshold
            // before committing to the slow path.
            m_out.fence(&m_heaps.root, &m_heaps.JSCell_cellState);

            m_out.branch(
                m_out.above(loadCellState(base), m_out.constInt32(blackThreshold)),
                usually(continuation), rarely(slowPath));
        }

        m_out.appendTo(slowPath, continuation);

        // Slow path: call the runtime barrier; decorate the call for alias
        // analysis (reads the whole heap, writes the cell state).
        LValue call = vmCall(Void, m_out.operation(operationWriteBarrierSlowPath), m_callFrame, base);
        m_heaps.decorateCCallRead(&m_heaps.root, call);
        m_heaps.decorateCCallWrite(&m_heaps.JSCell_cellState, call);

        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16650
    // Emits a mutator fence. On x86 the fence is emitted unconditionally; other
    // targets branch on Heap::addressOfMutatorShouldBeFenced() and only fence on
    // the rare path.
    void mutatorFence()
    {
        if (isX86()) {
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(slowPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(continuation));

        m_out.appendTo(slowPath, continuation);

        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16674
    // Stores a new butterfly into `object`, first "nuking" the structure ID
    // (setting nukedStructureIDBit) with fences around the butterfly store —
    // presumably so a concurrent observer never pairs the new butterfly with the
    // old, un-nuked structure (confirm against Heap/Structure documentation).
    // On x86 this sequence is emitted unconditionally; elsewhere a plain
    // butterfly store suffices unless the heap says the mutator must be fenced.
    void nukeStructureAndSetButterfly(LValue butterfly, LValue object)
    {
        if (isX86()) {
            m_out.store32(
                m_out.bitOr(
                    m_out.load32(object, m_heaps.JSCell_structureID),
                    m_out.constInt32(nukedStructureIDBit())),
                object, m_heaps.JSCell_structureID);
            m_out.fence(&m_heaps.root, nullptr);
            m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
            m_out.fence(&m_heaps.root, nullptr);
            return;
        }

        LBasicBlock fastPath = m_out.newBlock();
        LBasicBlock slowPath = m_out.newBlock();
        LBasicBlock continuation = m_out.newBlock();

        LBasicBlock lastNext = m_out.insertNewBlocksBefore(fastPath);

        m_out.branch(
            m_out.load8ZeroExt32(m_out.absolute(vm().heap.addressOfMutatorShouldBeFenced())),
            rarely(slowPath), usually(fastPath));

        // Fast path: no concurrent collector to worry about; just store.
        m_out.appendTo(fastPath, slowPath);

        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.jump(continuation);

        // Slow path: nuke structure, fence, store butterfly, fence.
        m_out.appendTo(slowPath, continuation);

        m_out.store32(
            m_out.bitOr(
                m_out.load32(object, m_heaps.JSCell_structureID),
                m_out.constInt32(nukedStructureIDBit())),
            object, m_heaps.JSCell_structureID);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.storePtr(butterfly, object, m_heaps.JSObject_butterfly);
        m_out.fence(&m_heaps.root, nullptr);
        m_out.jump(continuation);

        m_out.appendTo(continuation, lastNext);
    }
16718
16719 LValue preciseIndexMask64(LValue value, LValue index, LValue limit)
16720 {
16721 return m_out.bitAnd(
16722 value,
16723 m_out.aShr(
16724 m_out.sub(
16725 index,
16726 m_out.opaque(limit)),
16727 m_out.constInt32(63)));
16728 }
16729
16730 LValue preciseIndexMask32(LValue value, LValue index, LValue limit)
16731 {
16732 return preciseIndexMask64(value, m_out.zeroExt(index, Int64), m_out.zeroExt(limit, Int64));
16733 }
16734
    // Calls a runtime ("VM") function. Records the call site index first
    // (callPreflight) and, if the node may exit, checks for a pending exception
    // afterwards (callCheck). Nodes that cannot exit must not throw; debug builds
    // verify that by trapping if an exception is pending.
    template<typename... Args>
    LValue vmCall(LType type, LValue function, Args&&... args)
    {
        callPreflight();
        LValue result = m_out.call(type, function, std::forward<Args>(args)...);
        if (mayExit(m_graph, m_node))
            callCheck();
        else {
            // We can't exit due to an exception, so we also can't throw an exception.
#ifndef NDEBUG
            LBasicBlock crash = m_out.newBlock();
            LBasicBlock continuation = m_out.newBlock();

            LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
            LValue hadException = m_out.notZero64(exception);

            m_out.branch(
                hadException, rarely(crash), usually(continuation));

            LBasicBlock lastNext = m_out.appendTo(crash, continuation);
            m_out.unreachable();

            m_out.appendTo(continuation, lastNext);
#endif
        }
        return result;
    }
16762
16763 void callPreflight(CodeOrigin codeOrigin)
16764 {
16765 CallSiteIndex callSiteIndex = m_ftlState.jitCode->common.addCodeOrigin(codeOrigin);
16766 m_out.store32(
16767 m_out.constInt32(callSiteIndex.bits()),
16768 tagFor(CallFrameSlot::argumentCount));
16769 }
16770
16771 void callPreflight()
16772 {
16773 callPreflight(codeOriginDescriptionOfCallSite());
16774 }
16775
    // Returns the code origin to attribute to the current call site. For inlined
    // tail calls, the frame should appear to belong to the caller that survives
    // the tail call, not the inlined tail-calling function itself.
    CodeOrigin codeOriginDescriptionOfCallSite() const
    {
        CodeOrigin codeOrigin = m_node->origin.semantic;
        if (m_node->op() == TailCallInlinedCaller
            || m_node->op() == TailCallVarargsInlinedCaller
            || m_node->op() == TailCallForwardVarargsInlinedCaller
            || m_node->op() == DirectTailCallInlinedCaller) {
            // This case arises when you have a situation like this:
            // foo makes a call to bar, bar is inlined in foo. bar makes a call
            // to baz and baz is inlined in bar. And then baz makes a tail-call to jaz,
            // and jaz is inlined in baz. We want the callframe for jaz to appear to
            // have caller be bar.
            codeOrigin = *codeOrigin.inlineCallFrame()->getCallerSkippingTailCalls();
        }

        return codeOrigin;
    }
16793
    // Checks for a pending VM exception after a call. If some handler in this
    // machine frame will catch it, emit an OSR exit to that handler; otherwise
    // branch to the shared exception-handling block.
    void callCheck()
    {
        // Exception fuzzing (testing aid): may synthesize an exception here.
        if (Options::useExceptionFuzz())
            m_out.call(Void, m_out.operation(operationExceptionFuzz), m_callFrame);

        LValue exception = m_out.load64(m_out.absolute(vm().addressOfException()));
        LValue hadException = m_out.notZero64(exception);

        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        if (m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler)) {
            bool exitOK = true;
            bool isExceptionHandler = true;
            appendOSRExit(
                ExceptionCheck, noValue(), nullptr, hadException,
                m_origin.withForExitAndExitOK(opCatchOrigin, exitOK), isExceptionHandler);
            return;
        }

        LBasicBlock continuation = m_out.newBlock();

        m_out.branch(
            hadException, rarely(m_handleExceptions), usually(continuation));

        m_out.appendTo(continuation);
    }
16820
    // Prepares a patchpoint for exception handling. If no handler in this machine
    // frame will catch, returns the default handle; otherwise builds an OSR exit
    // descriptor, appends its exit arguments to the patchpoint as cold stackmap
    // uses, and returns a handle that knows where those arguments start.
    RefPtr<PatchpointExceptionHandle> preparePatchpointForExceptions(PatchpointValue* value)
    {
        CodeOrigin opCatchOrigin;
        HandlerInfo* exceptionHandler;
        bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_origin.forExit, opCatchOrigin, exceptionHandler);
        if (!willCatchException)
            return PatchpointExceptionHandle::defaultHandle(m_ftlState);

        dataLogLnIf(verboseCompilationEnabled(), "    Patchpoint exception OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        bool exitOK = true;
        NodeOrigin origin = m_origin.withForExitAndExitOK(opCatchOrigin, exitOK);

        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(noValue(), nullptr);

        // Compute the offset into the StackmapGenerationParams where we will find the exit arguments
        // we are about to append. We need to account for both the children we've already added, and
        // for the possibility of a result value if the patchpoint is not void.
        unsigned offset = value->numChildren();
        if (value->type() != Void)
            offset++;

        // Use LateColdAny to ensure that the stackmap arguments interfere with the patchpoint's
        // result and with any late-clobbered registers.
        value->appendVectorWithRep(
            buildExitArguments(exitDescriptor, opCatchOrigin, noValue()),
            ValueRep::LateColdAny);

        return PatchpointExceptionHandle::create(
            m_ftlState, exitDescriptor, origin, offset, *exceptionHandler);
    }
16852
    // Maps a DFG basic block to the corresponding lowered (B3) block.
    LBasicBlock lowBlock(DFG::BasicBlock* block)
    {
        return m_blocks.get(block);
    }
16857
16858 OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, Node* highValue)
16859 {
16860 return appendOSRExitDescriptor(lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue));
16861 }
16862
    // Allocates a new OSR exit descriptor sized for the current availability map's
    // arguments and locals.
    OSRExitDescriptor* appendOSRExitDescriptor(FormattedValue lowValue, const MethodOfGettingAValueProfile& profile)
    {
        return &m_ftlState.jitCode->osrExitDescriptors.alloc(
            lowValue.format(), profile,
            availabilityMap().m_locals.numberOfArguments(),
            availabilityMap().m_locals.numberOfLocals());
    }
16870
16871 void appendOSRExit(
16872 ExitKind kind, FormattedValue lowValue, Node* highValue, LValue failCondition,
16873 NodeOrigin origin, bool isExceptionHandler = false)
16874 {
16875 return appendOSRExit(kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue),
16876 failCondition, origin, isExceptionHandler);
16877 }
16878
    // Appends an OSR exit that fires when `failCondition` is true. May widen the
    // condition under OSR-exit fuzzing; statically-false conditions emit nothing.
    void appendOSRExit(
        ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, LValue failCondition,
        NodeOrigin origin, bool isExceptionHandler = false)
    {
        dataLogLnIf(verboseCompilationEnabled(), "    OSR exit #", m_ftlState.jitCode->osrExitDescriptors.size(), " with availability: ", availabilityMap());

        DFG_ASSERT(m_graph, m_node, origin.exitOK);

        // OSR-exit fuzzing (testing aid): count checks and force the exit to fire
        // at/after configured check numbers. Exception-handler exits are exempt.
        if (!isExceptionHandler
            && Options::useOSRExitFuzz()
            && canUseOSRExitFuzzing(m_graph.baselineCodeBlockFor(m_node->origin.semantic))
            && doOSRExitFuzzing()) {
            LValue numberOfFuzzChecks = m_out.add(
                m_out.load32(m_out.absolute(&g_numberOfOSRExitFuzzChecks)),
                m_out.int32One);

            m_out.store32(numberOfFuzzChecks, m_out.absolute(&g_numberOfOSRExitFuzzChecks));

            if (unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.aboveOrEqual(numberOfFuzzChecks, m_out.constInt32(atOrAfter)));
            }
            if (unsigned at = Options::fireOSRExitFuzzAt()) {
                failCondition = m_out.bitOr(
                    failCondition,
                    m_out.equal(numberOfFuzzChecks, m_out.constInt32(at)));
            }
        }

        // A provably-false condition means the exit can never fire: emit nothing.
        if (failCondition == m_out.booleanFalse)
            return;

        blessSpeculation(
            m_out.speculate(failCondition), kind, lowValue, profile, origin);
    }
16915
16916 void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, Node* highValue, NodeOrigin origin)
16917 {
16918 blessSpeculation(value, kind, lowValue, m_graph.methodOfGettingAValueProfileFor(m_node, highValue), origin);
16919 }
16920
    // Attaches OSR exit metadata to a speculation CheckValue: builds an exit
    // descriptor, appends the exit arguments as cold stackmap uses, and installs a
    // generator that emits the actual exit at code-generation time.
    void blessSpeculation(CheckValue* value, ExitKind kind, FormattedValue lowValue, const MethodOfGettingAValueProfile& profile, NodeOrigin origin)
    {
        OSRExitDescriptor* exitDescriptor = appendOSRExitDescriptor(lowValue, profile);

        value->appendColdAnys(buildExitArguments(exitDescriptor, origin.forExit, lowValue));

        // Capture by value: the lambda runs during B3 code generation, after this
        // stack frame is gone.
        State* state = &m_ftlState;
        value->setGenerator(
            [=] (CCallHelpers& jit, const B3::StackmapGenerationParams& params) {
                exitDescriptor->emitOSRExit(
                    *state, kind, origin, jit, params, 0);
            });
    }
16934
16935 StackmapArgumentList buildExitArguments(
16936 OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, FormattedValue lowValue,
16937 unsigned offsetOfExitArgumentsInStackmapLocations = 0)
16938 {
16939 StackmapArgumentList result;
16940 buildExitArguments(
16941 exitDescriptor, exitOrigin, result, lowValue, offsetOfExitArgumentsInStackmapLocations);
16942 return result;
16943 }
16944
    // Fills `arguments` with the stackmap values the OSR exit needs and records,
    // per local and per phantom-allocation heap field, how to recover each value
    // at exit time (into the descriptor's m_values / m_materializations).
    void buildExitArguments(
        OSRExitDescriptor* exitDescriptor, CodeOrigin exitOrigin, StackmapArgumentList& arguments, FormattedValue lowValue,
        unsigned offsetOfExitArgumentsInStackmapLocations = 0)
    {
        if (!!lowValue)
            arguments.append(lowValue.value());

        // Restrict availability to what is actually live in bytecode at the exit.
        AvailabilityMap availabilityMap = this->availabilityMap();
        availabilityMap.pruneByLiveness(m_graph, exitOrigin);

        // Create one materialization per distinct phantom allocation.
        HashMap<Node*, ExitTimeObjectMaterialization*> map;
        availabilityMap.forEachAvailability(
            [&] (Availability availability) {
                if (!availability.shouldUseNode())
                    return;

                Node* node = availability.node();
                if (!node->isPhantomAllocation())
                    return;

                auto result = map.add(node, nullptr);
                if (result.isNewEntry) {
                    result.iterator->value =
                        exitDescriptor->m_materializations.add(node->op(), node->origin.semantic);
                }
            });

        // Record an ExitValue for every argument/local slot.
        for (unsigned i = 0; i < exitDescriptor->m_values.size(); ++i) {
            int operand = exitDescriptor->m_values.operandForIndex(i);

            Availability availability = availabilityMap.m_locals[i];

            if (Options::validateFTLOSRExitLiveness()
                && m_graph.m_plan.mode() != FTLForOSREntryMode) {

                if (availability.isDead() && m_graph.isLiveInBytecode(VirtualRegister(operand), exitOrigin))
                    DFG_CRASH(m_graph, m_node, toCString("Live bytecode local not available: operand = ", VirtualRegister(operand), ", availability = ", availability, ", origin = ", exitOrigin).data());
            }
            ExitValue exitValue = exitValueForAvailability(arguments, map, availability);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            exitDescriptor->m_values[i] = exitValue;
        }

        // Record an ExitValue for every field of every phantom allocation.
        for (auto heapPair : availabilityMap.m_heap) {
            Node* node = heapPair.key.base();
            ExitTimeObjectMaterialization* materialization = map.get(node);
            if (!materialization)
                DFG_CRASH(m_graph, m_node, toCString("Could not find materialization for ", node, " in ", availabilityMap).data());
            ExitValue exitValue = exitValueForAvailability(arguments, map, heapPair.value);
            if (exitValue.hasIndexInStackmapLocations())
                exitValue.adjustStackmapLocationsIndexByOffset(offsetOfExitArgumentsInStackmapLocations);
            materialization->add(
                heapPair.key.descriptor(),
                exitValue);
        }

        if (verboseCompilationEnabled()) {
            dataLog("        Exit values: ", exitDescriptor->m_values, "\n");
            if (!exitDescriptor->m_materializations.isEmpty()) {
                dataLog("        Materializations: \n");
                for (ExitTimeObjectMaterialization* materialization : exitDescriptor->m_materializations)
                    dataLog("            ", pointerDump(materialization), "\n");
            }
        }
    }
17011
17012 ExitValue exitValueForAvailability(
17013 StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
17014 Availability availability)
17015 {
17016 FlushedAt flush = availability.flushedAt();
17017 switch (flush.format()) {
17018 case DeadFlush:
17019 case ConflictingFlush:
17020 if (availability.hasNode())
17021 return exitValueForNode(arguments, map, availability.node());
17022
17023 // This means that the value is dead. It could be dead in bytecode or it could have
17024 // been killed by our DCE, which can sometimes kill things even if they were live in
17025 // bytecode.
17026 return ExitValue::dead();
17027
17028 case FlushedJSValue:
17029 case FlushedCell:
17030 case FlushedBoolean:
17031 return ExitValue::inJSStack(flush.virtualRegister());
17032
17033 case FlushedInt32:
17034 return ExitValue::inJSStackAsInt32(flush.virtualRegister());
17035
17036 case FlushedInt52:
17037 return ExitValue::inJSStackAsInt52(flush.virtualRegister());
17038
17039 case FlushedDouble:
17040 return ExitValue::inJSStackAsDouble(flush.virtualRegister());
17041 }
17042
17043 DFG_CRASH(m_graph, m_node, "Invalid flush format");
17044 return ExitValue::dead();
17045 }
17046
17047 ExitValue exitValueForNode(
17048 StackmapArgumentList& arguments, const HashMap<Node*, ExitTimeObjectMaterialization*>& map,
17049 Node* node)
17050 {
17051 // NOTE: In FTL->B3, we cannot generate code here, because m_output is positioned after the
17052 // stackmap value. Like all values, the stackmap value cannot use a child that is defined after
17053 // it.
17054
17055 ASSERT(node->shouldGenerate());
17056 ASSERT(node->hasResult());
17057
17058 if (node) {
17059 switch (node->op()) {
17060 case BottomValue:
17061 // This might arise in object materializations. I actually doubt that it would,
17062 // but it seems worthwhile to be conservative.
17063 return ExitValue::dead();
17064
17065 case JSConstant:
17066 case Int52Constant:
17067 case DoubleConstant:
17068 return ExitValue::constant(node->asJSValue());
17069
17070 default:
17071 if (node->isPhantomAllocation())
17072 return ExitValue::materializeNewObject(map.get(node));
17073 break;
17074 }
17075 }
17076
17077 LoweredNodeValue value = m_int32Values.get(node);
17078 if (isValid(value))
17079 return exitArgument(arguments, DataFormatInt32, value.value());
17080
17081 value = m_int52Values.get(node);
17082 if (isValid(value))
17083 return exitArgument(arguments, DataFormatInt52, value.value());
17084
17085 value = m_strictInt52Values.get(node);
17086 if (isValid(value))
17087 return exitArgument(arguments, DataFormatStrictInt52, value.value());
17088
17089 value = m_booleanValues.get(node);
17090 if (isValid(value))
17091 return exitArgument(arguments, DataFormatBoolean, value.value());
17092
17093 value = m_jsValueValues.get(node);
17094 if (isValid(value))
17095 return exitArgument(arguments, DataFormatJS, value.value());
17096
17097 value = m_doubleValues.get(node);
17098 if (isValid(value))
17099 return exitArgument(arguments, DataFormatDouble, value.value());
17100
17101 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17102 return ExitValue::dead();
17103 }
17104
17105 ExitValue exitArgument(StackmapArgumentList& arguments, DataFormat format, LValue value)
17106 {
17107 ExitValue result = ExitValue::exitArgument(ExitArgument(format, arguments.size()));
17108 arguments.append(value);
17109 return result;
17110 }
17111
17112 ExitValue exitValueForTailCall(StackmapArgumentList& arguments, Node* node)
17113 {
17114 ASSERT(node->shouldGenerate());
17115 ASSERT(node->hasResult());
17116
17117 switch (node->op()) {
17118 case JSConstant:
17119 case Int52Constant:
17120 case DoubleConstant:
17121 return ExitValue::constant(node->asJSValue());
17122
17123 default:
17124 break;
17125 }
17126
17127 LoweredNodeValue value = m_jsValueValues.get(node);
17128 if (isValid(value))
17129 return exitArgument(arguments, DataFormatJS, value.value());
17130
17131 value = m_int32Values.get(node);
17132 if (isValid(value))
17133 return exitArgument(arguments, DataFormatJS, boxInt32(value.value()));
17134
17135 value = m_booleanValues.get(node);
17136 if (isValid(value))
17137 return exitArgument(arguments, DataFormatJS, boxBoolean(value.value()));
17138
17139 // Doubles and Int52 have been converted by ValueRep()
17140 DFG_CRASH(m_graph, m_node, toCString("Cannot find value for node: ", node).data());
17141 }
17142
17143 void setInt32(Node* node, LValue value)
17144 {
17145 m_int32Values.set(node, LoweredNodeValue(value, m_highBlock));
17146 }
17147 void setInt52(Node* node, LValue value)
17148 {
17149 m_int52Values.set(node, LoweredNodeValue(value, m_highBlock));
17150 }
17151 void setStrictInt52(Node* node, LValue value)
17152 {
17153 m_strictInt52Values.set(node, LoweredNodeValue(value, m_highBlock));
17154 }
17155 void setInt52(Node* node, LValue value, Int52Kind kind)
17156 {
17157 switch (kind) {
17158 case Int52:
17159 setInt52(node, value);
17160 return;
17161
17162 case StrictInt52:
17163 setStrictInt52(node, value);
17164 return;
17165 }
17166
17167 DFG_CRASH(m_graph, m_node, "Corrupt int52 kind");
17168 }
17169 void setJSValue(Node* node, LValue value)
17170 {
17171 m_jsValueValues.set(node, LoweredNodeValue(value, m_highBlock));
17172 }
17173 void setBoolean(Node* node, LValue value)
17174 {
17175 m_booleanValues.set(node, LoweredNodeValue(value, m_highBlock));
17176 }
17177 void setStorage(Node* node, LValue value)
17178 {
17179 m_storageValues.set(node, LoweredNodeValue(value, m_highBlock));
17180 }
17181 void setDouble(Node* node, LValue value)
17182 {
17183 m_doubleValues.set(node, LoweredNodeValue(value, m_highBlock));
17184 }
17185
17186 void setInt32(LValue value)
17187 {
17188 setInt32(m_node, value);
17189 }
17190 void setInt52(LValue value)
17191 {
17192 setInt52(m_node, value);
17193 }
17194 void setStrictInt52(LValue value)
17195 {
17196 setStrictInt52(m_node, value);
17197 }
17198 void setInt52(LValue value, Int52Kind kind)
17199 {
17200 setInt52(m_node, value, kind);
17201 }
17202 void setJSValue(LValue value)
17203 {
17204 setJSValue(m_node, value);
17205 }
17206 void setBoolean(LValue value)
17207 {
17208 setBoolean(m_node, value);
17209 }
17210 void setStorage(LValue value)
17211 {
17212 setStorage(m_node, value);
17213 }
17214 void setDouble(LValue value)
17215 {
17216 setDouble(m_node, value);
17217 }
17218
17219 bool isValid(const LoweredNodeValue& value)
17220 {
17221 if (!value)
17222 return false;
17223 if (!m_graph.m_ssaDominators->dominates(value.block(), m_highBlock))
17224 return false;
17225 return true;
17226 }
17227
    // Lazily registers |target| with the compilation plan's weak-reference set so the compiled
    // code keeps a weak handle to it.
    void addWeakReference(JSCell* target)
    {
        m_graph.m_plan.weakReferences().addLazily(target);
    }
17232
    // Emits code that decodes a cell's StructureID into a Structure*. Table entries are
    // XOR-encoded with entropy bits derived from the ID (see StructureIDTable), so we rebuild
    // those bits and XOR them out after the table lookup.
    LValue loadStructure(LValue value)
    {
        LValue structureID = m_out.load32(value, m_heaps.JSCell_structureID);
        LValue tableBase = m_out.loadPtr(m_out.absolute(vm().heap.structureIDTable().base()));
        // High bits of the ID index the table; the low s_numberOfEntropyBits are entropy.
        LValue tableIndex = m_out.aShr(structureID, m_out.constInt32(StructureIDTable::s_numberOfEntropyBits));
        LValue entropyBits = m_out.shl(m_out.zeroExtPtr(structureID), m_out.constInt32(StructureIDTable::s_entropyBitsShiftForStructurePointer));
        TypedPointer address = m_out.baseIndex(m_heaps.structureTable, tableBase, m_out.zeroExtPtr(tableIndex));
        LValue encodedStructureBits = m_out.loadPtr(address);
        return m_out.bitXor(encodedStructureBits, entropyBits);
    }
17243
    // Emits a weak pointer constant to |pointer|, first registering it so the plan tracks the
    // reference (weakly) for the lifetime of the compiled code.
    LValue weakPointer(JSCell* pointer)
    {
        addWeakReference(pointer);
        return m_out.weakPointer(m_graph, pointer);
    }
17249
    // Emits a pointer constant for an already-frozen value; no extra weak registration is
    // needed since FrozenValues are tracked by the graph.
    LValue frozenPointer(FrozenValue* value)
    {
        return m_out.weakPointer(value);
    }
17254
    // Emits the 32-bit structure ID of a structure that is already registered (and therefore
    // kept alive) by the graph.
    LValue weakStructureID(RegisteredStructure structure)
    {
        return m_out.constInt32(structure->id());
    }
17259
    // Emits a weak pointer to a registered (non-null) structure.
    LValue weakStructure(RegisteredStructure structure)
    {
        ASSERT(!!structure.get());
        return m_out.weakPointer(m_graph, structure.get());
    }
17265
    // Address of the variable slot |operand| relative to |base|, plus an optional byte offset.
    TypedPointer addressFor(LValue base, int operand, ptrdiff_t offset = 0)
    {
        return m_out.address(base, m_heaps.variables[operand], offset);
    }
    // Address of the payload half of the JSValue slot at |operand|, relative to |base|.
    TypedPointer payloadFor(LValue base, int operand)
    {
        return addressFor(base, operand, PayloadOffset);
    }
    // Address of the tag half of the JSValue slot at |operand|, relative to |base|.
    TypedPointer tagFor(LValue base, int operand)
    {
        return addressFor(base, operand, TagOffset);
    }
    // Convenience overload taking a raw operand index; see the VirtualRegister overload.
    TypedPointer addressFor(int operand, ptrdiff_t offset = 0)
    {
        return addressFor(VirtualRegister(operand), offset);
    }
17282 TypedPointer addressFor(VirtualRegister operand, ptrdiff_t offset = 0)
17283 {
17284 if (operand.isLocal())
17285 return addressFor(m_captured, operand.offset(), offset);
17286 return addressFor(m_callFrame, operand.offset(), offset);
17287 }
    // Convenience overload taking a raw operand index.
    TypedPointer payloadFor(int operand)
    {
        return payloadFor(VirtualRegister(operand));
    }
    // Address of the payload half of |operand|'s JSValue slot.
    TypedPointer payloadFor(VirtualRegister operand)
    {
        return addressFor(operand, PayloadOffset);
    }
    // Convenience overload taking a raw operand index.
    TypedPointer tagFor(int operand)
    {
        return tagFor(VirtualRegister(operand));
    }
    // Address of the tag half of |operand|'s JSValue slot.
    TypedPointer tagFor(VirtualRegister operand)
    {
        return addressFor(operand, TagOffset);
    }
17304
    // The abstract interpreter's current knowledge about |node| at this point in lowering.
    AbstractValue abstractValue(Node* node)
    {
        return m_state.forNode(node);
    }
    // Edge convenience overload; delegates to the Node* version.
    AbstractValue abstractValue(Edge edge)
    {
        return abstractValue(edge.node());
    }
17313
    // The proven speculated type of |node| per the abstract interpreter.
    SpeculatedType provenType(Node* node)
    {
        return abstractValue(node).m_type;
    }
    // Edge convenience overload; delegates to the Node* version.
    SpeculatedType provenType(Edge edge)
    {
        return provenType(edge.node());
    }
17322
    // The proven constant value of |node|, if the abstract interpreter knows one.
    JSValue provenValue(Node* node)
    {
        return abstractValue(node).m_value;
    }
    // Edge convenience overload; delegates to the Node* version.
    JSValue provenValue(Edge edge)
    {
        return provenValue(edge.node());
    }
17331
    // The proven structure set of |node| per the abstract interpreter.
    StructureAbstractValue abstractStructure(Node* node)
    {
        return abstractValue(node).m_structure;
    }
    // Edge convenience overload; delegates to the Node* version.
    StructureAbstractValue abstractStructure(Edge edge)
    {
        return abstractStructure(edge.node());
    }
17340
    // Emits crash code attributed to the block/node currently being lowered.
    void crash()
    {
        crash(m_highBlock, m_node);
    }
17345 void crash(DFG::BasicBlock* block, Node* node)
17346 {
17347 BlockIndex blockIndex = block->index;
17348 unsigned nodeIndex = node ? node->index() : UINT_MAX;
17349#if ASSERT_DISABLED
17350 m_out.patchpoint(Void)->setGenerator(
17351 [=] (CCallHelpers& jit, const StackmapGenerationParams&) {
17352 AllowMacroScratchRegisterUsage allowScratch(jit);
17353
17354 jit.move(CCallHelpers::TrustedImm32(blockIndex), GPRInfo::regT0);
17355 jit.move(CCallHelpers::TrustedImm32(nodeIndex), GPRInfo::regT1);
17356 if (node)
17357 jit.move(CCallHelpers::TrustedImm32(node->op()), GPRInfo::regT2);
17358 jit.abortWithReason(FTLCrash);
17359 });
17360#else
17361 m_out.call(
17362 Void,
17363 m_out.constIntPtr(ftlUnreachable),
17364 // We don't want the CodeBlock to have a weak pointer to itself because
17365 // that would cause it to always get collected.
17366 m_out.constIntPtr(bitwise_cast<intptr_t>(codeBlock())), m_out.constInt32(blockIndex),
17367 m_out.constInt32(nodeIndex));
17368#endif
17369 m_out.unreachable();
17370 }
17371
    // The availability state maintained by the local OSR availability calculator.
    AvailabilityMap& availabilityMap() { return m_availabilityCalculator.m_availability; }
17373
    // The VM this compilation belongs to.
    VM& vm() { return m_graph.m_vm; }
    // The CodeBlock being compiled.
    CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
17376
    Graph& m_graph;
    State& m_ftlState;
    AbstractHeapRepository m_heaps;
    Output m_out;
    Procedure& m_proc;

    // B3 block that exception-handling paths branch to, and the DFG->B3 block mapping.
    LBasicBlock m_handleExceptions;
    HashMap<DFG::BasicBlock*, LBasicBlock> m_blocks;

    // Frequently used base values and tagging constants, materialized once per compilation.
    LValue m_callFrame;
    LValue m_captured;
    LValue m_tagTypeNumber;
    LValue m_tagMask;

    // Per-representation maps from DFG node to its lowered B3 value (with defining block).
    HashMap<Node*, LoweredNodeValue> m_int32Values;
    HashMap<Node*, LoweredNodeValue> m_strictInt52Values;
    HashMap<Node*, LoweredNodeValue> m_int52Values;
    HashMap<Node*, LoweredNodeValue> m_jsValueValues;
    HashMap<Node*, LoweredNodeValue> m_booleanValues;
    HashMap<Node*, LoweredNodeValue> m_storageValues;
    HashMap<Node*, LoweredNodeValue> m_doubleValues;

    HashMap<Node*, LValue> m_phis;

    LocalOSRAvailabilityCalculator m_availabilityCalculator;

    // Abstract interpreter state tracked in lockstep with lowering.
    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
    // The DFG block currently being lowered, and the next DFG/B3 blocks in order.
    DFG::BasicBlock* m_highBlock;
    DFG::BasicBlock* m_nextHighBlock;
    LBasicBlock m_nextLowBlock;

    enum IndexMaskingMode { IndexMaskingDisabled, IndexMaskingEnabled };

    IndexMaskingMode m_indexMaskingMode;

    // Origin/index/node of the DFG node currently being lowered.
    NodeOrigin m_origin;
    unsigned m_nodeIndex;
    Node* m_node;

    // These are used for validating AI state.
    HashMap<Node*, NodeSet> m_liveInToNode;
    HashMap<Node*, AbstractValue> m_aiCheckedNodes;
    String m_graphDump;
17421};
17422
17423} // anonymous namespace
17424
17425void lowerDFGToB3(State& state)
17426{
17427 LowerDFGToB3 lowering(state);
17428 lowering.lower();
17429}
17430
17431} } // namespace JSC::FTL
17432
17433#endif // ENABLE(FTL_JIT)
17434
17435