/*
 * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
        m_disassembler = std::make_unique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy());
    for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}

JITCompiler::~JITCompiler()
{
}

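// Binds every recorded OSR exit to its escape mechanism: speculation-failure jumps collected
// during code generation either jump straight to the shared OSR exit thunk (when probe-based
// exits are enabled) or get a patchable jump that link() later points at the exit-generation
// thunk. When profiling is active, the exit-site labels are also remembered so the compilation
// object can report them.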
void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (UNLIKELY(m_graph.compilation())) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm()->getCTIStub(osrExitThunkGenerator);
    auto osrExitThunkLabel = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        if (Options::useProbeOSRExit()) {
            Jump target = jump();
            addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
                linkBuffer.link(target, osrExitThunkLabel);
            });
        } else
            info.m_patchableJump = patchableJump();
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save the return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

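// When FTL tier-up OSR entry is possible, clear JITCode::neverExecutedEntry as soon as the
// normal entry path runs, so the tier-up machinery can tell that the regular entry point is
// actually being used.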
void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

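// Emits the two shared exception landing pads. The rollback variant serves checks recorded for
// failures that happen before this frame is fully usable (for example the stack-overflow and
// arity-check slow paths) and therefore looks up the handler from the caller's frame; the
// ordinary variant serves exception checks emitted in the body. Both restore callee saves into
// the entry frame buffer, call the lookup operation with (VM*, CallFrame*), and jump to the
// unwind handler.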
void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame)));

        jumpToExceptionHandler(*vm());
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(lookupExceptionHandler)));

        jumpToExceptionHandler(*vm());
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames()->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames();

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

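    // Populate the simple (SwitchImm/SwitchChar) jump tables: every slot starts out pointing at
    // the default (fall-through) block, and then each case overwrites its own slot with the head
    // of its target block. String switches are handled separately below.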
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);

        table.ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    finalizeInlineCaches(m_getByIds, linkBuffer);
    finalizeInlineCaches(m_getByIdsWithThis, linkBuffer);
    finalizeInlineCaches(m_putByIds, linkBuffer);
    finalizeInlineCaches(m_inByIds, linkBuffer);
    finalizeInlineCaches(m_instanceOfs, linkBuffer);

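    // Each JS call site records a fast near call, the patchable callee check ("targetToCheck"),
    // and a slow call. The slow call is initially pointed at the generic link-call thunk, and all
    // of the locations are stashed in the CallLinkInfo so the call can be repatched once the
    // callee is known. Direct calls and direct tail calls follow the same pattern, but link
    // straight to their own slow paths.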
    auto linkCallThunk = FunctionPtr<NoPtrTag>(vm()->getCTIStub(linkCallThunkGenerator).retaggedCode<NoPtrTag>());
    for (auto& record : m_jsCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.slowCall, linkCallThunk);
        info.setCallLocations(
            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.slowCall)),
            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(record.targetToCheck)),
            linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.fastCall));
    }

    for (JSDirectCallRecord& record : m_jsDirectCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.call, linkBuffer.locationOf<NoPtrTag>(record.slowPath));
        info.setCallLocations(
            CodeLocationLabel<JSInternalPtrTag>(),
            linkBuffer.locationOf<JSInternalPtrTag>(record.slowPath),
            linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.call));
    }

    for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
        CallLinkInfo& info = *record.info;
        info.setCallLocations(
            linkBuffer.locationOf<JSInternalPtrTag>(record.patchableJump),
            linkBuffer.locationOf<JSInternalPtrTag>(record.slowPath),
            linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.call));
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    auto target = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        if (!Options::useProbeOSRExit()) {
            linkBuffer.link(info.m_patchableJump.m_jump, target);
            OSRExit& exit = m_jitCode->osrExit[i];
            exit.m_patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(info.m_patchableJump);
        }
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf<JSInternalPtrTag>(info.m_replacementSource),
                linkBuffer.locationOf<OSRExitPtrTag>(info.m_replacementDestination)));
        }
    }

    if (UNLIKELY(m_graph.compilation())) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<MacroAssemblerCodePtr<JSInternalPtrTag>> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf<JSInternalPtrTag>(labels[j]));
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind(), due to an
            // exception thrown in a child call frame.
            CodeLocationLabel<ExceptionHandlerPtrTag> catchLabel = linkBuffer.locationOf<ExceptionHandlerPtrTag>(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}

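// The frame top is the lowest address this frame may touch. For very large frames the
// callFrameRegister + frameTopOffset computation can wrap around, so when the frame is larger
// than the reserved zone we first branch on that wrap (the result comparing above the call
// frame) before comparing against the VM's soft stack limit.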
static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
{
    int frameTopOffset = virtualRegisterForLocal(jit.graph().requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;

    jit.addPtr(MacroAssembler::TrustedImm32(frameTopOffset), GPRInfo::callFrameRegister, GPRInfo::regT1);
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, GPRInfo::callFrameRegister));
    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, MacroAssembler::AbsoluteAddress(jit.vm()->addressOfSoftStackLimit()), GPRInfo::regT1));
}

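// Compilation driver for non-function code (e.g. program and eval code): unlike
// compileFunction() there is no arity check, so there is a single entry point.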
void JITCompiler::compile()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();
    m_speculative = std::make_unique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(m_graph.m_plan));
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer)));
}

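// Compilation driver for function code: in addition to the normal entry point it emits an
// arity-check entry point, which falls back to the arity fixup thunk when too few arguments
// were passed before joining the normal path at fromArityCheck.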
void JITCompiler::compileFunction()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    Label entryLabel(this);
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine that the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
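    // The arity-check entry re-runs the prologue, compares the passed argument count against the
    // expected parameter count, and calls operationCallArityCheck/operationConstructArityCheck on
    // the slow path. A zero result means the frame is already usable; a non-zero result is handed
    // to the arity fixup thunk in argumentGPR0 before falling through to fromArityCheck.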
    Call callArityFixup;
    Label arityCheck;
    bool requiresArityFixup = m_codeBlock->numParameters() != 1;
    if (requiresArityFixup) {
        arityCheck = label();
        compileEntry();

        load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
        branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
        emitStoreCodeOrigin(CodeOrigin(0));
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
        m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
        emitStoreCodeOrigin(CodeOrigin(0));
        move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
        callArityFixup = nearCall();
        jump(fromArityCheck);
    } else
        arityCheck = entryLabel;

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.setFinalizer(std::make_unique<FailedFinalizer>(m_graph.m_plan));
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    if (requiresArityFixup)
        linkBuffer->link(callArityFixup, FunctionPtr<JITThunkPtrTag>(vm()->getCTIStub(arityFixupGenerator).code()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = linkBuffer->locationOf<JSEntryPtrTag>(arityCheck);

    m_graph.m_plan.setFinalizer(std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck));
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (UNLIKELY(m_graph.m_plan.compilation()))
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation(), linkBuffer);
}

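// On JSVALUE32_64, double constants used by the generated code live in an out-of-line constant
// pool owned by the graph. Entries are keyed by bit pattern, so distinct NaN payloads and
// +0.0/-0.0 get their own slots.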
#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = std::make_unique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeCatchEntrypoint(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer, Vector<FlushFormat>&& argumentFormats)
{
    RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
    RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
    m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf<ExceptionHandlerPtrTag>(blockHead), WTFMove(argumentFormats));
}

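// Records blockHead as an OSR entry point and captures the invariants entry must satisfy: the
// expected abstract values at the head of the block, which locals are flushed as doubles or
// Int52, which machine stack slots are in use, and how bytecode locals map to machine locals
// (the reshufflings).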
void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    RELEASE_ASSERT(!basicBlock.isCatchEntrypoint);

    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.locationOf<OSREntryPtrTag>(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeBytecodeTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeBytecodeTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider the case where we hoist a string
    // addition out of a loop, and we exit at the point of that concatenation because of an
    // out-of-memory exception.
    // If the original loop had a try/catch around the string concatenation, and we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out-of-memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
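    //
    // So: if this check's forExit origin will be caught in this machine frame, we emit a
    // non-patchable exception check and register an exception-handling OSR exit that restores
    // the bytecode state the catch handler expects; otherwise the check is simply collected
    // into m_exceptionChecks and handled by the shared landing pad in compileExceptionHandlers().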
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(*vm());
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck(*vm()));
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}

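// If any catch block can be OSR-entered, allocate a scratch buffer large enough to hold the live
// bytecode locals; the catch OSR-entry machinery uses it to shuttle those values into place when
// entering the DFG frame at a catch handler.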
void JITCompiler::makeCatchOSREntryBuffer()
{
    if (m_graph.m_maxLocalsForCatchOSREntry) {
        uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
        m_jitCode->common.catchOSREntryBuffer = vm()->scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
    }
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)