/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)

#include "DFGClobberize.h"

namespace JSC { namespace DFG {

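// Adapts the abstract heap reads, writes, and defs reported by DFG::clobberize()
// into per-operand callbacks: the read and write functors receive each
// VirtualRegister that the node may read or write, and the def functor receives
// each stack location the node defines, together with the node that defines it.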
template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
class PreciseLocalClobberizeAdaptor {
public:
    PreciseLocalClobberizeAdaptor(
        Graph& graph, Node* node,
        const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
        : m_graph(graph)
        , m_node(node)
        , m_read(read)
        , m_unconditionalWrite(write)
        , m_def(def)
    {
    }

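    // Called by clobberize() for each abstract heap the node reads. Only the
    // Stack heap matters here; a read of the entire Stack heap is expanded into
    // the precise set of operands by readTop().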
    void read(AbstractHeap heap)
    {
        if (heap.kind() == Stack) {
            if (heap.payload().isTop()) {
                readTop();
                return;
            }

            callIfAppropriate(m_read, VirtualRegister(heap.payload().value32()));
            return;
        }

        if (heap.overlaps(Stack)) {
            readTop();
            return;
        }
    }

    void write(AbstractHeap heap)
    {
        // We expect stack writes to already be precisely characterized by DFG::clobberize().
        if (heap.kind() == Stack) {
            RELEASE_ASSERT(!heap.payload().isTop());
            callIfAppropriate(m_unconditionalWrite, VirtualRegister(heap.payload().value32()));
            return;
        }

        RELEASE_ASSERT(!heap.overlaps(Stack));
    }

    void def(PureValue)
    {
        // PureValue defs never have anything to do with locals, so ignore this.
    }

    void def(HeapLocation location, LazyNode node)
    {
        if (location.kind() != StackLoc)
            return;

        RELEASE_ASSERT(location.heap().kind() == Stack);

        m_def(VirtualRegister(location.heap().payload().value32()), node);
    }

private:
    template<typename Functor>
    void callIfAppropriate(const Functor& functor, VirtualRegister operand)
    {
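        // Ignore operands that the root block doesn't track; presumably anything
        // beyond its locals and arguments is not observable from this function.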
        if (operand.isLocal() && static_cast<unsigned>(operand.toLocal()) >= m_graph.block(0)->variablesAtHead.numberOfLocals())
            return;

        if (operand.isArgument() && !operand.isHeader() && static_cast<unsigned>(operand.toArgument()) >= m_graph.block(0)->variablesAtHead.numberOfArguments())
            return;

        functor(operand);
    }

    void readTop()
    {
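        // The node claims to read the entire stack. Enumerate the precise set of
        // slots it could actually observe: arguments, argument counts, and call
        // frame headers of the frames visible to it.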
        auto readFrame = [&] (InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip) {
            if (!inlineCallFrame) {
                // Read the outermost arguments and argument count.
                for (unsigned i = numberOfArgumentsToSkip; i < static_cast<unsigned>(m_graph.m_codeBlock->numParameters()); i++)
                    m_read(virtualRegisterForArgument(i));
                m_read(VirtualRegister(CallFrameSlot::argumentCount));
                return;
            }

            for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->argumentsWithFixup.size(); i++)
                m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
            if (inlineCallFrame->isVarargs())
                m_read(VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount));
        };

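        // A Spread whose child is a phantom rest allocation forwards arguments
        // from the frame that allocated it, so it reads that frame's argument slots.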
        auto readSpread = [&] (Node* spread) {
            ASSERT(spread->op() == Spread || spread->op() == PhantomSpread);
            if (!spread->child1()->isPhantomAllocation())
                return;

            ASSERT(spread->child1()->op() == PhantomCreateRest || spread->child1()->op() == PhantomNewArrayBuffer);
            if (spread->child1()->op() == PhantomNewArrayBuffer) {
                // PhantomNewArrayBuffer reads from a constant buffer, not from the stack.
                return;
            }
            InlineCallFrame* inlineCallFrame = spread->child1()->origin.semantic.inlineCallFrame();
            unsigned numberOfArgumentsToSkip = spread->child1()->numberOfArgumentsToSkip();
            readFrame(inlineCallFrame, numberOfArgumentsToSkip);
        };

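        // The bit vector of a NewArrayWithSpread marks which children are spreads;
        // only those children can turn into reads of argument slots.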
        auto readNewArrayWithSpreadNode = [&] (Node* arrayWithSpread) {
            ASSERT(arrayWithSpread->op() == NewArrayWithSpread || arrayWithSpread->op() == PhantomNewArrayWithSpread);
            BitVector* bitVector = arrayWithSpread->bitVector();
            for (unsigned i = 0; i < arrayWithSpread->numChildren(); i++) {
                if (bitVector->get(i)) {
                    Node* child = m_graph.varArgChild(arrayWithSpread, i).node();
                    if (child->op() == PhantomSpread)
                        readSpread(child);
                }
            }
        };

        switch (m_node->op()) {
        case ForwardVarargs:
        case CallForwardVarargs:
        case ConstructForwardVarargs:
        case TailCallForwardVarargs:
        case TailCallForwardVarargsInlinedCaller:
        case GetMyArgumentByVal:
        case GetMyArgumentByValOutOfBounds:
        case CreateDirectArguments:
        case CreateScopedArguments:
        case CreateClonedArguments:
        case PhantomDirectArguments:
        case PhantomClonedArguments:
        case GetRestLength:
        case CreateRest: {
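            // All of these nodes may read the arguments (and argument count) of
            // some call frame, either their own or the one they forward from.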
            bool isForwardingNode = false;
            bool isPhantomNode = false;
            switch (m_node->op()) {
            case ForwardVarargs:
            case CallForwardVarargs:
            case ConstructForwardVarargs:
            case TailCallForwardVarargs:
            case TailCallForwardVarargsInlinedCaller:
                isForwardingNode = true;
                break;
            case PhantomDirectArguments:
            case PhantomClonedArguments:
                isPhantomNode = true;
                break;
            default:
                break;
            }

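            // Presumably the FTL recovers the values of a phantom arguments object
            // through OSR exit metadata rather than by reading the argument slots,
            // so there is nothing to read here.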
            if (isPhantomNode && m_graph.m_plan.isFTL())
                break;

            if (isForwardingNode && m_node->hasArgumentsChild() && m_node->argumentsChild()
                && (m_node->argumentsChild()->op() == PhantomNewArrayWithSpread || m_node->argumentsChild()->op() == PhantomSpread)) {
                if (m_node->argumentsChild()->op() == PhantomNewArrayWithSpread)
                    readNewArrayWithSpreadNode(m_node->argumentsChild().node());
                else
                    readSpread(m_node->argumentsChild().node());
            } else {
                InlineCallFrame* inlineCallFrame;
                if (m_node->hasArgumentsChild() && m_node->argumentsChild())
                    inlineCallFrame = m_node->argumentsChild()->origin.semantic.inlineCallFrame();
                else
                    inlineCallFrame = m_node->origin.semantic.inlineCallFrame();

                unsigned numberOfArgumentsToSkip = 0;
                if (m_node->op() == GetMyArgumentByVal || m_node->op() == GetMyArgumentByValOutOfBounds) {
                    // The value of numberOfArgumentsToSkip guarantees that GetMyArgumentByVal* will never
                    // read any arguments below the number of arguments to skip. For example, if
                    // numberOfArgumentsToSkip is 2, we will never read argument 0 or argument 1.
                    numberOfArgumentsToSkip = m_node->numberOfArgumentsToSkip();
                }

                readFrame(inlineCallFrame, numberOfArgumentsToSkip);
            }

            break;
        }

        case Spread:
            readSpread(m_node);
            break;

        case NewArrayWithSpread: {
            readNewArrayWithSpreadNode(m_node);
            break;
        }

        case GetArgument: {
            InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame();
            unsigned indexIncludingThis = m_node->argumentIndex();
            if (!inlineCallFrame) {
                if (indexIncludingThis < static_cast<unsigned>(m_graph.m_codeBlock->numParameters()))
                    m_read(virtualRegisterForArgument(indexIncludingThis));
                m_read(VirtualRegister(CallFrameSlot::argumentCount));
                break;
            }

            ASSERT_WITH_MESSAGE(inlineCallFrame->isVarargs(), "GetArgument is only used for InlineCallFrame if the call frame is varargs.");
            if (indexIncludingThis < inlineCallFrame->argumentsWithFixup.size())
                m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(indexIncludingThis).offset()));
            m_read(VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount));
            break;
        }

        default: {
            // All of the outermost arguments, except `this`, are read in sloppy mode.
            if (!m_graph.m_codeBlock->isStrictMode()) {
                for (unsigned i = m_graph.m_codeBlock->numParameters(); i--;)
                    m_read(virtualRegisterForArgument(i));
            }

            // The stack header is read.
            for (unsigned i = 0; i < CallFrameSlot::thisArgument; ++i)
                m_read(VirtualRegister(i));

            // Read all of the inline arguments and call frame headers that we didn't already capture.
            for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->getCallerInlineFrameSkippingTailCalls()) {
                if (!inlineCallFrame->isStrictMode()) {
                    for (unsigned i = inlineCallFrame->argumentsWithFixup.size(); i--;)
                        m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
                }
                if (inlineCallFrame->isClosureCall)
                    m_read(VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::callee));
                if (inlineCallFrame->isVarargs())
                    m_read(VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount));
            }
            break;
        } }
    }

    Graph& m_graph;
    Node* m_node;
    const ReadFunctor& m_read;
    const WriteFunctor& m_unconditionalWrite;
    const DefFunctor& m_def;
};

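// Runs DFG::clobberize() over the given node and reports, through the three
// functors, exactly which stack operands the node reads, writes, and defines.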
template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
void preciseLocalClobberize(
    Graph& graph, Node* node,
    const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
{
    PreciseLocalClobberizeAdaptor<ReadFunctor, WriteFunctor, DefFunctor>
        adaptor(graph, node, read, write, def);
    clobberize(graph, node, adaptor);
}
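
// A minimal usage sketch (the lambda bodies are hypothetical placeholders):
//
//     preciseLocalClobberize(
//         graph, node,
//         [&] (VirtualRegister operand) { /* operand may be read */ },
//         [&] (VirtualRegister operand) { /* operand is written */ },
//         [&] (VirtualRegister operand, LazyNode value) { /* operand is defined by value */ });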

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)