/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGPutStackSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGBlockMapInlines.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGPhase.h"
#include "DFGPreciseLocalClobberize.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSCInlines.h"
#include "OperandsInlines.h"

namespace JSC { namespace DFG {

namespace {

namespace DFGPutStackSinkingPhaseInternal {
static const bool verbose = false;
}

class PutStackSinkingPhase : public Phase {
public:
    PutStackSinkingPhase(Graph& graph)
        : Phase(graph, "PutStack sinking")
    {
    }

    bool run()
    {
        // FIXME: One of the problems of this approach is that it will create a duplicate Phi graph
        // for sunken PutStacks in the presence of interesting control flow merges, and where the
        // value being PutStack'd is also otherwise live in the DFG code. We could work around this
        // by doing the sinking over CPS, or maybe just by doing really smart hoisting. It's also
        // possible that the duplicate Phi graph can be deduplicated by B3. It would be best if we
        // could observe that there is already a Phi graph in place that does what we want. In
        // principle if we have a request to place a Phi at a particular place, we could just check
        // if there is already a Phi that does what we want. Because PutStackSinkingPhase runs just
        // after SSA conversion, we have almost a guarantee that the Phi graph we produce here would
        // be trivially redundant to the one we already have.

        // FIXME: This phase doesn't adequately use KillStacks. KillStack can be viewed as a def.
        // This is mostly inconsequential; it would be a bug to have a local live at a KillStack.
        // More important is that KillStack should swallow any deferral. After a KillStack, the
        // local should behave like a TOP deferral because it would be invalid for anyone to trust
        // the stack. It's not clear to me if this is important or not.
        // https://bugs.webkit.org/show_bug.cgi?id=145296

        if (DFGPutStackSinkingPhaseInternal::verbose) {
            dataLog("Graph before PutStack sinking:\n");
            m_graph.dump();
        }

        m_graph.ensureSSADominators();

        SSACalculator ssaCalculator(m_graph);
        InsertionSet insertionSet(m_graph);

        // First figure out where various locals are live.
        BlockMap<Operands<bool>> liveAtHead(m_graph);
        BlockMap<Operands<bool>> liveAtTail(m_graph);

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            liveAtHead[block] = Operands<bool>(OperandsLike, block->variablesAtHead);
            liveAtTail[block] = Operands<bool>(OperandsLike, block->variablesAtHead);

            liveAtHead[block].fill(false);
            liveAtTail[block].fill(false);
        }

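        // Iterate the backward transfer below to a fixpoint: visit blocks in reverse order
        // and each block's nodes in reverse, then fold each block's live-at-head into its
        // predecessors' live-at-tail until nothing changes.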
        bool changed;
        do {
            changed = false;

            for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                BasicBlock* block = m_graph.block(blockIndex);
                if (!block)
                    continue;

                Operands<bool> live = liveAtTail[block];
                for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                    Node* node = block->at(nodeIndex);
                    if (DFGPutStackSinkingPhaseInternal::verbose)
                        dataLog("Live at ", node, ": ", live, "\n");

                    Vector<VirtualRegister, 4> reads;
                    Vector<VirtualRegister, 4> writes;
                    auto escapeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        if (DFGPutStackSinkingPhaseInternal::verbose)
                            dataLog(" ", operand, " is live at ", node, "\n");
                        reads.append(operand);
                    };

                    auto writeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        RELEASE_ASSERT(node->op() == PutStack || node->op() == LoadVarargs || node->op() == ForwardVarargs || node->op() == KillStack);
                        writes.append(operand);
                    };

                    preciseLocalClobberize(
                        m_graph, node, escapeHandler, writeHandler,
                        [&] (VirtualRegister, LazyNode) { });

                    for (VirtualRegister operand : writes)
                        live.operand(operand) = false;
                    for (VirtualRegister operand : reads)
                        live.operand(operand) = true;
                }

                if (live == liveAtHead[block])
                    continue;

                liveAtHead[block] = live;
                changed = true;

                for (BasicBlock* predecessor : block->predecessors) {
                    for (size_t i = live.size(); i--;)
                        liveAtTail[predecessor][i] |= live[i];
                }
            }

        } while (changed);

        // All of the arguments should be live at head of root. Note that we may find that some
        // locals are live at head of root. This seems wrong but isn't. This will happen for example
        // if the function accesses closure variable #42 for some other function and we either don't
        // have variable #42 at all or we haven't set it at root, for whatever reason. Basically this
        // arises since our aliasing for closure variables is conservatively based on variable number
        // and ignores the owning symbol table. We should probably fix this eventually and make our
        // aliasing more precise.
        //
        // For our purposes here, the imprecision in the aliasing is harmless. It just means that we
        // may not do as much Phi pruning as we wanted.
        for (size_t i = liveAtHead.atIndex(0).numberOfArguments(); i--;)
            DFG_ASSERT(m_graph, nullptr, liveAtHead.atIndex(0).argument(i));

        // Next identify where we would want to sink PutStacks to. We say that there is a deferred
        // flush if we had a PutStack with a given FlushFormat but it hasn't been materialized yet.
        // Deferrals have the following lattice; but it's worth noting that the TOP part of the
        // lattice serves an entirely different purpose than the rest of the lattice: it just means
        // that we're in a region of code where nobody should have been relying on the value. The
        // rest of the lattice means that we either have a PutStack that is deferred (i.e. still
        // needs to be executed) or there isn't one (because we've already executed it).
        //
        // Bottom:
        //     Represented as DeadFlush.
        //     Means that all previous PutStacks have been executed so there is nothing deferred.
        //     During merging this is subordinate to the other kinds of deferrals, because it
        //     represents the fact that we've already executed all necessary PutStacks. This implies
        //     that there *had* been some PutStacks that we should have executed.
        //
        // Top:
        //     Represented as ConflictingFlush.
        //     Represents the fact that we know, via forward flow, that there isn't any value in the
        //     given local that anyone should have been relying on. This comes into play at the
        //     prologue (because in SSA form at the prologue no local has any value) or when we merge
        //     deferrals for different formats. A lexical scope in which a local had some semantic
        //     meaning will by this point share the same format; if we had stores from different
        //     lexical scopes that got merged together then we may have a conflicting format. Hence
        //     a conflicting format proves that we're no longer in an area in which the variable was
        //     in scope. Note that this is all approximate and only precise enough to later answer
        //     questions pertinent to sinking. For example, this doesn't always detect when a local
        //     is no longer semantically relevant - we may well have a deferral from inside some
        //     inlined call survive outside of that inlined code, and this is generally OK. In the
        //     worst case it means that we might think that a deferral that is actually dead must
        //     still be executed. Liveness usually catches such cases, but that's not guaranteed
        //     since liveness is conservative.
        //
        //     What Top does give us is a way to detect situations where we both don't need to care
        //     about a deferral and couldn't reason about it anyway. If we merged deferrals for
        //     different formats then we wouldn't know which format to use. So, we use Top in that
        //     case because it's also a case where we know that we can ignore the deferral.
        //
        // Deferral with a concrete format:
        //     Represented by format values other than DeadFlush or ConflictingFlush.
        //     Represents the fact that the original code would have done a PutStack but we haven't
        //     identified an operation that would have observed that PutStack.
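        //
        // To illustrate, the merge we perform below behaves like a join on this lattice. A
        // sketch (merge() itself is defined outside this file; the concrete format names are
        // just examples of FlushFormat values):
        //
        //     merge(DeadFlush, F)                == F                  // bottom is subordinate
        //     merge(F, F)                        == F
        //     merge(FlushedInt32, FlushedDouble) == ConflictingFlush   // disagreement goes to top
        //     merge(ConflictingFlush, F)         == ConflictingFlush   // top absorbs everything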
        //
        // We need to be precise about liveness in this phase because not doing so
        // could cause us to insert a PutStack before a node that we thought escaped a
        // value but that doesn't really escape it. Sinking this PutStack above such a node may
        // cause us to insert a GetStack that we forward to the Phi we're feeding into the
        // sunken PutStack. Inserting such a GetStack could cause us to load garbage and
        // can confuse the AI into claiming untrue things (like that the program will exit when
        // it really won't).
        BlockMap<Operands<FlushFormat>> deferredAtHead(m_graph);
        BlockMap<Operands<FlushFormat>> deferredAtTail(m_graph);

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            deferredAtHead[block] =
                Operands<FlushFormat>(OperandsLike, block->variablesAtHead);
            deferredAtTail[block] =
                Operands<FlushFormat>(OperandsLike, block->variablesAtHead);
        }

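        // In SSA form no local has a meaningful value at the prologue, so start every local
        // at TOP. Arguments are left at the default-constructed FlushFormat (DeadFlush, i.e.
        // bottom): they are live at the head of the root block and are already on the stack.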
        for (unsigned local = deferredAtHead.atIndex(0).numberOfLocals(); local--;)
            deferredAtHead.atIndex(0).local(local) = ConflictingFlush;

        do {
            changed = false;

            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                Operands<FlushFormat> deferred = deferredAtHead[block];

                for (Node* node : *block) {
                    if (DFGPutStackSinkingPhaseInternal::verbose)
                        dataLog("Deferred at ", node, ":", deferred, "\n");

                    if (node->op() == GetStack) {
                        // Handle the case that the input doesn't match our requirements. This is
                        // really a bug, but it's a benign one if we simply don't run this phase.
                        // It usually arises because of patterns like:
                        //
                        // if (thing)
                        //     PutStack()
                        // ...
                        // if (thing)
                        //     GetStack()
                        //
                        // Or:
                        //
                        // if (never happens)
                        //     GetStack()
                        //
                        // Because this phase runs early in SSA, it should be sensible to enforce
                        // that no such code pattern has arisen yet. So, when validation is
                        // enabled, we assert that we aren't seeing this. But with validation
                        // disabled we silently let this fly and we just abort this phase.
                        // FIXME: Get rid of all remaining cases of conflicting GetStacks.
                        // https://bugs.webkit.org/show_bug.cgi?id=150398

                        bool isConflicting =
                            deferred.operand(node->stackAccessData()->local) == ConflictingFlush;

                        if (validationEnabled())
                            DFG_ASSERT(m_graph, node, !isConflicting);

                        if (isConflicting) {
                            // Oh noes! Abort!!
                            return false;
                        }

                        // A GetStack doesn't affect anything, since we know which local we are
                        // reading from.
                        continue;
                    } else if (node->op() == PutStack) {
                        VirtualRegister operand = node->stackAccessData()->local;
                        deferred.operand(operand) = node->stackAccessData()->format;
                        continue;
                    } else if (node->op() == KillStack) {
                        // We don't want to sink a PutStack past a KillStack.
                        deferred.operand(node->unlinkedLocal()) = ConflictingFlush;
                        continue;
                    }

                    auto escapeHandler = [&] (VirtualRegister operand) {
                        if (DFGPutStackSinkingPhaseInternal::verbose)
                            dataLog("For ", node, " escaping ", operand, "\n");
                        if (operand.isHeader())
                            return;
                        // We will materialize just before any reads.
                        deferred.operand(operand) = DeadFlush;
                    };

                    auto writeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        RELEASE_ASSERT(node->op() == LoadVarargs || node->op() == ForwardVarargs);
                        deferred.operand(operand) = DeadFlush;
                    };

                    preciseLocalClobberize(
                        m_graph, node, escapeHandler, writeHandler,
                        [&] (VirtualRegister, LazyNode) { });
                }

                if (deferred == deferredAtTail[block])
                    continue;

                deferredAtTail[block] = deferred;
                changed = true;

                for (BasicBlock* successor : block->successors()) {
                    for (size_t i = deferred.size(); i--;) {
                        if (DFGPutStackSinkingPhaseInternal::verbose)
                            dataLog("Considering ", VirtualRegister(deferred.operandForIndex(i)), " at ", pointerDump(block), "->", pointerDump(successor), ": ", deferred[i], " and ", deferredAtHead[successor][i], " merges to ");

                        deferredAtHead[successor][i] =
                            merge(deferredAtHead[successor][i], deferred[i]);

                        if (DFGPutStackSinkingPhaseInternal::verbose)
                            dataLog(deferredAtHead[successor][i], "\n");
                    }
                }
            }

        } while (changed);

        // We wish to insert PutStacks at all of the materialization points, which are defined
        // implicitly as the places where we set deferred to Dead while it was previously not Dead.
        // To do this, we may need to build some Phi functions to handle stuff like this:
        //
        // Before:
        //
        //     if (p)
        //         PutStack(r42, @x)
        //     else
        //         PutStack(r42, @y)
        //
        // After:
        //
        //     if (p)
        //         Upsilon(@x, ^z)
        //     else
        //         Upsilon(@y, ^z)
        //     z: Phi()
        //     PutStack(r42, @z)
        //
        // This means that we have an SSACalculator::Variable for each local. A Def is any PutStack
        // in the original program (defining the value the PutStack would have stored) and also any
        // GetStack (defining the value known to already be in the slot). The original PutStacks
        // will simply vanish.

        Operands<SSACalculator::Variable*> operandToVariable(
            OperandsLike, m_graph.block(0)->variablesAtHead);
        Vector<VirtualRegister> indexToOperand;
        for (size_t i = m_graph.block(0)->variablesAtHead.size(); i--;) {
            VirtualRegister operand(m_graph.block(0)->variablesAtHead.operandForIndex(i));

            SSACalculator::Variable* variable = ssaCalculator.newVariable();
            operandToVariable.operand(operand) = variable;
            ASSERT(indexToOperand.size() == variable->index());
            indexToOperand.append(operand);
        }

        HashSet<Node*> putStacksToSink;

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case PutStack:
                    putStacksToSink.add(node);
                    ssaCalculator.newDef(
                        operandToVariable.operand(node->stackAccessData()->local),
                        block, node->child1().node());
                    break;
                case GetStack:
                    ssaCalculator.newDef(
                        operandToVariable.operand(node->stackAccessData()->local),
                        block, node);
                    break;
                default:
                    break;
                }
            }
        }

        ssaCalculator.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                VirtualRegister operand = indexToOperand[variable->index()];

                if (!liveAtHead[block].operand(operand))
                    return nullptr;

                FlushFormat format = deferredAtHead[block].operand(operand);

                // We could have an invalid deferral because liveness is imprecise.
                if (!isConcrete(format))
                    return nullptr;

                if (DFGPutStackSinkingPhaseInternal::verbose)
                    dataLog("Adding Phi for ", operand, " at ", pointerDump(block), "\n");

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(resultFor(format));
                return phiNode;
            });

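        // For each block, "mapping" tracks, per operand, the node that currently represents
        // the value the stack slot would hold: the reaching def at head, then updated by
        // Phis, PutStacks, and GetStacks as we walk the block.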
        Operands<Node*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);
        Operands<FlushFormat> deferred;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            mapping.fill(nullptr);

            for (size_t i = mapping.size(); i--;) {
                VirtualRegister operand(mapping.operandForIndex(i));

                SSACalculator::Variable* variable = operandToVariable.operand(operand);
                SSACalculator::Def* def = ssaCalculator.reachingDefAtHead(block, variable);
                if (!def)
                    continue;

                mapping.operand(operand) = def->value();
            }

            if (DFGPutStackSinkingPhaseInternal::verbose)
                dataLog("Mapping at top of ", pointerDump(block), ": ", mapping, "\n");

            for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(block)) {
                VirtualRegister operand = indexToOperand[phiDef->variable()->index()];

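                // Materialize the Phi itself at the very top of the block.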
                insertionSet.insert(0, phiDef->value());

                if (DFGPutStackSinkingPhaseInternal::verbose)
                    dataLog(" Mapping ", operand, " to ", phiDef->value(), "\n");
                mapping.operand(operand) = phiDef->value();
            }

            deferred = deferredAtHead[block];
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                if (DFGPutStackSinkingPhaseInternal::verbose)
                    dataLog("Deferred at ", node, ":", deferred, "\n");

                switch (node->op()) {
                case PutStack: {
                    StackAccessData* data = node->stackAccessData();
                    VirtualRegister operand = data->local;
                    deferred.operand(operand) = data->format;
                    if (DFGPutStackSinkingPhaseInternal::verbose)
                        dataLog(" Mapping ", operand, " to ", node->child1().node(), " at ", node, "\n");
                    mapping.operand(operand) = node->child1().node();
                    break;
                }

                case GetStack: {
                    StackAccessData* data = node->stackAccessData();
                    FlushFormat format = deferred.operand(data->local);
                    if (!isConcrete(format)) {
                        DFG_ASSERT(
                            m_graph, node,
                            deferred.operand(data->local) != ConflictingFlush, deferred.operand(data->local));

                        // This means there is no deferral. No deferral means that the most
                        // authoritative value for this stack slot is what is stored in the stack. So,
                        // keep the GetStack.
                        mapping.operand(data->local) = node;
                        break;
                    }

                    // We have a concrete deferral, which means a PutStack that hasn't executed yet. It
                    // would have stored a value with a certain format. That format must match our
                    // format. But more importantly, we can simply use the value that the PutStack would
                    // have stored and get rid of the GetStack.
                    DFG_ASSERT(m_graph, node, format == data->format, format, data->format);

                    Node* incoming = mapping.operand(data->local);
                    node->child1() = incoming->defaultEdge();
                    node->convertToIdentity();
                    break;
                }

                case KillStack: {
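                    // As in the fixpoint above: a KillStack must not have PutStacks sunk past it.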
                    deferred.operand(node->unlinkedLocal()) = ConflictingFlush;
                    break;
                }

                default: {
                    auto escapeHandler = [&] (VirtualRegister operand) {
                        if (DFGPutStackSinkingPhaseInternal::verbose)
                            dataLog("For ", node, " escaping ", operand, "\n");

                        if (operand.isHeader())
                            return;

                        FlushFormat format = deferred.operand(operand);
                        if (!isConcrete(format)) {
                            // It's dead now, rather than conflicting.
                            deferred.operand(operand) = DeadFlush;
                            return;
                        }

                        // Gotta insert a PutStack.
                        if (DFGPutStackSinkingPhaseInternal::verbose)
                            dataLog("Inserting a PutStack for ", operand, " at ", node, "\n");

                        Node* incoming = mapping.operand(operand);
                        DFG_ASSERT(m_graph, node, incoming);

                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin,
                            OpInfo(m_graph.m_stackAccessData.add(operand, format)),
                            Edge(incoming, uncheckedUseKindFor(format)));

                        deferred.operand(operand) = DeadFlush;
                    };

                    auto writeHandler = [&] (VirtualRegister operand) {
                        if (operand.isHeader())
                            return;
                        // LoadVarargs and ForwardVarargs are unconditional writes to the stack
                        // locations they claim to write to. They do not read from the stack
                        // locations they write to. This makes those stack locations dead right
                        // before a LoadVarargs/ForwardVarargs. This means we should never sink
                        // PutStacks right to this point.
                        RELEASE_ASSERT(node->op() == LoadVarargs || node->op() == ForwardVarargs);
                        deferred.operand(operand) = DeadFlush;
                    };

                    preciseLocalClobberize(
                        m_graph, node, escapeHandler, writeHandler,
                        [&] (VirtualRegister, LazyNode) { });
                    break;
                } }
            }

            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
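            // Upsilons feed the Phis of each successor, so they go immediately before this
            // block's terminal, after any PutStacks inserted above.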
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : ssaCalculator.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    VirtualRegister operand = indexToOperand[variable->index()];
                    if (DFGPutStackSinkingPhaseInternal::verbose)
                        dataLog("Creating Upsilon for ", operand, " at ", pointerDump(block), "->", pointerDump(successorBlock), "\n");
                    FlushFormat format = deferredAtHead[successorBlock].operand(operand);
                    DFG_ASSERT(m_graph, nullptr, isConcrete(format), format);
                    UseKind useKind = uncheckedUseKindFor(format);

                    // We need to get a value for the stack slot. This phase doesn't really have a
                    // good way of determining if a stack location got clobbered. It just knows if
                    // there is a deferral. The lack of a deferral might mean that a PutStack or
                    // GetStack had never happened, or it might mean that the value was read, or
                    // that it was written. It's OK for us to make some bad decisions here, since
                    // GCSE will clean it up anyway.
                    Node* incoming;
                    if (isConcrete(deferred.operand(operand))) {
                        incoming = mapping.operand(operand);
                        DFG_ASSERT(m_graph, phiNode, incoming);
                    } else {
                        // Issue a GetStack to get the value. This might introduce some redundancy
                        // into the code, but if it's bad enough, GCSE will clean it up.
                        incoming = insertionSet.insertNode(
                            upsilonInsertionPoint, SpecNone, GetStack, upsilonOrigin,
                            OpInfo(m_graph.m_stackAccessData.add(operand, format)));
                        incoming->setResult(resultFor(format));
                    }

                    insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), Edge(incoming, useKind));
                }
            }

            insertionSet.execute(block);
        }

        // Finally eliminate the sunken PutStacks by turning them into Checks. This keeps whatever
        // type check they were doing.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (auto* node : *block) {
                if (!putStacksToSink.contains(node))
                    continue;

                node->remove(m_graph);
            }
        }

        if (DFGPutStackSinkingPhaseInternal::verbose) {
            dataLog("Graph after PutStack sinking:\n");
            m_graph.dump();
        }

        return true;
    }
};

} // anonymous namespace

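// Note that the phase assumes the graph is already in SSA form; as the comments above
// explain, it is intended to run shortly after SSA conversion.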
bool performPutStackSinking(Graph& graph)
{
    return runPhase<PutStackSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)