/*
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "DFGCommon.h"

#if ENABLE(FTL_JIT)

#include "B3BasicBlockInlines.h"
#include "B3CCallValue.h"
#include "B3Compilation.h"
#include "B3FrequentedBlock.h"
#include "B3Procedure.h"
#include "B3SwitchValue.h"
#include "B3Width.h"
#include "FTLAbbreviatedTypes.h"
#include "FTLAbstractHeapRepository.h"
#include "FTLCommonValues.h"
#include "FTLState.h"
#include "FTLSwitchCase.h"
#include "FTLTypedPointer.h"
#include "FTLValueFromBlock.h"
#include "FTLWeight.h"
#include "FTLWeightedTarget.h"
#include "HeapCell.h"
#include <wtf/OrderMaker.h>
#include <wtf/StringPrintStream.h>

// FIXME: remove this once everything can be generated through B3.
IGNORE_WARNINGS_BEGIN("missing-noreturn")
ALLOW_UNUSED_PARAMETERS_BEGIN

namespace JSC {

namespace DFG {
struct Node;
} // namespace DFG

namespace B3 {
class FenceValue;
class SlotBaseValue;
} // namespace B3

namespace FTL {

enum Scale { ScaleOne, ScaleTwo, ScaleFour, ScaleEight, ScalePtr };

class Output : public CommonValues {
public:
    Output(State&);
    ~Output();

    void initialize(AbstractHeapRepository&);

    void setFrequency(double value)
    {
        m_frequency = value;
    }

    LBasicBlock newBlock();

    LBasicBlock insertNewBlocksBefore(LBasicBlock nextBlock)
    {
        LBasicBlock lastNextBlock = m_nextBlock;
        m_nextBlock = nextBlock;
        return lastNextBlock;
    }

    void applyBlockOrder();

    LBasicBlock appendTo(LBasicBlock, LBasicBlock nextBlock);
    void appendTo(LBasicBlock);
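    // A rough usage sketch of the block-management helpers above (hedged: "out"
    // stands in for an Output& in FTL lowering code, and the block names are
    // illustrative):
    //
    //     LBasicBlock slowPath = out.newBlock();
    //     LBasicBlock continuation = out.newBlock();
    //     LBasicBlock lastNext = out.insertNewBlocksBefore(slowPath);
    //     // ... emit the fast path, ending in a branch to slowPath/continuation ...
    //     out.appendTo(slowPath, continuation);
    //     // ... emit the slow path, ending in out.jump(continuation) ...
    //     out.appendTo(continuation, lastNext);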

    void setOrigin(DFG::Node* node) { m_origin = node; }
    B3::Origin origin() { return B3::Origin(m_origin); }

    LValue framePointer();

    B3::SlotBaseValue* lockedStackSlot(size_t bytes);

    LValue constBool(bool value);
    LValue constInt32(int32_t value);

    LValue weakPointer(DFG::Graph& graph, JSCell* cell)
    {
        ASSERT(graph.m_plan.weakReferences().contains(cell));

        return constIntPtr(bitwise_cast<intptr_t>(cell));
    }

    LValue weakPointer(DFG::FrozenValue* value)
    {
        RELEASE_ASSERT(value->value().isCell());

        return constIntPtr(bitwise_cast<intptr_t>(value->cell()));
    }

    template<typename T>
    LValue constIntPtr(T* value)
    {
        static_assert(!std::is_base_of<HeapCell, T>::value, "To use a GC pointer, the graph must be aware of it. Use gcPointer instead and make sure the graph is aware of this reference.");
        if (sizeof(void*) == 8)
            return constInt64(bitwise_cast<intptr_t>(value));
        return constInt32(bitwise_cast<intptr_t>(value));
    }
    template<typename T>
    LValue constIntPtr(T value)
    {
        if (sizeof(void*) == 8)
            return constInt64(static_cast<intptr_t>(value));
        return constInt32(static_cast<intptr_t>(value));
    }
    LValue constInt64(int64_t value);
    LValue constDouble(double value);

    LValue phi(LType);
    template<typename... Params>
    LValue phi(LType, ValueFromBlock, Params... theRest);
    template<typename VectorType>
    LValue phi(LType, const VectorType&);
    void addIncomingToPhi(LValue phi, ValueFromBlock);
    template<typename... Params>
    void addIncomingToPhi(LValue phi, ValueFromBlock, Params... theRest);

    LValue opaque(LValue);

    LValue add(LValue, LValue);
    LValue sub(LValue, LValue);
    LValue mul(LValue, LValue);
    LValue div(LValue, LValue);
    LValue chillDiv(LValue, LValue);
    LValue mod(LValue, LValue);
    LValue chillMod(LValue, LValue);
    LValue neg(LValue);

    LValue doubleAdd(LValue, LValue);
    LValue doubleSub(LValue, LValue);
    LValue doubleMul(LValue, LValue);
    LValue doubleDiv(LValue, LValue);
    LValue doubleMod(LValue, LValue);
    LValue doubleNeg(LValue value) { return neg(value); }

    LValue bitAnd(LValue, LValue);
    LValue bitOr(LValue, LValue);
    LValue bitXor(LValue, LValue);
    LValue shl(LValue, LValue shiftAmount);
    LValue aShr(LValue, LValue shiftAmount);
    LValue lShr(LValue, LValue shiftAmount);
    LValue bitNot(LValue);
    LValue logicalNot(LValue);

    LValue ctlz32(LValue);
    LValue doubleAbs(LValue);
    LValue doubleCeil(LValue);
    LValue doubleFloor(LValue);
    LValue doubleTrunc(LValue);

    LValue doubleUnary(DFG::Arith::UnaryType, LValue);

    LValue doublePow(LValue base, LValue exponent);
    LValue doublePowi(LValue base, LValue exponent);

    LValue doubleSqrt(LValue);

    LValue doubleLog(LValue);

    LValue doubleToInt(LValue);
    LValue doubleToUInt(LValue);

    LValue signExt32To64(LValue);
    LValue signExt32ToPtr(LValue);
    LValue zeroExt(LValue, LType);
    LValue zeroExtPtr(LValue value) { return zeroExt(value, B3::Int64); }
    LValue intToDouble(LValue);
    LValue unsignedToDouble(LValue);
    LValue castToInt32(LValue);
    LValue doubleToFloat(LValue);
    LValue floatToDouble(LValue);
    LValue bitCast(LValue, LType);
    LValue fround(LValue);

    LValue load(TypedPointer, LType);
    LValue store(LValue, TypedPointer);
    B3::FenceValue* fence(const AbstractHeap* read, const AbstractHeap* write);

    LValue load8SignExt32(TypedPointer);
    LValue load8ZeroExt32(TypedPointer);
    LValue load16SignExt32(TypedPointer);
    LValue load16ZeroExt32(TypedPointer);
    LValue load32(TypedPointer pointer) { return load(pointer, B3::Int32); }
    LValue load64(TypedPointer pointer) { return load(pointer, B3::Int64); }
    LValue loadPtr(TypedPointer pointer) { return load(pointer, B3::pointerType()); }
    LValue loadFloat(TypedPointer pointer) { return load(pointer, B3::Float); }
    LValue loadDouble(TypedPointer pointer) { return load(pointer, B3::Double); }
    LValue store32As8(LValue, TypedPointer);
    LValue store32As16(LValue, TypedPointer);
    LValue store32(LValue value, TypedPointer pointer)
    {
        ASSERT(value->type() == B3::Int32);
        return store(value, pointer);
    }
    LValue store64(LValue value, TypedPointer pointer)
    {
        ASSERT(value->type() == B3::Int64);
        return store(value, pointer);
    }
    LValue storePtr(LValue value, TypedPointer pointer)
    {
        ASSERT(value->type() == B3::pointerType());
        return store(value, pointer);
    }
    LValue storeFloat(LValue value, TypedPointer pointer)
    {
        ASSERT(value->type() == B3::Float);
        return store(value, pointer);
    }
    LValue storeDouble(LValue value, TypedPointer pointer)
    {
        ASSERT(value->type() == B3::Double);
        return store(value, pointer);
    }

    enum LoadType {
        Load8SignExt32,
        Load8ZeroExt32,
        Load16SignExt32,
        Load16ZeroExt32,
        Load32,
        Load64,
        LoadPtr,
        LoadFloat,
        LoadDouble
    };

    LValue load(TypedPointer, LoadType);

    enum StoreType {
        Store32As8,
        Store32As16,
        Store32,
        Store64,
        StorePtr,
        StoreFloat,
        StoreDouble
    };

    LValue store(LValue, TypedPointer, StoreType);

    LValue addPtr(LValue value, ptrdiff_t immediate = 0)
    {
        if (!immediate)
            return value;
        return add(value, constIntPtr(immediate));
    }

    // Construct an address by offsetting base by the requested amount and ascribing
    // the requested abstract heap to it.
    TypedPointer address(const AbstractHeap& heap, LValue base, ptrdiff_t offset = 0)
    {
        return TypedPointer(heap, addPtr(base, offset));
    }
    // Construct an address by offsetting base by the amount specified by the field,
    // and optionally an additional amount (use this with care), and then creating
    // a TypedPointer with the given field as the heap.
    TypedPointer address(LValue base, const AbstractHeap& field, ptrdiff_t offset = 0)
    {
        return address(field, base, offset + field.offset());
    }
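    // A rough usage sketch of address() (hedged: "out" stands in for an Output&,
    // "object" for a cell pointer LValue, and the heap name comes from
    // AbstractHeapRepository; treat the specific heap as illustrative):
    //
    //     // Load the butterfly pointer of a JSObject held in "object".
    //     LValue butterfly = out.loadPtr(out.address(object, m_heaps.JSObject_butterfly));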

    LValue baseIndex(LValue base, LValue index, Scale, ptrdiff_t offset = 0);

    TypedPointer baseIndex(const AbstractHeap& heap, LValue base, LValue index, Scale scale, ptrdiff_t offset = 0)
    {
        return TypedPointer(heap, baseIndex(base, index, scale, offset));
    }
    TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue base, LValue index, JSValue indexAsConstant = JSValue(), ptrdiff_t offset = 0, LValue mask = nullptr)
    {
        return heap.baseIndex(*this, base, index, indexAsConstant, offset, mask);
    }
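    // A rough usage sketch of baseIndex() (hedged: "out", "storage", "index", and
    // "someHeap" are placeholders; ScaleEight matches 8-byte JSValue slots on
    // 64-bit targets):
    //
    //     // Address of storage[index], i.e. storage + index * 8, tagged with someHeap.
    //     LValue element = out.load64(out.baseIndex(someHeap, storage, out.zeroExtPtr(index), ScaleEight));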

    TypedPointer absolute(const void* address);

    LValue load8SignExt32(LValue base, const AbstractHeap& field) { return load8SignExt32(address(base, field)); }
    LValue load8ZeroExt32(LValue base, const AbstractHeap& field) { return load8ZeroExt32(address(base, field)); }
    LValue load16SignExt32(LValue base, const AbstractHeap& field) { return load16SignExt32(address(base, field)); }
    LValue load16ZeroExt32(LValue base, const AbstractHeap& field) { return load16ZeroExt32(address(base, field)); }
    LValue load32(LValue base, const AbstractHeap& field) { return load32(address(base, field)); }
    LValue load64(LValue base, const AbstractHeap& field) { return load64(address(base, field)); }
    LValue loadPtr(LValue base, const AbstractHeap& field) { return loadPtr(address(base, field)); }
    LValue loadDouble(LValue base, const AbstractHeap& field) { return loadDouble(address(base, field)); }
    void store32As8(LValue value, LValue base, const AbstractHeap& field) { store32As8(value, address(base, field)); }
    void store32As16(LValue value, LValue base, const AbstractHeap& field) { store32As16(value, address(base, field)); }
    void store32(LValue value, LValue base, const AbstractHeap& field) { store32(value, address(base, field)); }
    void store64(LValue value, LValue base, const AbstractHeap& field) { store64(value, address(base, field)); }
    void storePtr(LValue value, LValue base, const AbstractHeap& field) { storePtr(value, address(base, field)); }
    void storeDouble(LValue value, LValue base, const AbstractHeap& field) { storeDouble(value, address(base, field)); }

    // FIXME: Explore adding support for value range constraints to B3. Maybe it could be as simple as having
    // a load instruction that guarantees that its result is non-negative.
    // https://bugs.webkit.org/show_bug.cgi?id=151458
    void ascribeRange(LValue, const ValueRange&) { }
    LValue nonNegative32(LValue loadInstruction) { return loadInstruction; }
    LValue load32NonNegative(TypedPointer pointer) { return load32(pointer); }
    LValue load32NonNegative(LValue base, const AbstractHeap& field) { return load32(base, field); }

    LValue equal(LValue, LValue);
    LValue notEqual(LValue, LValue);
    LValue above(LValue, LValue);
    LValue aboveOrEqual(LValue, LValue);
    LValue below(LValue, LValue);
    LValue belowOrEqual(LValue, LValue);
    LValue greaterThan(LValue, LValue);
    LValue greaterThanOrEqual(LValue, LValue);
    LValue lessThan(LValue, LValue);
    LValue lessThanOrEqual(LValue, LValue);

    LValue doubleEqual(LValue, LValue);
    LValue doubleEqualOrUnordered(LValue, LValue);
    LValue doubleNotEqualOrUnordered(LValue, LValue);
    LValue doubleLessThan(LValue, LValue);
    LValue doubleLessThanOrEqual(LValue, LValue);
    LValue doubleGreaterThan(LValue, LValue);
    LValue doubleGreaterThanOrEqual(LValue, LValue);
    LValue doubleNotEqualAndOrdered(LValue, LValue);
    LValue doubleLessThanOrUnordered(LValue, LValue);
    LValue doubleLessThanOrEqualOrUnordered(LValue, LValue);
    LValue doubleGreaterThanOrUnordered(LValue, LValue);
    LValue doubleGreaterThanOrEqualOrUnordered(LValue, LValue);

    LValue isZero32(LValue);
    LValue notZero32(LValue);
    LValue isZero64(LValue);
    LValue notZero64(LValue);
    LValue isNull(LValue value) { return isZero64(value); }
    LValue notNull(LValue value) { return notZero64(value); }

    LValue testIsZero32(LValue value, LValue mask) { return isZero32(bitAnd(value, mask)); }
    LValue testNonZero32(LValue value, LValue mask) { return notZero32(bitAnd(value, mask)); }
    LValue testIsZero64(LValue value, LValue mask) { return isZero64(bitAnd(value, mask)); }
    LValue testNonZero64(LValue value, LValue mask) { return notZero64(bitAnd(value, mask)); }
    LValue testIsZeroPtr(LValue value, LValue mask) { return isNull(bitAnd(value, mask)); }
    LValue testNonZeroPtr(LValue value, LValue mask) { return notNull(bitAnd(value, mask)); }

    LValue select(LValue value, LValue taken, LValue notTaken);

    // These are relaxed atomics by default. Use AbstractHeapRepository::decorateFencedAccess() with a
    // non-null heap to make them seq_cst fenced.
    LValue atomicXchgAdd(LValue operand, TypedPointer pointer, B3::Width);
    LValue atomicXchgAnd(LValue operand, TypedPointer pointer, B3::Width);
    LValue atomicXchgOr(LValue operand, TypedPointer pointer, B3::Width);
    LValue atomicXchgSub(LValue operand, TypedPointer pointer, B3::Width);
    LValue atomicXchgXor(LValue operand, TypedPointer pointer, B3::Width);
    LValue atomicXchg(LValue operand, TypedPointer pointer, B3::Width);
    LValue atomicStrongCAS(LValue expected, LValue newValue, TypedPointer pointer, B3::Width);
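    // A rough usage sketch (hedged: "out", "pointer", "expected", and "newValue"
    // are placeholders; each operation returns the prior memory value, and the
    // access stays relaxed unless the heap was decorated as described above):
    //
    //     LValue oldCount = out.atomicXchgAdd(out.constInt32(1), pointer, B3::Width32);
    //     LValue prior = out.atomicStrongCAS(expected, newValue, pointer, B3::Width64);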

    template<typename VectorType>
    LValue call(LType type, LValue function, const VectorType& vector)
    {
        B3::CCallValue* result = m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function);
        result->appendArgs(vector);
        return result;
    }
    LValue call(LType type, LValue function) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function); }
    LValue call(LType type, LValue function, LValue arg1) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1); }
    template<typename... Args>
    LValue call(LType type, LValue function, LValue arg1, Args... args) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1, args...); }

    template<typename Function, typename... Args>
    LValue callWithoutSideEffects(B3::Type type, Function function, LValue arg1, Args... args)
    {
        return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), B3::Effects::none(),
            constIntPtr(tagCFunctionPtr<void*>(function, B3CCallPtrTag)), arg1, args...);
    }

    // FIXME: Consider enhancing this to allow the client to choose the target PtrTag to use.
    // https://bugs.webkit.org/show_bug.cgi?id=184324
    template<typename FunctionType>
    LValue operation(FunctionType function) { return constIntPtr(tagCFunctionPtr<void*>(function, B3CCallPtrTag)); }
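    // A rough usage sketch (hedged: "out" and "operationFoo" are placeholders for
    // an Output& and some C-callable runtime function; the argument list must
    // match that function's signature):
    //
    //     LValue result = out.call(B3::Int64, out.operation(operationFoo), globalObject, argument);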

    void jump(LBasicBlock);
    void branch(LValue condition, LBasicBlock taken, Weight takenWeight, LBasicBlock notTaken, Weight notTakenWeight);
    void branch(LValue condition, WeightedTarget taken, WeightedTarget notTaken)
    {
        branch(condition, taken.target(), taken.weight(), notTaken.target(), notTaken.weight());
    }

    // Branches to an already-created handler if true, "falls through" if false. Fall-through is
    // simulated by creating a continuation for you.
    void check(LValue condition, WeightedTarget taken, Weight notTakenWeight);

    // Same as check(), but uses Weight::inverse() to compute the notTakenWeight.
    void check(LValue condition, WeightedTarget taken);
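    // A rough usage sketch (hedged: "out" is a placeholder Output&, and the
    // usually()/rarely() helpers are assumed to come from FTLWeightedTarget.h):
    //
    //     // Jump to the slow path if "condition" holds; otherwise fall through into
    //     // a freshly created continuation block.
    //     out.check(condition, rarely(slowPath));
    //
    //     // Explicit two-way branch with per-edge weights.
    //     out.branch(condition, usually(fastPath), rarely(slowPath));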

    template<typename VectorType>
    void switchInstruction(LValue value, const VectorType& cases, LBasicBlock fallThrough, Weight fallThroughWeight)
    {
        B3::SwitchValue* switchValue = m_block->appendNew<B3::SwitchValue>(m_proc, origin(), value);
        switchValue->setFallThrough(B3::FrequentedBlock(fallThrough));
        for (const SwitchCase& switchCase : cases) {
            int64_t value = switchCase.value()->asInt();
            B3::FrequentedBlock target(switchCase.target(), switchCase.weight().frequencyClass());
            switchValue->appendCase(B3::SwitchCase(value, target));
        }
    }

    void entrySwitch(const Vector<LBasicBlock>&);

    void ret(LValue);

    void unreachable();

    void appendSuccessor(WeightedTarget);

    B3::CheckValue* speculate(LValue);
    B3::CheckValue* speculateAdd(LValue, LValue);
    B3::CheckValue* speculateSub(LValue, LValue);
    B3::CheckValue* speculateMul(LValue, LValue);

    B3::PatchpointValue* patchpoint(LType);

    void trap();

    ValueFromBlock anchor(LValue);

    void incrementSuperSamplerCount();
    void decrementSuperSamplerCount();

#if PLATFORM(COCOA)
#pragma mark - States
#endif
    B3::Procedure& m_proc;

    DFG::Node* m_origin { nullptr };
    LBasicBlock m_block { nullptr };
    LBasicBlock m_nextBlock { nullptr };

    AbstractHeapRepository* m_heaps;

    double m_frequency { 1 };

private:
    OrderMaker<LBasicBlock> m_blockOrder;
};

template<typename... Params>
inline LValue Output::phi(LType type, ValueFromBlock value, Params... theRest)
{
    LValue phiNode = phi(type);
    addIncomingToPhi(phiNode, value, theRest...);
    return phiNode;
}

template<typename VectorType>
inline LValue Output::phi(LType type, const VectorType& vector)
{
    LValue phiNode = phi(type);
    for (const ValueFromBlock& valueFromBlock : vector)
        addIncomingToPhi(phiNode, valueFromBlock);
    return phiNode;
}

template<typename... Params>
inline void Output::addIncomingToPhi(LValue phi, ValueFromBlock value, Params... theRest)
{
    addIncomingToPhi(phi, value);
    addIncomingToPhi(phi, theRest...);
}
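// A rough usage sketch of the anchor()/phi() pattern for merging values across
// blocks (hedged: "out" is a placeholder Output&; block names are illustrative,
// and "lastNext" would come from insertNewBlocksBefore() as sketched near the top
// of the class):
//
//     LBasicBlock thenCase = out.newBlock();
//     LBasicBlock elseCase = out.newBlock();
//     LBasicBlock continuation = out.newBlock();
//
//     out.branch(condition, usually(thenCase), rarely(elseCase));
//
//     out.appendTo(thenCase, elseCase);
//     ValueFromBlock fastResult = out.anchor(out.constInt32(1));
//     out.jump(continuation);
//
//     out.appendTo(elseCase, continuation);
//     ValueFromBlock slowResult = out.anchor(out.constInt32(0));
//     out.jump(continuation);
//
//     out.appendTo(continuation, lastNext);
//     LValue result = out.phi(B3::Int32, fastResult, slowResult);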

ALLOW_UNUSED_PARAMETERS_END
IGNORE_WARNINGS_END

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)