/*
 * Copyright (C) 2013, 2015-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLAbstractHeap.h"

#if ENABLE(FTL_JIT)

#include "DFGCommon.h"
#include "FTLAbbreviatedTypes.h"
#include "FTLAbstractHeapRepository.h"
#include "FTLOutput.h"
#include "FTLTypedPointer.h"
#include "JSCInlines.h"
#include "Options.h"

namespace JSC { namespace FTL {

AbstractHeap::AbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset)
    : m_offset(offset)
    , m_heapName(heapName)
{
    changeParent(parent);
}

void AbstractHeap::changeParent(AbstractHeap* parent)
{
    if (m_parent) {
        bool result = m_parent->m_children.removeFirst(this);
        RELEASE_ASSERT(result);
    }

    m_parent = parent;

    if (parent) {
        ASSERT(!m_parent->m_children.contains(this));
        m_parent->m_children.append(this);
    }
}

void AbstractHeap::compute(unsigned begin)
{
    // This recursively computes the ranges of the tree. This solves the following constraints
    // in linear time:
    //
    // - A node's end is greater than its begin.
    // - A node's begin is greater than or equal to its parent's begin.
    // - A node's end is less than or equal to its parent's end.
    // - The ranges are as small as possible.
    //
    // It's OK to recurse because we keep the depth of our abstract heap hierarchy fairly sane.
    // I think that it gets 4 deep at most.
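    //
    // For example, a root with a leaf child A and a child B that itself has leaf children
    // C and D gets, starting from begin = 0:
    //
    //     A = [0,1), C = [1,2), D = [2,3), B = [1,3), root = [0,3)
    //
    // Each leaf occupies one slot and each interior node spans exactly its children.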

    if (m_children.isEmpty()) {
        // Must special-case leaves so that they use just one slot on the number line.
        m_range = B3::HeapRange(begin);
        return;
    }

    unsigned current = begin;
    for (AbstractHeap* child : m_children) {
        child->compute(current);
        current = child->range().end();
    }

    m_range = B3::HeapRange(begin, current);
}

void AbstractHeap::shallowDump(PrintStream& out) const
{
    out.print(heapName(), "(", m_offset, ")");
    if (m_range)
        out.print("<", m_range, ">");
}

void AbstractHeap::dump(PrintStream& out) const
{
    shallowDump(out);
    if (m_parent)
        out.print("->", *m_parent);
}
void AbstractHeap::deepDump(PrintStream& out, unsigned indent) const
{
    auto printIndent = [&] () {
        for (unsigned i = indent; i--;)
            out.print("    ");
    };

    printIndent();
    shallowDump(out);

    if (m_children.isEmpty()) {
        out.print("\n");
        return;
    }

    out.print(":\n");
    for (AbstractHeap* child : m_children)
        child->deepDump(out, indent + 1);
}

void AbstractHeap::badRangeError() const
{
    dataLog("Heap does not have range: ", *this, "\n");
    RELEASE_ASSERT_NOT_REACHED();
}

IndexedAbstractHeap::IndexedAbstractHeap(AbstractHeap* parent, const char* heapName, ptrdiff_t offset, size_t elementSize)
    : m_heapForAnyIndex(parent, heapName)
    , m_heapNameLength(strlen(heapName))
    , m_offset(offset)
    , m_elementSize(elementSize)
{
}

IndexedAbstractHeap::~IndexedAbstractHeap()
{
}

TypedPointer IndexedAbstractHeap::baseIndex(Output& out, LValue base, LValue index, JSValue indexAsConstant, ptrdiff_t offset, LValue mask)
{
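    // A constant index selects the specific per-index child heap, so we can emit a simple
    // base-plus-offset address with precise aliasing information.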
    if (indexAsConstant.isInt32())
        return out.address(base, at(indexAsConstant.asInt32()), offset);

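    // Variable index: optionally mask it, then scale by the element size and add it to the
    // base. The result has to be tagged with the conservative whole-heap atAnyIndex().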
    if (mask)
        index = out.bitAnd(mask, index);
    LValue result = out.add(base, out.mul(index, out.constIntPtr(m_elementSize)));

    return TypedPointer(atAnyIndex(), out.addPtr(result, m_offset + offset));
}

const AbstractHeap& IndexedAbstractHeap::atSlow(ptrdiff_t index)
{
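    // Slow path for indices that fall outside the inline m_smallIndices table: lazily
    // allocate the overflow map and create the per-index heap on first use.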
    ASSERT(static_cast<size_t>(index) >= m_smallIndices.size());

    if (UNLIKELY(!m_largeIndices))
        m_largeIndices = std::make_unique<MapType>();

    std::unique_ptr<AbstractHeap>& field = m_largeIndices->add(index, nullptr).iterator->value;
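    // add() leaves an existing entry untouched, so a null value means this is the first
    // time we've seen this index and the heap still has to be created and named.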
    if (!field) {
        field = std::make_unique<AbstractHeap>();
        initialize(*field, index);
    }

    return *field;
}

void IndexedAbstractHeap::initialize(AbstractHeap& field, ptrdiff_t signedIndex)
{
    // Build up a name of the form:
    //
    //    heapName_hexIndex
    //
    // or:
    //
    //    heapName_neg_hexIndex
    //
    // For example if you access an indexed heap called FooBar at index 5, you'll
    // get:
    //
    //    FooBar_5
    //
    // Or if you access an indexed heap called Blah at index -10, you'll get:
    //
    //    Blah_neg_A
    //
    // This naming convention comes from our previous use of LLVM. It's not clear that we need
    // it anymore, though it is sort of nifty. Basically, B3 doesn't need string names for
    // abstract heaps, but the fact that we have a reasonably efficient way to always name the
    // heaps will probably come in handy for debugging.

    static const char* negSplit = "_neg_";
    static const char* posSplit = "_";

    bool negative;
    size_t index;
    if (signedIndex < 0) {
        negative = true;
        index = -signedIndex;
    } else {
        negative = false;
        index = signedIndex;
    }

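    // Find the smallest width, in 4-bit hexlets, that can represent the index, then build
    // the name in a buffer kept alive by m_largeIndexNames.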
    for (unsigned power = 4; power <= sizeof(void*) * 8; power += 4) {
        if (isGreaterThanNonZeroPowerOfTwo(index, power))
            continue;

        unsigned numHexlets = power >> 2;

        size_t stringLength = m_heapNameLength + (negative ? strlen(negSplit) : strlen(posSplit)) + numHexlets;
        char* characters;
        m_largeIndexNames.append(CString::newUninitialized(stringLength, characters));

        memcpy(characters, m_heapForAnyIndex.heapName(), m_heapNameLength);
        if (negative)
            memcpy(characters + m_heapNameLength, negSplit, strlen(negSplit));
        else
            memcpy(characters + m_heapNameLength, posSplit, strlen(posSplit));

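        // Emit the hex digits least-significant first, filling the buffer from the end.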
        size_t accumulator = index;
        for (unsigned i = 0; i < numHexlets; ++i) {
            characters[stringLength - i - 1] = lowerNibbleToASCIIHexDigit(accumulator);
            accumulator >>= 4;
        }

        field.initialize(&m_heapForAnyIndex, characters, m_offset + signedIndex * m_elementSize);
        return;
    }

    RELEASE_ASSERT_NOT_REACHED();
}

void IndexedAbstractHeap::dump(PrintStream& out) const
{
    out.print("Indexed:", atAnyIndex());
}

NumberedAbstractHeap::NumberedAbstractHeap(AbstractHeap* heap, const char* heapName)
    : m_indexedHeap(heap, heapName, 0, 1)
{
}

NumberedAbstractHeap::~NumberedAbstractHeap()
{
}

void NumberedAbstractHeap::dump(PrintStream& out) const
{
    out.print("Numbered: ", atAnyNumber());
}

AbsoluteAbstractHeap::AbsoluteAbstractHeap(AbstractHeap* heap, const char* heapName)
    : m_indexedHeap(heap, heapName, 0, 1)
{
}

AbsoluteAbstractHeap::~AbsoluteAbstractHeap()
{
}

void AbsoluteAbstractHeap::dump(PrintStream& out) const
{
    out.print("Absolute:", atAnyAddress());
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)