/*
 * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "CodeBlock.h"
#include "CodeOrigin.h"
#include "Instruction.h"
#include "JITStubRoutine.h"
#include "MacroAssembler.h"
#include "Options.h"
#include "RegisterSet.h"
#include "Structure.h"
#include "StructureSet.h"
#include "StructureStubClearingWatchpoint.h"
#include "StubInfoSummary.h"

namespace JSC {

#if ENABLE(JIT)

class AccessCase;
class AccessGenerationResult;
class PolymorphicAccess;

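// The kind of property access that an IC site performs. This is fixed when the
// StructureStubInfo is created and does not change over the stub's lifetime.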
enum class AccessType : int8_t {
    Get,
    GetWithThis,
    GetDirect,
    TryGet,
    Put,
    In,
    InstanceOf
};

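// The current state of the inline cache. Unset means nothing has been cached yet;
// the *Self and *Length cases are monomorphic fast paths patched directly into the
// inline code; Stub means a PolymorphicAccess object has been installed.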
enum class CacheType : int8_t {
    Unset,
    GetByIdSelf,
    PutByIdReplace,
    InByIdSelf,
    Stub,
    ArrayLength,
    StringLength
};

class StructureStubInfo {
    WTF_MAKE_NONCOPYABLE(StructureStubInfo);
    WTF_MAKE_FAST_ALLOCATED;
public:
    StructureStubInfo(AccessType);
    ~StructureStubInfo();

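    // Transition this stub from Unset into one of the monomorphic CacheType states,
    // recording (where applicable) the structure and property offset that the inline
    // code was patched to expect.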
    void initGetByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
    void initArrayLength();
    void initStringLength();
    void initPutByIdReplace(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);
    void initInByIdSelf(CodeBlock*, Structure* baseObjectStructure, PropertyOffset);

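    // Hands a freshly created AccessCase to this stub. Depending on the buffering
    // state (see considerCaching() below), the case may be compiled into a stub
    // immediately or merely buffered until a later access triggers regeneration.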
    AccessGenerationResult addAccessCase(const GCSafeConcurrentJSLocker&, CodeBlock*, const Identifier&, std::unique_ptr<AccessCase>);

    void reset(CodeBlock*);

    void deref();
    void aboutToDie();

    // Check if the stub has weak references that are dead. If it does, then it resets itself,
    // either entirely or just enough to ensure that those dead pointers don't get used anymore.
    void visitWeakReferences(CodeBlock*);

    // This returns true if it has marked everything that it will ever mark.
    bool propagateTransitions(SlotVisitor&);

    ALWAYS_INLINE bool considerCaching(CodeBlock* codeBlock, Structure* structure)
    {
        // We never cache non-cells.
        if (!structure) {
            sawNonCell = true;
            return false;
        }

        // This method is called from the Optimize variants of IC slow paths. The first part of this
        // method tries to determine if the Optimize variant should really behave like the
        // non-Optimize variant and leave the IC untouched.
        //
        // If we determine that we should do something to the IC then the next order of business is
        // to determine if this Structure would impact the IC at all. We know that it won't, if we
        // have already buffered something on its behalf. That's what the bufferedStructures set is
        // for.

        everConsidered = true;
        if (!countdown) {
            // Check if we have been doing repatching too frequently. If so, then we should cool off
            // for a while.
            WTF::incrementWithSaturation(repatchCount);
            if (repatchCount > Options::repatchCountForCoolDown()) {
                // We've been repatching too much, so don't do it now.
                repatchCount = 0;
                // The amount of time we require for cool-down depends on the number of times we've
                // had to cool down in the past. The relationship is exponential. The max value we
                // allow here is 2^8 - 2, since the slow paths may increment the count to indicate
                // that they'd like to temporarily skip patching just this once.
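                // For example, if Options::initialCoolDownCount() were 20, successive cool-downs
                // would set the countdown to 20, 40, 80, 160, and then saturate at 254.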
                countdown = WTF::leftShiftWithSaturation(
                    static_cast<uint8_t>(Options::initialCoolDownCount()),
                    numberOfCoolDowns,
                    static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() - 1));
                WTF::incrementWithSaturation(numberOfCoolDowns);

                // We may still have had something buffered. Trigger generation now.
                bufferingCountdown = 0;
                return true;
            }

            // We don't want to return false due to buffering indefinitely.
            if (!bufferingCountdown) {
                // Note that when this returns true, it's possible that we will not even get an
                // AccessCase because this may cause Repatch.cpp to simply do an in-place
                // repatching.
                return true;
            }

            bufferingCountdown--;

            // Now protect the IC buffering. We want to proceed only if this is a structure that
            // we don't already have a case buffered for. Note that if this returns true but the
            // bufferingCountdown is not zero then we will buffer the access case for later without
            // immediately generating code for it.
            //
            // NOTE: This will behave oddly for InstanceOf if the user varies the prototype but not
            // the base's structure. That seems unlikely for the canonical use of instanceof, where
            // the prototype is fixed.
            bool isNewlyAdded = bufferedStructures.add(structure);
            if (isNewlyAdded) {
                VM& vm = *codeBlock->vm();
                vm.heap.writeBarrier(codeBlock);
            }
            return isNewlyAdded;
        }
        countdown--;
        return false;
    }

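    // A sketch of the expected calling pattern for considerCaching(); the real logic
    // lives in the tryCache* functions in Repatch.cpp, and the names below are
    // illustrative only:
    //
    //     if (stubInfo.considerCaching(codeBlock, baseValue.structureOrNull())) {
    //         // Build an AccessCase describing this access and hand it to the stub.
    //         stubInfo.addAccessCase(locker, codeBlock, ident, WTFMove(newCase));
    //     }

    // Summarizes this stub's state for the benefit of tiering-up decisions, e.g.
    // whether a higher tier should rely on this IC or plan for the slow path.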
    StubInfoSummary summary() const;

    static StubInfoSummary summary(const StructureStubInfo*);

    bool containsPC(void* pc) const;

    CodeOrigin codeOrigin;
    CallSiteIndex callSiteIndex;

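    // The cache's payload. Which member is live is determined by cacheType: byIdSelf
    // backs the monomorphic self-access cases (GetByIdSelf, PutByIdReplace, InByIdSelf),
    // while stub points to the PolymorphicAccess once cacheType is CacheType::Stub.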
    union {
        struct {
            WriteBarrierBase<Structure> baseObjectStructure;
            PropertyOffset offset;
        } byIdSelf;
        PolymorphicAccess* stub;
    } u;

    // Represents those structures that already have buffered AccessCases in the PolymorphicAccess.
    // Note that it's always safe to clear this. If we clear it prematurely, then if we see the same
    // structure again during this buffering countdown, we will create an AccessCase object for it.
    // That's not so bad - we'll get rid of the redundant ones once we regenerate.
    StructureSet bufferedStructures;

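    // Code locations and register assignments that the JIT recorded when it emitted
    // this inline cache, consulted later when repatching or generating stubs.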
    struct {
        CodeLocationLabel<JITStubRoutinePtrTag> start; // This is either the start of the inline IC for *byId caches, or the location of the patchable jump for 'instanceof' caches.
        CodeLocationLabel<JSInternalPtrTag> doneLocation;
        CodeLocationCall<JSInternalPtrTag> slowPathCallLocation;
        CodeLocationLabel<JITStubRoutinePtrTag> slowPathStartLocation;

        RegisterSet usedRegisters;

        uint32_t inlineSize() const
        {
            int32_t inlineSize = MacroAssembler::differenceBetweenCodePtr(start, doneLocation);
            ASSERT(inlineSize >= 0);
            return inlineSize;
        }

        GPRReg baseGPR;
        GPRReg valueGPR;
        GPRReg thisGPR;
#if USE(JSVALUE32_64)
        GPRReg valueTagGPR;
        GPRReg baseTagGPR;
        GPRReg thisTagGPR;
#endif
    } patch;

    GPRReg baseGPR() const
    {
        return patch.baseGPR;
    }

    CodeLocationCall<JSInternalPtrTag> slowPathCallLocation() { return patch.slowPathCallLocation; }
    CodeLocationLabel<JSInternalPtrTag> doneLocation() { return patch.doneLocation; }
    CodeLocationLabel<JITStubRoutinePtrTag> slowPathStartLocation() { return patch.slowPathStartLocation; }

    CodeLocationJump<JSInternalPtrTag> patchableJump()
    {
        ASSERT(accessType == AccessType::InstanceOf);
        return patch.start.jumpAtOffset<JSInternalPtrTag>(0);
    }

    JSValueRegs valueRegs() const
    {
        return JSValueRegs(
#if USE(JSVALUE32_64)
            patch.valueTagGPR,
#endif
            patch.valueGPR);
    }

    AccessType accessType;
    CacheType cacheType;
    uint8_t countdown; // We repatch only when this is zero. If not zero, we decrement.
    uint8_t repatchCount; // Number of repatchings since the last cool-down; too many triggers a cool-down.
    uint8_t numberOfCoolDowns; // How many times we have cooled down; drives the exponential back-off above.
    uint8_t bufferingCountdown; // While non-zero, considerCaching() buffers new cases instead of forcing regeneration.
    bool resetByGC : 1; // Set when the stub is reset because GC found dead weak references.
    bool tookSlowPath : 1; // Set by the generic slow paths that never attempt to cache.
    bool everConsidered : 1; // Set once considerCaching() has seen a cell base value.
    bool prototypeIsKnownObject : 1; // Only relevant for InstanceOf.
    bool sawNonCell : 1; // Set if we ever saw a non-cell base value (see considerCaching()).
};

inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo)
{
    return structureStubInfo.codeOrigin;
}

inline J_JITOperation_ESsiJI appropriateOptimizingGetByIdFunction(AccessType type)
{
    switch (type) {
    case AccessType::Get:
        return operationGetByIdOptimize;
    case AccessType::TryGet:
        return operationTryGetByIdOptimize;
    case AccessType::GetDirect:
        return operationGetByIdDirectOptimize;
    case AccessType::GetWithThis:
    default:
        ASSERT_NOT_REACHED();
        return nullptr;
    }
}

inline J_JITOperation_EJI appropriateGenericGetByIdFunction(AccessType type)
{
    switch (type) {
    case AccessType::Get:
        return operationGetByIdGeneric;
    case AccessType::TryGet:
        return operationTryGetByIdGeneric;
    case AccessType::GetDirect:
        return operationGetByIdDirectGeneric;
    case AccessType::GetWithThis:
    default:
        ASSERT_NOT_REACHED();
        return nullptr;
    }
}

#else

class StructureStubInfo;

#endif // ENABLE(JIT)

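// Note that this typedef is available even when the JIT is disabled; in that case it
// maps to the forward-declared (and never defined) StructureStubInfo above, which is
// fine because the map only stores pointers.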
typedef HashMap<CodeOrigin, StructureStubInfo*, CodeOriginApproximateHash> StubInfoMap;

} // namespace JSC