/*
 * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 * Copyright (C) 2009 Acision BV. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "MachineStackMarker.h"

#include "ConservativeRoots.h"
#include "MachineContext.h"
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/BitVector.h>
#include <wtf/PageBlock.h>
#include <wtf/StdLibExtras.h>

namespace JSC {

MachineThreads::MachineThreads()
    : m_threadGroup(ThreadGroup::create())
{
}

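// Conservatively scan the current thread's saved register state (if one was captured)
// and its stack, from the current stack top up to the stack origin.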
SUPPRESS_ASAN
void MachineThreads::gatherFromCurrentThread(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState& currentThreadState)
{
    if (currentThreadState.registerState) {
        void* registersBegin = currentThreadState.registerState;
        void* registersEnd = reinterpret_cast<void*>(roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(currentThreadState.registerState + 1)));
        conservativeRoots.add(registersBegin, registersEnd, jitStubRoutines, codeBlocks);
    }

    conservativeRoots.add(currentThreadState.stackTop, currentThreadState.stackOrigin, jitStubRoutines, codeBlocks);
}

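// Returns the (negative) adjustment needed so that conservative scanning also covers
// the ABI-defined red zone below the stack pointer, which leaf functions may use
// without moving the stack pointer.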
static inline int osRedZoneAdjustment()
{
    int redZoneAdjustment = 0;
#if !OS(WINDOWS)
#if CPU(X86_64)
    // See http://people.freebsd.org/~obrien/amd64-elf-abi.pdf Section 3.2.2.
    redZoneAdjustment = -128;
#elif CPU(ARM64)
    // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html#//apple_ref/doc/uid/TP40013702-SW7
    redZoneAdjustment = -128;
#endif
#endif // !OS(WINDOWS)
    return redZoneAdjustment;
}

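// Computes the range of another thread's stack that should be scanned: from the
// word-aligned stack top (extended downward by the red zone and clamped to the
// stack's end) up to the stack origin. Stacks grow down, so begin and end are
// swapped before returning a (base, size) pair.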
static std::pair<void*, size_t> captureStack(Thread& thread, void* stackTop)
{
    char* begin = reinterpret_cast_ptr<char*>(thread.stack().origin());
    char* end = bitwise_cast<char*>(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(stackTop)));
    ASSERT(begin >= end);

    char* endWithRedZone = end + osRedZoneAdjustment();
    ASSERT(WTF::roundUpToMultipleOf<sizeof(void*)>(reinterpret_cast<uintptr_t>(endWithRedZone)) == reinterpret_cast<uintptr_t>(endWithRedZone));

    if (endWithRedZone < thread.stack().end())
        endWithRedZone = reinterpret_cast_ptr<char*>(thread.stack().end());

    std::swap(begin, endWithRedZone);
    return std::make_pair(begin, endWithRedZone - begin);
}

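// A simple word-at-a-time copy used in place of the system memcpy; see the comment
// above tryCopyOtherThreadStack() for why memcpy cannot be used here.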
SUPPRESS_ASAN
static void copyMemory(void* dst, const void* src, size_t size)
{
    size_t dstAsSize = reinterpret_cast<size_t>(dst);
    size_t srcAsSize = reinterpret_cast<size_t>(src);
    RELEASE_ASSERT(dstAsSize == WTF::roundUpToMultipleOf<sizeof(CPURegister)>(dstAsSize));
    RELEASE_ASSERT(srcAsSize == WTF::roundUpToMultipleOf<sizeof(CPURegister)>(srcAsSize));
    RELEASE_ASSERT(size == WTF::roundUpToMultipleOf<sizeof(CPURegister)>(size));

    CPURegister* dstPtr = reinterpret_cast<CPURegister*>(dst);
    const CPURegister* srcPtr = reinterpret_cast<const CPURegister*>(src);
    size /= sizeof(CPURegister);
    while (size--)
        *dstPtr++ = *srcPtr++;
}

// This function must not call malloc(), free(), or any other function that might
// acquire a lock. Since 'thread' is suspended, trying to acquire a lock
// will deadlock if 'thread' holds that lock.
// This function, specifically the memory copying, was causing problems with Address Sanitizer in
// apps. Since we cannot blacklist the system memcpy we must use our own naive implementation,
// copyMemory, for ASan to work on either instrumented or non-instrumented builds. This is not a
// significant performance loss as tryCopyOtherThreadStack is only called as part of an O(heapsize)
// operation. As the heap is generally much larger than the stack the performance hit is minimal.
// See: https://bugs.webkit.org/show_bug.cgi?id=146297
void MachineThreads::tryCopyOtherThreadStack(Thread& thread, void* buffer, size_t capacity, size_t* size)
{
    PlatformRegisters registers;
    size_t registersSize = thread.getRegisters(registers);

    // This is a workaround for <rdar://problem/27607384>. libdispatch recycles work
    // queue threads without running pthread exit destructors. This can cause us to scan a
    // thread during work queue initialization, when the stack pointer is null.
    if (UNLIKELY(!MachineContext::stackPointer(registers))) {
        *size = 0;
        return;
    }

    std::pair<void*, size_t> stack = captureStack(thread, MachineContext::stackPointer(registers));

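    // Even when the data does not fit, keep accumulating the required size in *size so
    // the caller can grow the buffer and retry.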
    bool canCopy = *size + registersSize + stack.second <= capacity;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, &registers, registersSize);
    *size += registersSize;

    if (canCopy)
        copyMemory(static_cast<char*>(buffer) + *size, stack.first, stack.second);
    *size += stack.second;
}

bool MachineThreads::tryCopyOtherThreadStacks(const AbstractLocker& locker, void* buffer, size_t capacity, size_t* size, Thread& currentThreadForGC)
{
    // Prevent two VMs from suspending each other's threads at the same time,
    // which can cause deadlock: <rdar://problem/20300842>.
    static Lock mutex;
    std::lock_guard<Lock> lock(mutex);

    *size = 0;

    Thread& currentThread = Thread::current();
    const ListHashSet<Ref<Thread>>& threads = m_threadGroup->threads(locker);
    BitVector isSuspended(threads.size());

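    // First, suspend every registered thread except the current thread and the thread
    // running the GC.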
    {
        unsigned index = 0;
        for (const Ref<Thread>& thread : threads) {
            if (thread.ptr() != &currentThread
                && thread.ptr() != &currentThreadForGC) {
                auto result = thread->suspend();
                if (result)
                    isSuspended.set(index);
                else {
#if OS(DARWIN)
                    // These threads will be removed from the ThreadGroup. Thus, we do not do anything here except for reporting.
                    ASSERT(result.error() != KERN_SUCCESS);
                    WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
                        "JavaScript garbage collection encountered an invalid thread (err 0x%x): Thread [%d/%d: %p].",
                        result.error(), index, threads.size(), thread.ptr());
#endif
                }
            }
            ++index;
        }
    }

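    // Next, copy each successfully suspended thread's register state and stack into the buffer.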
    {
        unsigned index = 0;
        for (auto& thread : threads) {
            if (isSuspended.get(index))
                tryCopyOtherThreadStack(thread.get(), buffer, capacity, size);
            ++index;
        }
    }

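    // Finally, resume every thread that we suspended.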
    {
        unsigned index = 0;
        for (auto& thread : threads) {
            if (isSuspended.get(index))
                thread->resume();
            ++index;
        }
    }

    return *size <= capacity;
}

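// Discards the old buffer (its contents are not needed because the caller restarts the
// copy) and allocates a new one that is at least twice the size that was required,
// rounded up to a multiple of the page size.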
static void growBuffer(size_t size, void** buffer, size_t* capacity)
{
    if (*buffer)
        fastFree(*buffer);

    *capacity = WTF::roundUpToMultipleOf(WTF::pageSize(), size * 2);
    *buffer = fastMalloc(*capacity);
}

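// Gathers conservative roots from the current thread (if a state was captured) and from
// every other registered thread. Other threads' register state and stacks are copied into
// a heap-allocated buffer, growing it until everything fits, and the buffer is then scanned.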
void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks, CurrentThreadState* currentThreadState, Thread* currentThread)
{
    if (currentThreadState)
        gatherFromCurrentThread(conservativeRoots, jitStubRoutines, codeBlocks, *currentThreadState);

    size_t size;
    size_t capacity = 0;
    void* buffer = nullptr;
    auto locker = holdLock(m_threadGroup->getLock());
    while (!tryCopyOtherThreadStacks(locker, buffer, capacity, &size, *currentThread))
        growBuffer(size, &buffer, &capacity);

    if (!buffer)
        return;

    conservativeRoots.add(buffer, static_cast<char*>(buffer) + size, jitStubRoutines, codeBlocks);
    fastFree(buffer);
}

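// Captures the calling thread's register state and stack top into `state` and runs the
// lambda with it. The dummy return value (together with NEVER_INLINE) prevents the call
// from being tail-call optimized; see the comment on the return statement.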
NEVER_INLINE int callWithCurrentThreadState(const ScopedLambda<void(CurrentThreadState&)>& lambda)
{
    DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(state);
    lambda(state);
    return 42; // Suppress tail call optimization.
}

} // namespace JSC