/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "ExecutableAllocator.h"

#if ENABLE(JIT)

#include "CodeProfiling.h"
#include "ExecutableAllocationFuzz.h"
#include "JSCInlines.h"
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/SystemTracing.h>
#include <wtf/WorkQueue.h>

#if OS(DARWIN)
#include <mach/mach_time.h>
#include <sys/mman.h>
#endif

#if PLATFORM(IOS_FAMILY)
#include <wtf/cocoa/Entitlements.h>
#endif

#include "LinkBuffer.h"
#include "MacroAssembler.h"

#if PLATFORM(COCOA)
#define HAVE_REMAP_JIT 1
#endif

#if HAVE(REMAP_JIT)
#if CPU(ARM64) && PLATFORM(IOS_FAMILY)
#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
#endif
#endif

#if OS(DARWIN)
#include <mach/mach.h>
extern "C" {
    /* Routine mach_vm_remap */
#ifdef mig_external
mig_external
#else
extern
#endif /* mig_external */
kern_return_t mach_vm_remap
(
    vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance
);
}

#endif

namespace JSC {

using namespace WTF;

#if defined(FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB) && FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB > 0
static const size_t fixedExecutableMemoryPoolSize = FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB * 1024 * 1024;
#elif CPU(ARM)
static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
#elif CPU(ARM64)
static const size_t fixedExecutableMemoryPoolSize = 128 * 1024 * 1024;
#elif CPU(X86_64)
static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
#else
static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#endif

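// Keep a fraction of the pool in reserve: allocations made with JITCompilationCanFail
// are refused once usage would exceed bytesReserved * (1 - executablePoolReservationFraction).
// See ExecutableAllocator::allocate() and memoryPressureMultiplier() below.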
#if CPU(ARM)
static const double executablePoolReservationFraction = 0.15;
#else
static const double executablePoolReservationFraction = 0.25;
#endif

#if ENABLE(SEPARATED_WX_HEAP)
JS_EXPORT_PRIVATE bool useFastPermisionsJITCopy { false };
JS_EXPORT_PRIVATE JITWriteSeparateHeapsFunction jitWriteSeparateHeapsFunction;
#endif

#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT)
static uintptr_t startOfFixedWritableMemoryPool;
#endif

class FixedVMPoolExecutableAllocator;
static FixedVMPoolExecutableAllocator* allocator = nullptr;

static bool s_isJITEnabled = true;
static bool isJITEnabled()
{
#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
    return processHasEntitlement("dynamic-codesigning") && s_isJITEnabled;
#else
    return s_isJITEnabled;
#endif
}

void ExecutableAllocator::setJITEnabled(bool enabled)
{
    ASSERT(!allocator);
    if (s_isJITEnabled == enabled)
        return;

    s_isJITEnabled = enabled;

#if PLATFORM(IOS_FAMILY) && (CPU(ARM64) || CPU(ARM))
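    // Disabling the JIT maps and immediately unmaps a single MAP_JIT page, then checks
    // that further MAP_JIT mappings fail. The first mmap() succeeding also verifies
    // that no executable memory had been allocated before the JIT was disabled.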
    if (!enabled) {
        constexpr size_t size = 1;
        constexpr int protection = PROT_READ | PROT_WRITE | PROT_EXEC;
        constexpr int flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
        constexpr int fd = OSAllocator::JSJITCodePages;
        void* allocation = mmap(nullptr, size, protection, flags, fd, 0);
        const void* executableMemoryAllocationFailure = reinterpret_cast<void*>(-1);
        RELEASE_ASSERT_WITH_MESSAGE(allocation && allocation != executableMemoryAllocationFailure, "We should not have allocated executable memory before disabling the JIT.");
        RELEASE_ASSERT_WITH_MESSAGE(!munmap(allocation, size), "Unmapping executable memory should succeed so we do not have any executable memory in the address space");
        RELEASE_ASSERT_WITH_MESSAGE(mmap(nullptr, size, protection, flags, fd, 0) == executableMemoryAllocationFailure, "Allocating executable memory should fail after setJITEnabled(false) is called.");
    }
#endif
}

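// A MetaAllocator backed by a single fixed virtual memory reservation made at startup.
// The pool never grows: allocateNewSpace() always returns nullptr, so once the
// reservation is exhausted, executable allocation fails.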
class FixedVMPoolExecutableAllocator : public MetaAllocator {
    WTF_MAKE_FAST_ALLOCATED;
public:
    FixedVMPoolExecutableAllocator()
        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
    {
        if (!isJITEnabled())
            return;

        size_t reservationSize;
        if (Options::jitMemoryReservationSize())
            reservationSize = Options::jitMemoryReservationSize();
        else
            reservationSize = fixedExecutableMemoryPoolSize;
        reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2);

        auto tryCreatePageReservation = [] (size_t reservationSize) {
#if OS(LINUX)
            // If the reservation is left uncommitted, the mmap is recorded with a small page
            // size in perf's output, which breaks the subsequent JIT code logging: some JIT
            // code is not recorded correctly. To avoid this, use a committed reservation when
            // perf JITDump logging is enabled.
            if (Options::logJITCodeForPerf())
                return PageReservation::reserveAndCommitWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#endif
            return PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        };

        m_reservation = tryCreatePageReservation(reservationSize);
        if (m_reservation) {
            ASSERT(m_reservation.size() == reservationSize);
            void* reservationBase = m_reservation.base();

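            // Select a write-protection strategy for the pool: per-thread RWX/RX
            // permission switching where the hardware supports it, a separate writable
            // alias mapping (the separated W^X heap), or, failing both, plain RWX pages.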
#if ENABLE(FAST_JIT_PERMISSIONS) && !ENABLE(SEPARATED_WX_HEAP)
            RELEASE_ASSERT(os_thread_self_restrict_rwx_is_supported());
            os_thread_self_restrict_rwx_to_rx();

#else // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
#if ENABLE(FAST_JIT_PERMISSIONS)
            if (os_thread_self_restrict_rwx_is_supported()) {
                useFastPermisionsJITCopy = true;
                os_thread_self_restrict_rwx_to_rx();
            } else
#endif
            if (Options::useSeparatedWXHeap()) {
                // First page of our JIT allocation is reserved.
                ASSERT(reservationSize >= pageSize() * 2);
                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
                reservationSize -= pageSize();
                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
            }
#endif // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)

            addFreshFreeSpace(reservationBase, reservationSize);

            void* reservationEnd = reinterpret_cast<uint8_t*>(reservationBase) + reservationSize;

            m_memoryStart = MacroAssemblerCodePtr<ExecutableMemoryPtrTag>(tagCodePtr<ExecutableMemoryPtrTag>(reservationBase));
            m_memoryEnd = MacroAssemblerCodePtr<ExecutableMemoryPtrTag>(tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd));
        }
    }

    virtual ~FixedVMPoolExecutableAllocator();

    void* memoryStart() { return m_memoryStart.untaggedExecutableAddress(); }
    void* memoryEnd() { return m_memoryEnd.untaggedExecutableAddress(); }
    bool isJITPC(void* pc) { return memoryStart() <= pc && pc < memoryEnd(); }

protected:
    FreeSpacePtr allocateNewSpace(size_t&) override
    {
        // We're operating in a fixed pool, so new allocation is always prohibited.
        return nullptr;
    }

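    // MetaAllocator page callbacks. Where MADV_FREE is used for JIT memory, there is
    // nothing to do when a page comes into use, and freed pages are handed back to the
    // OS with madvise(MADV_FREE); otherwise, pages are committed on demand and
    // decommitted when freed.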
    void notifyNeedPage(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        UNUSED_PARAM(page);
#else
        m_reservation.commit(page, pageSize());
#endif
    }

    void notifyPageIsFree(void* page) override
    {
#if USE(MADV_FREE_FOR_JIT_MEMORY)
        for (;;) {
            int result = madvise(page, pageSize(), MADV_FREE);
            if (!result)
                return;
            ASSERT(result == -1);
            if (errno != EAGAIN) {
                ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
            }
        }
#else
        m_reservation.decommit(page, pageSize());
#endif
    }

private:
#if OS(DARWIN) && HAVE(REMAP_JIT)
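    // Set up the separated W^X scheme: remap the JIT region read-write at a random,
    // secret address, emit a write thunk (into the stub page) with that address baked
    // in, then drop the original mapping to RX and the alias to RW, and scrub the
    // secret address from memory. All subsequent writes into JIT memory must go
    // through the thunk.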
    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
    {
        mach_vm_address_t writableAddr = 0;

        // Create a second mapping of the JIT region at a random address.
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
#if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR;
#endif
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);

        bool remapSucceeded = (ret == KERN_SUCCESS);
        if (!remapSucceeded)
            return;

        // Assemble a thunk that will serve as the means for writing into the JIT region.
        MacroAssemblerCodeRef<JITThunkPtrTag> writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);

        int result = 0;

#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
        // Prevent reading the write thunk code.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(stubBase), stubSize, true, VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);
#endif

        // Prevent writing into the executable JIT mapping.
        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(jitBase), jitSize, true, VM_PROT_READ | VM_PROT_EXECUTE);
        RELEASE_ASSERT(!result);

        // Prevent execution in the writable JIT mapping.
        result = vm_protect(mach_task_self(), static_cast<vm_address_t>(writableAddr), jitSize, true, VM_PROT_READ | VM_PROT_WRITE);
        RELEASE_ASSERT(!result);

        // Zero out writableAddr to avoid leaking the address of the writable mapping.
        memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));

#if ENABLE(SEPARATED_WX_HEAP)
        jitWriteSeparateHeapsFunction = reinterpret_cast<JITWriteSeparateHeapsFunction>(writeThunk.code().executableAddress());
#endif
    }

#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
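    // Emit a specialized memcpy whose destination base (the writable alias) is baked
    // into its instruction stream: on entry, x0 holds the offset into the JIT region,
    // x1 the source, and x2 the byte count. The thunk lives in the stub page, which
    // the caller makes execute-only, so the embedded address cannot be read back out.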
    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
    {
        using namespace ARM64Registers;
        using TrustedImm32 = MacroAssembler::TrustedImm32;

        MacroAssembler jit;

        jit.tagReturnAddress();
        jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
        jit.addPtr(x7, x0);

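        // x0 now points at the destination inside the writable alias, and x3 tracks the
        // destination cursor. Copies under 64 bytes take the small tail at the bottom;
        // the bulk path copies a 32-byte head with unaligned stores, rounds x3 up to a
        // 32-byte boundary, and advances the source (x1) and count (x2) to match.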
        jit.move(x0, x3);
        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));

        jit.add64(TrustedImm32(32), x3);
        jit.and64(TrustedImm32(-32), x3);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.sub64(x3, x0, x5);
        jit.addPtr(x5, x1);

        jit.loadPair64(x1, x8, x9);
        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.sub64(x5, x2);
        jit.storePair64(x12, x13, x0);
        jit.storePair64(x14, x15, x0, TrustedImm32(16));
        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);

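        // Main loop: 32 bytes per iteration, software-pipelined (each iteration stores
        // the chunk loaded on the previous one), using non-temporal accesses to keep
        // the copied data out of the caches.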
        MacroAssembler::Label copyLoop = jit.label();
        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
        jit.add64(TrustedImm32(32), x3);
        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
        jit.add64(TrustedImm32(32), x1);
        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);

        cleanup.link(&jit);
        jit.add64(x2, x1);
        jit.loadPair64(x1, x12, x13);
        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
        jit.storePair64(x8, x9, x3);
        jit.storePair64(x10, x11, x3, TrustedImm32(16));
        jit.addPtr(x2, x3);
        jit.storePair64(x12, x13, x3, TrustedImm32(32));
        jit.storePair64(x14, x15, x3, TrustedImm32(48));
        jit.ret();

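        // Small-copy tail: copy eight bytes at a time while at least eight remain, then
        // finish byte by byte.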
        MacroAssembler::Label local0 = jit.label();
        jit.load64(x1, PostIndex(8), x6);
        jit.store64(x6, x3, PostIndex(8));
        smallCopy.link(&jit);
        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
        MacroAssembler::Label local1 = jit.label();
        jit.load8(x1, PostIndex(1), x6);
        jit.store8(x6, x3, PostIndex(1));
        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
        local2.link(&jit);
        jit.ret();

        auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
        LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
        // We don't use FINALIZE_CODE() for two reasons.
        // The first is that we don't want the writable address, as disassembled instructions,
        // to appear in the console or anywhere in memory, via the PrintStream buffer.
        // The second is that we can't guarantee that the code is readable when using the
        // asyncDisassembly option, as our caller will set our pages execute-only.
        return linkBuffer.finalizeCodeWithoutDisassembly<JITThunkPtrTag>();
    }
#else // not CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
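    // Fallback when the execute-only write thunk is not used: writes go through an
    // ordinary C++ helper that memcpy()s into the writable alias, whose base address
    // is kept in startOfFixedWritableMemoryPool.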
    static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
    {
        memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize);
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* address, void*, size_t)
    {
        startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
        void* function = reinterpret_cast<void*>(&genericWriteToJITRegion);
#if CPU(ARM_THUMB2)
        // Handle the Thumb interworking bit: a pointer to a Thumb function has bit 0
        // set, which is not part of the code address itself.
        uintptr_t functionAsInt = reinterpret_cast<uintptr_t>(function);
        functionAsInt -= 1;
        function = reinterpret_cast<void*>(functionAsInt);
#endif
        auto codePtr = MacroAssemblerCodePtr<JITThunkPtrTag>(tagCFunctionPtr<JITThunkPtrTag>(function));
        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(codePtr);
    }
#endif // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)

#else // OS(DARWIN) && HAVE(REMAP_JIT)
    void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
    {
    }
#endif

private:
    PageReservation m_reservation;
    MacroAssemblerCodePtr<ExecutableMemoryPtrTag> m_memoryStart;
    MacroAssemblerCodePtr<ExecutableMemoryPtrTag> m_memoryEnd;
};

FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
    m_reservation.deallocate();
}

void ExecutableAllocator::initializeUnderlyingAllocator()
{
    ASSERT(!allocator);
    allocator = new FixedVMPoolExecutableAllocator();
    CodeProfiling::notifyAllocator(allocator);
}

bool ExecutableAllocator::isValid() const
{
    if (!allocator)
        return Base::isValid();
    return !!allocator->bytesReserved();
}

bool ExecutableAllocator::underMemoryPressure()
{
    if (!allocator)
        return Base::underMemoryPressure();
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    return statistics.bytesAllocated > statistics.bytesReserved / 2;
}

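// The multiplier is bytesAvailable / (bytesAvailable - bytesAllocated), clamped to at
// least 1, where bytesAvailable excludes the reserved fraction of the pool. It grows
// without bound as usage approaches the usable portion of the pool.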
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    if (!allocator)
        return Base::memoryPressureMultiplier(addedMemoryUsage);
    MetaAllocator::Statistics statistics = allocator->currentStatistics();
    ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
    size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
    size_t bytesAvailable = static_cast<size_t>(
        statistics.bytesReserved * (1 - executablePoolReservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    double result = 1.0;
    size_t divisor = bytesAvailable - bytesAllocated;
    if (divisor)
        result = static_cast<double>(bytesAvailable) / divisor;
    if (result < 1.0)
        result = 1.0;
    return result;
}

RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    if (!allocator)
        return Base::allocate(sizeInBytes, ownerUID, effort);
    if (Options::logExecutableAllocation()) {
        MetaAllocator::Statistics stats = allocator->currentStatistics();
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n");
    }

    if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
        dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
        WTFReportBacktrace();
    }

    if (effort == JITCompilationCanFail
        && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
        return nullptr;

    if (effort == JITCompilationCanFail) {
        // Don't allow allocations if we are down to reserve.
        MetaAllocator::Statistics statistics = allocator->currentStatistics();
        size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
        size_t bytesAvailable = static_cast<size_t>(
            statistics.bytesReserved * (1 - executablePoolReservationFraction));
        if (bytesAllocated > bytesAvailable) {
            if (Options::logExecutableAllocation())
                dataLog("Allocation failed because bytes allocated ", bytesAllocated, " > ", bytesAvailable, " bytes available.\n");
            return nullptr;
        }
    }

    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
    if (!result) {
        if (effort != JITCompilationCanFail) {
            dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
            CRASH();
        }
        return nullptr;
    }

#if CPU(ARM64E)
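    // Sanity check: the handle we are about to return must lie entirely within the
    // fixed pool.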
    void* start = allocator->memoryStart();
    void* end = allocator->memoryEnd();
    void* resultStart = result->start().untaggedPtr();
    void* resultEnd = result->end().untaggedPtr();
    RELEASE_ASSERT(start <= resultStart && resultStart < end);
    RELEASE_ASSERT(start < resultEnd && resultEnd <= end);
#endif
    return result;
}

bool ExecutableAllocator::isValidExecutableMemory(const AbstractLocker& locker, void* address)
{
    if (!allocator)
        return Base::isValidExecutableMemory(locker, address);
    return allocator->isInAllocatedMemory(locker, address);
}

Lock& ExecutableAllocator::getLock() const
{
    if (!allocator)
        return Base::getLock();
    return allocator->getLock();
}

size_t ExecutableAllocator::committedByteCount()
{
    if (!allocator)
        return Base::committedByteCount();
    return allocator->bytesCommitted();
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
    if (!allocator)
        return;
    allocator->dumpProfile();
}
#endif

void* startOfFixedExecutableMemoryPoolImpl()
{
    if (!allocator)
        return nullptr;
    return allocator->memoryStart();
}

void* endOfFixedExecutableMemoryPoolImpl()
{
    if (!allocator)
        return nullptr;
    return allocator->memoryEnd();
}

bool isJITPC(void* pc)
{
    return allocator && allocator->isJITPC(pc);
}

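// Streams a record of every JIT write to the file named by Options::dumpJITMemoryPath().
// Each record is: a 64-bit mach_absolute_time() timestamp, the 64-bit destination
// address, the 64-bit size, then the raw bytes written. Records are buffered in memory
// and flushed on a background queue after dumpJITMemoryFlushInterval(), when the buffer
// fills, and at process exit.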
void dumpJITMemory(const void* dst, const void* src, size_t size)
{
    ASSERT(Options::dumpJITMemoryPath());

#if OS(DARWIN)
    static int fd = -1;
    static uint8_t* buffer;
    static constexpr size_t bufferSize = fixedExecutableMemoryPoolSize;
    static size_t offset = 0;
    static Lock dumpJITMemoryLock;
    static bool needsToFlush = false;
    static auto flush = [](const AbstractLocker&) {
        if (fd == -1) {
            fd = open(Options::dumpJITMemoryPath(), O_CREAT | O_TRUNC | O_APPEND | O_WRONLY | O_EXLOCK | O_NONBLOCK, 0666);
            RELEASE_ASSERT(fd != -1);
        }
        write(fd, buffer, offset);
        offset = 0;
        needsToFlush = false;
    };

    static std::once_flag once;
    static LazyNeverDestroyed<Ref<WorkQueue>> flushQueue;
    std::call_once(once, [] {
        buffer = bitwise_cast<uint8_t*>(malloc(bufferSize));
        flushQueue.construct(WorkQueue::create("jsc.dumpJITMemory.queue", WorkQueue::Type::Serial, WorkQueue::QOS::Background));
        std::atexit([] {
            LockHolder locker(dumpJITMemoryLock);
            flush(locker);
            close(fd);
            fd = -1;
        });
    });

    static auto enqueueFlush = [](const AbstractLocker&) {
        if (needsToFlush)
            return;

        needsToFlush = true;
        flushQueue.get()->dispatchAfter(Seconds(Options::dumpJITMemoryFlushInterval()), [] {
            LockHolder locker(dumpJITMemoryLock);
            if (!needsToFlush)
                return;
            flush(locker);
        });
    };

    static auto write = [](const AbstractLocker& locker, const void* src, size_t size) {
        if (UNLIKELY(offset + size > bufferSize))
            flush(locker);
        memcpy(buffer + offset, src, size);
        offset += size;
        enqueueFlush(locker);
    };

    LockHolder locker(dumpJITMemoryLock);
    uint64_t time = mach_absolute_time();
    uint64_t dst64 = bitwise_cast<uintptr_t>(dst);
    uint64_t size64 = size;
    TraceScope(DumpJITMemoryStart, DumpJITMemoryStop, time, dst64, size64);
    write(locker, &time, sizeof(time));
    write(locker, &dst64, sizeof(dst64));
    write(locker, &size64, sizeof(size64));
    write(locker, src, size);
#else
    UNUSED_PARAM(dst);
    UNUSED_PARAM(src);
    UNUSED_PARAM(size);
    RELEASE_ASSERT_NOT_REACHED();
#endif
}

} // namespace JSC

#endif // ENABLE(JIT)

namespace JSC {

static ExecutableAllocator* executableAllocator;

void ExecutableAllocator::initialize()
{
    executableAllocator = new ExecutableAllocator;
}

ExecutableAllocator& ExecutableAllocator::singleton()
{
    ASSERT(executableAllocator);
    return *executableAllocator;
}

} // namespace JSC