/*
 * Copyright (C) 2003-2018 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "BlockDirectoryInlines.h"
#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

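// Builds the global list of size classes on first use. The result is computed once and
// intentionally leaked (static raw pointer, never deleted), so it should not depend on
// any per-VM state.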
const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            if (Options::dumpSizeClasses()) {
                dataLog("Block size: ", MarkedBlock::blockSize, "\n");
                dataLog("Footer size: ", sizeof(MarkedBlock::Footer), "\n");
            }

            auto add = [&] (size_t sizeClass) {
                sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is written as a loop so
            // that it stays correct if we ever reduce atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

            // We want the remaining size classes to minimize internal fragmentation (i.e. the
            // wasted space at the tail end of a MarkedBlock) while still growing roughly
            // exponentially, starting just above the precise size classes and stopping at about
            // four cells per block.
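            // For instance, if Options::sizeClassProgression() is 1.4 (its usual default, though
            // the option is tunable), each candidate computed below is roughly 40% larger than
            // the previous one before we snap it to the block geometry.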

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure the floating-point math didn't take us below preciseCutoff.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
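                // Purely illustrative numbers (assuming, say, blockPayload = 16000, sizeStep = 32,
                // sizeClass = 4500): cellsPerBlock = 3, possiblyBetterSizeClass = 5312,
                // originalWastage = 16000 - 3 * 4500 = 2500, and
                // newWastage = (5312 - 4500) * 3 = 2436, so the rounded-up class wins.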
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when sizeClassProgression is small.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            // Manually inject size classes for objects we know will be allocated in high volume.
            // FIXME: All of these things should have IsoSubspaces.
            // https://bugs.webkit.org/show_bug.cgi?id=179876
            add(sizeof(UnlinkedFunctionCodeBlock));
            add(sizeof(JSString));

            {
                // Sort and deduplicate.
                std::sort(result->begin(), result->end());
                auto it = std::unique(result->begin(), result->end());
                result->shrinkCapacity(it - result->begin());
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

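// Fills the table so that every index maps to the entry for the smallest size class whose
// cells can hold an object of that index's size; indexes beyond the last size class get
// the default entry instead.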
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    ASSERT(MarkedSpace::sizeClassToIndex(MarkedSpace::largeCutoff - 1) < MarkedSpace::numSizeClasses);
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

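// Populates s_sizeClassForSizeStep so that s_sizeClassForSizeStep[sizeClassToIndex(bytes)]
// is the smallest size class able to hold an allocation of that many bytes. Safe to call
// repeatedly; the table is built exactly once behind a std::once_flag.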
void MarkedSpace::initializeSizeClassForStepSize()
{
    static std::once_flag flag;
    std::call_once(
        flag,
        [] {
            buildSizeClassTable(
                s_sizeClassForSizeStep,
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                },
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                });
        });
}

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
{
    initializeSizeClassForStepSize();
}

MarkedSpace::~MarkedSpace()
{
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::freeMemory()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
}

void MarkedSpace::lastChanceToFinalize()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

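// Eagerly sweeps every directory. The incremental sweeper is stopped first, since an
// eager sweep makes its remaining scheduled work redundant.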
void MarkedSpace::sweep()
{
    m_heap->sweeper().stopSweeping();
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.sweep();
            return IterationStatus::Continue;
        });
}

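// Sweeps the large allocations created since the last eden collection (or all of them,
// after a full collection), destroying the dead ones and compacting the survivors so
// that m_largeAllocations stays dense.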
void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        allocation->setIndexInSpace(dstIndex);
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.shrink(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::prepareForAllocation()
{
    ASSERT(!Thread::mayBeGCThread() || m_heap->worldIsStopped());
    for (Subspace* subspace : m_subspaces)
        subspace->prepareForAllocation();

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(visitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::stopAllocatingForGood()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocatingForGood();
            return IterationStatus::Continue;
        });
}

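// Sorts this cycle's large allocations by address and refreshes each one's indexInSpace,
// so that (among other things) the conservative root scan can binary-search the sorted
// range when deciding whether an arbitrary pointer lands inside a live large allocation.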
void MarkedSpace::prepareForConservativeScan()
{
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);

    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
    unsigned index = m_largeAllocationsOffsetForThisCollection;
    for (auto* start = m_largeAllocationsForThisCollectionBegin; start != m_largeAllocationsForThisCollectionEnd; ++start, ++index) {
        (*start)->setIndexInSpace(index);
        ASSERT(m_largeAllocations[index] == *start);
        ASSERT(m_largeAllocations[index]->indexInSpace() == index);
    }
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
}

void MarkedSpace::resumeAllocating()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(MonotonicTime deadline)
{
    bool result = false;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            if (directory.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->directory()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.shrink();
            return IterationStatus::Continue;
        });
}

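// For a full collection we retire the old mark bits by bumping the marking version rather
// than clearing bits in every block. Only when the version counter wraps back around to
// initialVersion do we have to visit each block and physically reset its marks.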
void MarkedSpace::beginMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Full) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }

        m_markingVersion = nextVersion(m_markingVersion);

        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}

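// Mirrors beginMarking: the newly-allocated version is bumped instead of clearing the
// per-block newlyAllocated bits, with a physical reset only on version wraparound.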
void MarkedSpace::endMarking()
{
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->block().resetAllocated();
            });
    }

    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

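// Asks each directory to snapshot its set of not-yet-swept blocks for this collection,
// using the eden variant when only the nursery is being collected.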
void MarkedSpace::snapshotUnswept()
{
    if (m_heap->collectionScope() == CollectionScope::Eden) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            out.print("Bits for ", directory, ":\n");
            directory.dumpBits(out);
            return IterationStatus::Continue;
        });
}

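// The storeStoreFence ensures that a concurrent thread walking the directory list can
// never observe the newly appended directory before its next pointer has been nulled out.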
void MarkedSpace::addBlockDirectory(const AbstractLocker&, BlockDirectory* directory)
{
    directory->setNextDirectory(nullptr);

    WTF::storeStoreFence();

    m_directories.append(std::mem_fn(&BlockDirectory::setNextDirectory), directory);
}

} // namespace JSC