/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Tracing.h"
#include <algorithm>
#define COLLECT_ON_EVERY_ALLOCATION 0

using namespace std;

namespace JSC {

namespace {

static const size_t largeHeapSize = 16 * 1024 * 1024;
static const size_t smallHeapSize = 512 * 1024;
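
// Maps the client-supplied HeapSize hint to the heap's initial per-cycle
// allocation budget (its high water mark), in bytes.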
static size_t heapSizeForHint(HeapSize heapSize)
{
#if ENABLE(LARGE_HEAP)
    if (heapSize == LargeHeap)
        return largeHeapSize;
    ASSERT(heapSize == SmallHeap);
    return smallHeapSize;
#else
    ASSERT_UNUSED(heapSize, heapSize == LargeHeap || heapSize == SmallHeap);
    return smallHeapSize;
#endif
}

static inline bool isValidSharedInstanceThreadState()
{
    if (!JSLock::lockCount())
        return false;

    if (!JSLock::currentThreadIsHoldingLock())
        return false;

    return true;
}

static inline bool isValidThreadState(JSGlobalData* globalData)
{
    if (globalData->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState())
        return false;

    return true;
}

class CountFunctor {
public:
    typedef size_t ReturnType;

    CountFunctor();
    void count(size_t);
    ReturnType returnValue();

private:
    ReturnType m_count;
};

inline CountFunctor::CountFunctor()
    : m_count(0)
{
}

inline void CountFunctor::count(size_t count)
{
    m_count += count;
}

inline CountFunctor::ReturnType CountFunctor::returnValue()
{
    return m_count;
}
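
// Each functor below is applied to every block (or cell) by the heap's
// forEachBlock / forEachCell / forEachProtectedCell templates, which return
// the functor's returnValue() when iteration is done.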
struct ClearMarks : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void ClearMarks::operator()(MarkedBlock* block)
{
    block->clearMarks();
    block->notifyMayHaveFreshFreeCells();
}

struct Sweep : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void Sweep::operator()(MarkedBlock* block)
{
    block->sweep();
}

struct MarkCount : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void MarkCount::operator()(MarkedBlock* block)
{
    count(block->markCount());
}

struct Size : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Size::operator()(MarkedBlock* block)
{
    count(block->markCount() * block->cellSize());
}

struct Capacity : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Capacity::operator()(MarkedBlock* block)
{
    count(block->capacity());
}

struct Count : public CountFunctor {
    void operator()(JSCell*);
};

inline void Count::operator()(JSCell*)
{
    count(1);
}

struct CountIfGlobalObject : CountFunctor {
    void operator()(JSCell*);
};

inline void CountIfGlobalObject::operator()(JSCell* cell)
{
    if (!cell->isObject())
        return;
    if (!asObject(cell)->isGlobalObject())
        return;
    count(1);
}

class TakeIfEmpty {
public:
    typedef MarkedBlock* ReturnType;

    TakeIfEmpty(NewSpace*);
    void operator()(MarkedBlock*);
    ReturnType returnValue();

private:
    NewSpace* m_newSpace;
    DoublyLinkedList<MarkedBlock> m_empties;
};

inline TakeIfEmpty::TakeIfEmpty(NewSpace* newSpace)
    : m_newSpace(newSpace)
{
}

inline void TakeIfEmpty::operator()(MarkedBlock* block)
{
    if (!block->isEmpty())
        return;

    m_newSpace->removeBlock(block);
    m_empties.append(block);
}

inline TakeIfEmpty::ReturnType TakeIfEmpty::returnValue()
{
    return m_empties.head();
}

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
    : m_heapSize(heapSize)
    , m_minBytesPerCycle(heapSizeForHint(heapSize))
    , m_operationInProgress(NoOperation)
    , m_newSpace(this)
    , m_extraCost(0)
    , m_markListSet(0)
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_machineThreads(this)
    , m_slotVisitor(globalData->jsArrayVPtr)
    , m_handleHeap(globalData)
    , m_isSafeToCollect(false)
    , m_globalData(globalData)
{
    m_newSpace.setHighWaterMark(m_minBytesPerCycle);
    (*m_activityCallback)();
#if ENABLE(LAZY_BLOCK_FREEING)
    m_numberOfFreeBlocks = 0;
    m_blockFreeingThread = createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree");
    ASSERT(m_blockFreeingThread);
#endif
}

Heap::~Heap()
{
#if ENABLE(LAZY_BLOCK_FREEING)
    // Signal the block-freeing thread to quit, then wait for it to exit.
    {
        MutexLocker locker(m_freeBlockLock);
        m_blockFreeingThreadShouldQuit = true;
        m_freeBlockCondition.broadcast();
    }
    waitForThreadCompletion(m_blockFreeingThread, 0);
#endif

    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

#if ENABLE(JIT)
    m_globalData->jitStubs->clearHostFunctionStubs();
#endif

    delete m_markListSet;
    m_markListSet = 0;

    clearMarks();
    m_handleHeap.finalizeWeakHandles();
    m_globalData->smallStrings.finalizeSmallStrings();

    shrink();
    ASSERT(!size());

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(stderr, "Visited Type Counts");
    m_destroyedTypeCounts.dump(stderr, "Destroyed Type Counts");
#endif

#if ENABLE(LAZY_BLOCK_FREEING)
    releaseFreeBlocks();
#endif

    m_globalData = 0;
}

#if ENABLE(LAZY_BLOCK_FREEING)
void Heap::waitForRelativeTimeWhileHoldingLock(double relative)
{
    if (m_blockFreeingThreadShouldQuit)
        return;
    m_freeBlockCondition.timedWait(m_freeBlockLock, currentTime() + relative);
}

void Heap::waitForRelativeTime(double relative)
{
    // If this returns early, that's fine, so long as it doesn't do it too
    // frequently. It would only be a bug if this function failed to return
    // when it was asked to do so.

    MutexLocker locker(m_freeBlockLock);
    waitForRelativeTimeWhileHoldingLock(relative);
}

void* Heap::blockFreeingThreadStartFunc(void* heap)
{
    static_cast<Heap*>(heap)->blockFreeingThreadMain();
    return 0;
}

void Heap::blockFreeingThreadMain()
{
    while (!m_blockFreeingThreadShouldQuit) {
        // Generally wait for one second before scavenging free blocks. This
        // may return early, particularly when we're being asked to quit.
        waitForRelativeTime(1.0);
        if (m_blockFreeingThreadShouldQuit)
            break;

        // Now process the list of free blocks. Keep freeing until half of the
        // blocks that are currently on the list are gone. Assume that a size_t
        // field can be accessed atomically.
        size_t currentNumberOfFreeBlocks = m_numberOfFreeBlocks;
        if (!currentNumberOfFreeBlocks)
            continue;

        size_t desiredNumberOfFreeBlocks = currentNumberOfFreeBlocks / 2;

        while (!m_blockFreeingThreadShouldQuit) {
            MarkedBlock* block;
            {
                MutexLocker locker(m_freeBlockLock);
                if (m_numberOfFreeBlocks <= desiredNumberOfFreeBlocks)
                    block = 0;
                else {
                    block = m_freeBlocks.removeHead();
                    ASSERT(block);
                    m_numberOfFreeBlocks--;
                }
            }

            if (!block)
                break;

            MarkedBlock::destroy(block);
        }
    }
}
#endif // ENABLE(LAZY_BLOCK_FREEING)

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_extraCost > maxExtraCost && m_extraCost > m_newSpace.highWaterMark() / 2)
        collectAllGarbage();
    m_extraCost += cost;
}
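
// Allocation fast path: bump-allocate out of the current block for this size
// class. The slow case below adds or recycles a block, or collects, when the
// fast path fails.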
inline void* Heap::tryAllocate(NewSpace::SizeClass& sizeClass)
{
    m_operationInProgress = Allocation;
    void* result = m_newSpace.allocate(sizeClass);
    m_operationInProgress = NoOperation;
    return result;
}

void* Heap::allocateSlowCase(NewSpace::SizeClass& sizeClass)
{
#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    void* result = tryAllocate(sizeClass);

    if (LIKELY(result != 0))
        return result;

    AllocationEffort allocationEffort;

    if (m_newSpace.waterMark() < m_newSpace.highWaterMark() || !m_isSafeToCollect)
        allocationEffort = AllocationMustSucceed;
    else
        allocationEffort = AllocationCanFail;

    MarkedBlock* block = allocateBlock(sizeClass.cellSize, allocationEffort);
    if (block) {
        m_newSpace.addBlock(sizeClass, block);
        void* result = tryAllocate(sizeClass);
        ASSERT(result);
        return result;
    }

    collect(DoNotSweep);

    result = tryAllocate(sizeClass);

    if (result)
        return result;

    ASSERT(m_newSpace.waterMark() < m_newSpace.highWaterMark());

    m_newSpace.addBlock(sizeClass, allocateBlock(sizeClass.cellSize, AllocationMustSucceed));

    result = tryAllocate(sizeClass);
    ASSERT(result);
    return result;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->first);
}
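
// While a sort is in progress, array contents may live in a temporary vector
// that the collector cannot see through the object graph, so such vectors are
// registered here and visited as GC roots.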
void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;
    ConservativeRoots registerFileRoots(&m_blocks);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
    m_operationInProgress = NoOperation;
}
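
// Visits every root in the system: conservative roots from the machine stack
// and register file, protected values, temporary sort vectors, the mark list
// set, strong handles, and finally weak handles, whose reachability depends
// on the set of opaque roots discovered during marking.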
void Heap::markRoots()
{
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_blocks);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(&m_blocks);
    registerFile().gatherConservativeRoots(registerFileRoots);

    clearMarks();

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    visitor.append(machineThreadRoots);
    visitor.drain();

    visitor.append(registerFileRoots);
    visitor.drain();

    markProtectedObjects(heapRootVisitor);
    visitor.drain();

    markTempSortVectors(heapRootVisitor);
    visitor.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
    if (m_globalData->exception)
        heapRootVisitor.visit(&m_globalData->exception);
    visitor.drain();

    m_handleHeap.visitStrongHandles(heapRootVisitor);
    visitor.drain();

    m_handleStack.visit(heapRootVisitor);
    visitor.drain();

    harvestWeakReferences();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        m_handleHeap.visitWeakHandles(heapRootVisitor);
        visitor.drain();
    // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}

void Heap::clearMarks()
{
    forEachBlock<ClearMarks>();
}

void Heap::sweep()
{
    forEachBlock<Sweep>();
}

size_t Heap::objectCount()
{
    return forEachBlock<MarkCount>();
}

size_t Heap::size()
{
    return forEachBlock<Size>();
}

size_t Heap::capacity()
{
    return forEachBlock<Capacity>();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return forEachCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return forEachCell<RecordType>();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;
    if (!m_globalData->dynamicGlobalObject)
        m_globalData->recompileAllJSFunctions();

    collect(DoSweep);
}
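
// A full collection cycle: canonicalize block state, mark all reachable
// objects, finalize weak references, reset the allocator, optionally sweep,
// and recompute the allocation high water mark.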
void Heap::collect(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();

    canonicalizeBlocks();

    markRoots();
    m_handleHeap.finalizeWeakHandles();
    m_globalData->smallStrings.finalizeSmallStrings();

    JAVASCRIPTCORE_GC_MARKED();

    resetAllocator();

    if (sweepToggle == DoSweep) {
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t proportionalBytes = 2 * size();
    m_newSpace.setHighWaterMark(max(proportionalBytes, m_minBytesPerCycle));
    m_newSpace.resetPropertyStorageNursery();
    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}

void Heap::canonicalizeBlocks()
{
    m_newSpace.canonicalizeBlocks();
}

void Heap::resetAllocator()
{
    m_extraCost = 0;
    m_newSpace.resetAllocator();
}

void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

bool Heap::isValidAllocation(size_t bytes)
{
    if (!isValidThreadState(m_globalData))
        return false;

    if (bytes > NewSpace::maxCellSize)
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}
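
// With lazy block freeing enabled, blocks released by shrink() are parked on
// m_freeBlocks for reuse here; the block-freeing thread destroys surplus
// blocks gradually in the background.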
MarkedBlock* Heap::allocateBlock(size_t cellSize, Heap::AllocationEffort allocationEffort)
{
    MarkedBlock* block;

#if !ENABLE(LAZY_BLOCK_FREEING)
    if (allocationEffort == AllocationCanFail)
        return 0;
    block = MarkedBlock::create(this, cellSize);
#else
    {
        MutexLocker locker(m_freeBlockLock);
        if (m_numberOfFreeBlocks) {
            block = m_freeBlocks.removeHead();
            ASSERT(block);
            m_numberOfFreeBlocks--;
        } else
            block = 0;
    }
    if (block)
        block->initForCellSize(cellSize);
    else if (allocationEffort == AllocationCanFail)
        return 0;
    else
        block = MarkedBlock::create(this, cellSize);
#endif

    m_blocks.add(block);

    return block;
}

void Heap::freeBlocks(MarkedBlock* head)
{
    MarkedBlock* next;
    for (MarkedBlock* block = head; block; block = next) {
        next = block->next();

        m_blocks.remove(block);

#if !ENABLE(LAZY_BLOCK_FREEING)
        MarkedBlock::destroy(block);
#else
        MutexLocker locker(m_freeBlockLock);
        m_freeBlocks.append(block);
        m_numberOfFreeBlocks++;
#endif
    }
}

void Heap::shrink()
{
    // We record a temporary list of empties to avoid modifying m_blocks while iterating it.
    TakeIfEmpty takeIfEmpty(&m_newSpace);
    freeBlocks(forEachBlock(takeIfEmpty));
}

#if ENABLE(LAZY_BLOCK_FREEING)
void Heap::releaseFreeBlocks()
{
    while (true) {
        MarkedBlock* block;
        {
            MutexLocker locker(m_freeBlockLock);
            if (!m_numberOfFreeBlocks)
                block = 0;
            else {
                block = m_freeBlocks.removeHead();
                ASSERT(block);
                m_numberOfFreeBlocks--;
            }
        }

        if (!block)
            break;

        MarkedBlock::destroy(block);
    }
}
#endif
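
// Write barrier slow path. Both variants are no-op stubs in this revision; a
// generational collector (ENABLE(GGC)) would record the owner-to-cell store
// here.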
#if ENABLE(GGC)
void Heap::writeBarrierSlowCase(const JSCell* owner, JSCell* cell)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(cell);
}

#else

void Heap::writeBarrierSlowCase(const JSCell*, JSCell*)
{
}

#endif

} // namespace JSC