/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"

namespace JSC {
MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    return new (allocation.base()) MarkedBlock(allocation, heap, cellSize);
}
void MarkedBlock::destroy(MarkedBlock* block)
{
    block->m_allocation.deallocate();
}
MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize)
    : m_allocation(allocation)
    , m_heap(heap)
{
    initForCellSize(cellSize);
}
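
// Cells are carved out of the block in units of "atoms". initForCellSize()
// rounds the cell size up to a whole number of atoms and computes the last
// atom at which a complete cell can still begin. For illustration, assuming
// an 8-byte atom (atomSize is defined in MarkedBlock.h): a 48-byte cell
// occupies (48 + 8 - 1) / 8 = 6 atoms.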
void MarkedBlock::initForCellSize(size_t cellSize)
{
    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
    m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
    setDestructorState(SomeFreeCellsStillHaveObjects);
}
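
// callDestructor() is specialized on the block's destructor state so the
// compiler can prune work per state: the FreeCellsDontHaveObjects
// specialization is a no-op, and AllFreeCellsHaveObjects lets the per-cell
// vptr null check be skipped.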
template<MarkedBlock::DestructorState specializedDestructorState>
void MarkedBlock::callDestructor(JSCell* cell, void* jsFinalObjectVPtr)
{
    if (specializedDestructorState == FreeCellsDontHaveObjects)
        return;
    void* vptr = cell->vptr();
    if (specializedDestructorState == AllFreeCellsHaveObjects || vptr) {
#if ENABLE(SIMPLE_HEAP_PROFILING)
        m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif
        if (vptr == jsFinalObjectVPtr) {
            JSFinalObject* object = reinterpret_cast<JSFinalObject*>(cell);
            object->JSFinalObject::~JSFinalObject();
        } else
            cell->~JSCell();
    }
}
template<MarkedBlock::DestructorState specializedDestructorState>
void MarkedBlock::specializedReset()
{
    void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;

    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
        callDestructor<specializedDestructorState>(reinterpret_cast<JSCell*>(&atoms()[i]), jsFinalObjectVPtr);
}
void MarkedBlock::reset()
{
    switch (destructorState()) {
    case FreeCellsDontHaveObjects:
    case SomeFreeCellsStillHaveObjects:
        specializedReset<SomeFreeCellsStillHaveObjects>();
        break;
    default:
        ASSERT(destructorState() == AllFreeCellsHaveObjects);
        specializedReset<AllFreeCellsHaveObjects>();
        break;
    }
}
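
// specializedSweep() runs destructors for cells that were not marked live
// by the last collection, then records that the block's remaining free
// cells no longer hold objects.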
template<MarkedBlock::DestructorState specializedDestructorState>
void MarkedBlock::specializedSweep()
{
    if (specializedDestructorState != FreeCellsDontHaveObjects) {
        void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;

        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            if (m_marks.get(i))
                continue;

            JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
            callDestructor<specializedDestructorState>(cell, jsFinalObjectVPtr);
        }
    }

    setDestructorState(FreeCellsDontHaveObjects);
}
void MarkedBlock::sweep()
{
    HEAP_DEBUG_BLOCK(this);

    switch (destructorState()) {
    case FreeCellsDontHaveObjects:
        break;
    case SomeFreeCellsStillHaveObjects:
        specializedSweep<SomeFreeCellsStillHaveObjects>();
        break;
    default:
        ASSERT(destructorState() == AllFreeCellsHaveObjects);
        specializedSweep<AllFreeCellsHaveObjects>();
        break;
    }
}
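
// produceFreeList() implements the core of lazy sweeping. testAndSet() sets
// the mark bit and returns its previous value, so in a single pass each
// unmarked (free) cell is destructed if necessary, marked so it will not be
// offered again, and threaded onto the free list.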
template<MarkedBlock::DestructorState specializedDestructorState>
ALWAYS_INLINE MarkedBlock::FreeCell* MarkedBlock::produceFreeList()
{
    // This returns a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.

    void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;

    FreeCell* result = 0;

    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (!m_marks.testAndSet(i)) {
            JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
            if (specializedDestructorState != FreeCellsDontHaveObjects)
                callDestructor<specializedDestructorState>(cell, jsFinalObjectVPtr);
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = result;
            result = freeCell;
        }
    }

    // This is sneaky: if we're producing a free list then we intend to
    // fill up the free cells in the block with objects, which means that
    // if we have a new GC then all of the free stuff in this block will
    // comprise objects rather than empty cells.
    setDestructorState(AllFreeCellsHaveObjects);

    return result;
}
MarkedBlock::FreeCell* MarkedBlock::lazySweep()
{
    // This returns a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.

    HEAP_DEBUG_BLOCK(this);

    switch (destructorState()) {
    case FreeCellsDontHaveObjects:
        return produceFreeList<FreeCellsDontHaveObjects>();
    case SomeFreeCellsStillHaveObjects:
        return produceFreeList<SomeFreeCellsStillHaveObjects>();
    default:
        ASSERT(destructorState() == AllFreeCellsHaveObjects);
        return produceFreeList<AllFreeCellsHaveObjects>();
    }
}
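
// A brand-new block contains no live cells, so the fast path can skip the
// mark-bit tests and destructor calls that produceFreeList() performs and
// simply thread every cell onto the free list. A hypothetical caller (not
// part of this file) would consume either list like so:
//
//     for (MarkedBlock::FreeCell* cell = block->lazySweep(); cell; cell = cell->next)
//         /* hand the cell out as a fresh allocation */;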
MarkedBlock::FreeCell* MarkedBlock::blessNewBlockForFastPath()
{
    // This returns a free list that is ordered in reverse through the block,
    // as in lazySweep() above.

    HEAP_DEBUG_BLOCK(this);

    FreeCell* result = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        m_marks.set(i);
        FreeCell* freeCell = reinterpret_cast<FreeCell*>(&atoms()[i]);
        freeCell->next = result;
        result = freeCell;
    }

    // See produceFreeList(). If we're here then we intend to fill the
    // block with objects, so once a GC happens, all free cells will be
    // occupied by objects.
    setDestructorState(AllFreeCellsHaveObjects);

    return result;
}
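
// The slow path hands out no free list; it only puts a fresh block into a
// canonical empty state, tagging every cell as holding no object so that a
// later sweep can safely skip destructors.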
void MarkedBlock::blessNewBlockForSlowPath()
{
    HEAP_DEBUG_BLOCK(this);

    m_marks.clearAll();
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
        reinterpret_cast<FreeCell*>(&atoms()[i])->setNoObject();

    setDestructorState(FreeCellsDontHaveObjects);
}
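
// canonicalizeBlock() undoes an unconsumed free list: any cells the
// allocator never used are unmarked and tagged as empty again, so the next
// sweep will not run destructors on raw free memory.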
void MarkedBlock::canonicalizeBlock(FreeCell* firstFreeCell)
{
    HEAP_DEBUG_BLOCK(this);

    ASSERT(destructorState() == AllFreeCellsHaveObjects);

    if (firstFreeCell) {
        for (FreeCell* current = firstFreeCell; current;) {
            FreeCell* next = current->next;
            size_t i = atomNumber(current);

            m_marks.clear(i);

            current->setNoObject();

            current = next;
        }

        setDestructorState(SomeFreeCellsStillHaveObjects);
    }
}

} // namespace JSC