/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include <wtf/Bitmap.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_DEBUG_BLOCK(block) do {                                   \
        printf("%s:%d %s: block %s = %p\n",                            \
               __FILE__, __LINE__, __FUNCTION__, #block, (block));     \
    } while (false)
#else
#define HEAP_DEBUG_BLOCK(block) ((void)0)
#endif

namespace JSC {

    class Heap;
    class JSCell;

    typedef uintptr_t Bits;

    static const size_t KB = 1024;
    static const size_t MB = 1024 * 1024;

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.
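    // For example, with atomSize = 32 bytes (a 64-bit build), a 40-byte object
    // must live in a two-atom (64-byte) cell, wasting 24 bytes per object.
    // (Illustrative arithmetic; the exact figures depend on the target's
    // pointer size.)
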
    class MarkedBlock : public DoublyLinkedListNode<MarkedBlock> {
        friend class WTF::DoublyLinkedListNode<MarkedBlock>;
    public:
        // Ensure natural alignment for native types whilst recognizing that the smallest
        // object the heap will commonly allocate is four words.
        static const size_t atomSize = 4 * sizeof(void*);
        static const size_t blockSize = 16 * KB;

        static const size_t atomsPerBlock = blockSize / atomSize; // ~1.5% overhead
        static const size_t ownerSetsPerBlock = 8; // ~2% overhead.
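        // Arithmetic check (64-bit build): atomSize = 32 bytes, so
        // atomsPerBlock = 16384 / 32 = 512, and the per-atom mark bitmap
        // below (m_marks) costs 512 bits = 64 bytes per block.
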
        struct FreeCell {
            FreeCell* next;

            void setNoObject()
            {
                // This relies on FreeCell not having a vtable, and the next field
                // falling exactly where a vtable would have been.
                next = 0;
            }
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        static MarkedBlock* create(Heap*, size_t cellSize);
        static void destroy(MarkedBlock*);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        Heap* heap() const;

        bool inNewSpace();
        void setInNewSpace(bool);

        // This invokes destructors on all cells that are not marked, marks
        // them, and returns a linked list of those cells.
        FreeCell* lazySweep();

        // Notify the block that destructors may have to be called again.
        void notifyMayHaveFreshFreeCells();

        void initForCellSize(size_t cellSize);

        // These should be called immediately after a block is created.
        // Blessing for fast path creates a linked list, while blessing for
        // slow path creates dummy cells.
        FreeCell* blessNewBlockForFastPath();
        void blessNewBlockForSlowPath();

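        // Example (illustrative, not a real call site): a fresh block is
        // typically blessed once, then cells are popped off the returned list:
        //
        //     MarkedBlock* block = MarkedBlock::create(heap, cellSize);
        //     MarkedBlock::FreeCell* head = block->blessNewBlockForFastPath();
        //     void* cell = head; // allocate by popping the list head
        //     head = head->next;
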
        // This unmarks all cells on the free list, and allocates dummy JSCells
        // in their place.
        void canonicalizeBlock(FreeCell* firstFreeCell);

        bool isEmpty();

        void clearMarks();
        size_t markCount();

        size_t cellSize();

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool testAndClearMarked(const void*);
        void setMarked(const void*);

        template <typename Functor> void forEachCell(Functor&);

    private:
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.
        static const size_t atomMask = ~(atomSize - 1); // atomSize must be a power of two.

        enum DestructorState { FreeCellsDontHaveObjects, SomeFreeCellsStillHaveObjects, AllFreeCellsHaveObjects };

        typedef char Atom[atomSize];

        MarkedBlock(const PageAllocationAligned&, Heap*, size_t cellSize);
        Atom* atoms();

        size_t atomNumber(const void*);
        size_t ownerSetNumber(const JSCell*);

        template<DestructorState destructorState>
        void callDestructor(JSCell*, void* jsFinalObjectVPtr);

        template<DestructorState destructorState>
        void specializedReset();

        template<DestructorState destructorState>
        void specializedSweep();

        template<DestructorState destructorState>
        MarkedBlock::FreeCell* produceFreeList();

        void setDestructorState(DestructorState destructorState)
        {
            m_destructorState = static_cast<int8_t>(destructorState);
        }

        DestructorState destructorState()
        {
            return static_cast<DestructorState>(m_destructorState);
        }

        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
        size_t m_atomsPerCell;
        WTF::Bitmap<blockSize / atomSize> m_marks;
        bool m_inNewSpace;
        int8_t m_destructorState; // Use the getter/setter above, particularly since we may want to compact this (effectively log2(3)-bit) field into other fields.
        PageAllocationAligned m_allocation;
        Heap* m_heap;
        MarkedBlock* m_prev;
        MarkedBlock* m_next;
    };

    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & ~atomMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }

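    // Because blocks are blockSize-aligned, masking off the low bits of any
    // interior pointer recovers the owning block. Example (illustrative):
    //
    //     JSCell* cell = ...;
    //     MarkedBlock* block = MarkedBlock::blockFor(cell);
    //     bool live = block->isMarked(cell);
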
    inline Heap* MarkedBlock::heap() const
    {
        return m_heap;
    }

    inline bool MarkedBlock::inNewSpace()
    {
        return m_inNewSpace;
    }

    inline void MarkedBlock::setInNewSpace(bool inNewSpace)
    {
        m_inNewSpace = inNewSpace;
    }

    inline void MarkedBlock::notifyMayHaveFreshFreeCells()
    {
        HEAP_DEBUG_BLOCK(this);

        // This is called at the beginning of GC. If this block is
        // AllFreeCellsHaveObjects, then it means that we filled up
        // the block in this collection. If it's in any other state,
        // then this collection will potentially produce new free
        // cells; new free cells always have objects. Hence if the
        // state currently claims that there are no objects in free
        // cells then we need to bump it over. Otherwise leave it be.
        // This all crucially relies on the collector canonicalizing
        // blocks before doing anything else, as canonicalizeBlocks
        // will correctly set SomeFreeCellsStillHaveObjects for
        // blocks that were only partially filled during this
        // cycle.

        if (destructorState() == FreeCellsDontHaveObjects)
            setDestructorState(SomeFreeCellsStillHaveObjects);
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty();
    }

    inline void MarkedBlock::clearMarks()
    {
        HEAP_DEBUG_BLOCK(this);
        m_marks.clearAll();
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return m_allocation.size();
    }

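    // atomNumber maps an interior pointer to its atom index within the block.
    // Worked example (assuming a 64-bit build, atomSize = 32): a pointer 320
    // bytes past the block base is atom number 320 / 32 = 10.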
    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.testAndSet(atomNumber(p));
    }

    inline bool MarkedBlock::testAndClearMarked(const void* p)
    {
        return m_marks.testAndClear(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            functor(reinterpret_cast<JSCell*>(&atoms()[i]));
        }
    }

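    // Note that forEachCell visits every cell slot, marked or not, so functors
    // must tolerate dead cells. A hypothetical counting functor (illustrative):
    //
    //     struct CountCells : MarkedBlock::VoidFunctor {
    //         size_t count;
    //         CountCells() : count(0) { }
    //         void operator()(JSCell*) { ++count; }
    //     };
    //     CountCells counter;
    //     block->forEachCell(counter);
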
    inline size_t MarkedBlock::ownerSetNumber(const JSCell* cell)
    {
        return (reinterpret_cast<Bits>(cell) - reinterpret_cast<Bits>(this)) * ownerSetsPerBlock / blockSize;
    }

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
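            // (blockSize is 16KB = 2^14, so the division below discards the 14
            // always-zero low bits.)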
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };

    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h