/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MetaAllocator.h"

#include <wtf/FastMalloc.h>

namespace WTF {

MetaAllocatorHandle::MetaAllocatorHandle(MetaAllocator* allocator, void* start, size_t sizeInBytes)
    : m_allocator(allocator)
    , m_start(start)
    , m_sizeInBytes(sizeInBytes)
{
    ASSERT(allocator);
    ASSERT(start);
    ASSERT(sizeInBytes);
}

MetaAllocatorHandle::~MetaAllocatorHandle()
{
    if (!m_allocator)
        return;
    SpinLockHolder locker(&m_allocator->m_lock);
    if (m_sizeInBytes) {
        m_allocator->decrementPageOccupancy(m_start, m_sizeInBytes);
        m_allocator->addFreeSpaceFromReleasedHandle(m_start, m_sizeInBytes);
    }
}

void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
{
    ASSERT(newSizeInBytes <= m_sizeInBytes);

    if (!m_allocator) {
        m_sizeInBytes = newSizeInBytes;
        return;
    }

    SpinLockHolder locker(&m_allocator->m_lock);

    newSizeInBytes = m_allocator->roundUp(newSizeInBytes);
    ASSERT(newSizeInBytes <= m_sizeInBytes);
    if (newSizeInBytes == m_sizeInBytes)
        return;

    uintptr_t freeStart = reinterpret_cast<uintptr_t>(m_start) + newSizeInBytes;
    size_t freeSize = m_sizeInBytes - newSizeInBytes;
    uintptr_t freeEnd = freeStart + freeSize;

    // The freed tail may begin in the middle of a page; only pages that lie
    // entirely within the freed region can have their occupancy dropped.
    uintptr_t firstCompletelyFreePage = (freeStart + m_allocator->m_pageSize - 1) & ~(m_allocator->m_pageSize - 1);
    if (firstCompletelyFreePage < freeEnd)
        m_allocator->decrementPageOccupancy(reinterpret_cast<void*>(firstCompletelyFreePage), freeSize - (firstCompletelyFreePage - freeStart));

    m_allocator->addFreeSpaceFromReleasedHandle(reinterpret_cast<void*>(freeStart), freeSize);

    m_sizeInBytes = newSizeInBytes;
}

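// Illustrative example (editor's note, not part of the original source): with
// 4096-byte pages, shrinking a 10000-byte allocation that starts at address 0
// down to a granule-aligned 5000 bytes frees the tail [5000, 10000). Here
// firstCompletelyFreePage rounds 5000 up to 8192, so only the page at 8192 has
// its occupancy decremented; the page at 4096 stays committed because the
// retained 5000 bytes still end inside it.
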
MetaAllocator::MetaAllocator(size_t allocationGranule)
    : m_allocationGranule(allocationGranule)
    , m_pageSize(pageSize())
    , m_bytesAllocated(0)
    , m_bytesReserved(0)
    , m_bytesCommitted(0)
#if ENABLE(META_ALLOCATOR_PROFILE)
    , m_numAllocations(0)
    , m_numFrees(0)
#endif
{
    m_lock.Init();

    // Both the page size and the allocation granule must be powers of two.
    for (m_logPageSize = 0; m_logPageSize < 32; ++m_logPageSize) {
        if (static_cast<size_t>(1) << m_logPageSize == m_pageSize)
            break;
    }
    ASSERT(static_cast<size_t>(1) << m_logPageSize == m_pageSize);

    for (m_logAllocationGranule = 0; m_logAllocationGranule < 32; ++m_logAllocationGranule) {
        if (static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule)
            break;
    }
    ASSERT(static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule);
}

PassRefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes)
{
    SpinLockHolder locker(&m_lock);

    if (!sizeInBytes)
        return 0;
    sizeInBytes = roundUp(sizeInBytes);

    // Try to carve the allocation out of existing free space first; only ask
    // for fresh pages if that fails.
    void* start = findAndRemoveFreeSpace(sizeInBytes);
    if (!start) {
        size_t requestedNumberOfPages = (sizeInBytes + m_pageSize - 1) >> m_logPageSize;
        size_t numberOfPages = requestedNumberOfPages;

        start = allocateNewSpace(numberOfPages);
        if (!start)
            return 0;
        ASSERT(numberOfPages >= requestedNumberOfPages);

        size_t roundedUpSize = numberOfPages << m_logPageSize;
        ASSERT(roundedUpSize >= sizeInBytes);
        m_bytesReserved += roundedUpSize;

        if (roundedUpSize > sizeInBytes) {
            void* freeSpaceStart = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);
            size_t freeSpaceSize = roundedUpSize - sizeInBytes;
            addFreeSpace(freeSpaceStart, freeSpaceSize);
        }
    }

    incrementPageOccupancy(start, sizeInBytes);
    m_bytesAllocated += sizeInBytes;
#if ENABLE(META_ALLOCATOR_PROFILE)
    m_numAllocations++;
#endif

    return adoptRef(new MetaAllocatorHandle(this, start, sizeInBytes));
}

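// Illustrative usage (editor's sketch, not part of the original source): a
// concrete allocator subclasses MetaAllocator, implements allocateNewSpace(),
// notifyNeedPage() and notifyPageIsFree(), and then hands out memory roughly
// like this, where myAllocator and use() stand in for client code:
//
//     RefPtr<MetaAllocatorHandle> handle = myAllocator->allocate(1024);
//     if (handle)
//         use(handle->start(), handle->sizeInBytes());
//     // Dropping the last reference returns the space to the allocator.
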
MetaAllocator::Statistics MetaAllocator::currentStatistics()
{
    SpinLockHolder locker(&m_lock);
    Statistics result;
    result.bytesAllocated = m_bytesAllocated;
    result.bytesReserved = m_bytesReserved;
    result.bytesCommitted = m_bytesCommitted;
    return result;
}

void* MetaAllocator::findAndRemoveFreeSpace(size_t sizeInBytes)
{
    FreeSpaceNode* node = m_freeSpaceSizeMap.findLeastGreaterThanOrEqual(sizeInBytes);
    if (!node)
        return 0;
    ASSERT(node->m_key >= sizeInBytes);

    m_freeSpaceSizeMap.remove(node);

    void* result;

    if (node->m_key == sizeInBytes) {
        // Easy case: perfect fit, so just remove the node entirely.
        result = node->m_value;

        m_freeSpaceStartAddressMap.remove(node->m_value);
        m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + node->m_key));
        freeFreeSpaceNode(node);
    } else {
        // Try to be a good citizen and ensure that the returned chunk of memory
        // straddles as few pages as possible, but only insofar as doing so will
        // not increase fragmentation. The intuition is that minimizing
        // fragmentation is a strictly higher priority than minimizing the number
        // of committed pages, since in the long run, smaller fragmentation means
        // fewer committed pages and fewer failures in general.

        uintptr_t firstPage = reinterpret_cast<uintptr_t>(node->m_value) >> m_logPageSize;
        uintptr_t lastPage = (reinterpret_cast<uintptr_t>(node->m_value) + node->m_key - 1) >> m_logPageSize;

        uintptr_t lastPageForLeftAllocation = (reinterpret_cast<uintptr_t>(node->m_value) + sizeInBytes - 1) >> m_logPageSize;
        uintptr_t firstPageForRightAllocation = (reinterpret_cast<uintptr_t>(node->m_value) + node->m_key - sizeInBytes) >> m_logPageSize;
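
        // Illustrative example (editor's note, not part of the original source):
        // with 4096-byte pages, a free chunk of 8192 bytes starting at address
        // 7936 and a 512-byte request give a left-side placement spanning two
        // pages (1 and 2), while a right-side placement at 15616 fits entirely
        // in page 3, so the comparison below prefers the right side.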
        if (lastPageForLeftAllocation - firstPage + 1 <= lastPage - firstPageForRightAllocation + 1) {
            // Allocate in the left side of the returned chunk, and slide the node to the right.
            result = node->m_value;

            m_freeSpaceStartAddressMap.remove(node->m_value);

            node->m_key -= sizeInBytes;
            node->m_value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + sizeInBytes);

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceStartAddressMap.add(node->m_value, node);
        } else {
            // Allocate in the right side of the returned chunk, and slide the node to the left.
            result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + node->m_key - sizeInBytes);

            m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + node->m_key));

            node->m_key -= sizeInBytes;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceEndAddressMap.add(result, node);
        }
    }

    return result;
}

void MetaAllocator::addFreeSpaceFromReleasedHandle(void* start, size_t sizeInBytes)
{
#if ENABLE(META_ALLOCATOR_PROFILE)
    m_numFrees++;
#endif
    m_bytesAllocated -= sizeInBytes;
    addFreeSpace(start, sizeInBytes);
}

void MetaAllocator::addFreshFreeSpace(void* start, size_t sizeInBytes)
{
    SpinLockHolder locker(&m_lock);
    m_bytesReserved += sizeInBytes;
    addFreeSpace(start, sizeInBytes);
}

size_t MetaAllocator::debugFreeSpaceSize()
{
    SpinLockHolder locker(&m_lock);
    size_t result = 0;
    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node; node = node->successor())
        result += node->m_key;
    return result;
}

void MetaAllocator::addFreeSpace(void* start, size_t sizeInBytes)
{
    void* end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);

    HashMap<void*, FreeSpaceNode*>::iterator leftNeighbor = m_freeSpaceEndAddressMap.find(start);
    HashMap<void*, FreeSpaceNode*>::iterator rightNeighbor = m_freeSpaceStartAddressMap.find(end);

    if (leftNeighbor != m_freeSpaceEndAddressMap.end()) {
        // We have something we can coalesce with on the left. Remove it from the tree, and
        // remove its end from the end address map.

        ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftNeighbor->second->m_value) + leftNeighbor->second->m_key) == leftNeighbor->first);

        FreeSpaceNode* leftNode = leftNeighbor->second;

        void* leftStart = leftNode->m_value;
        size_t leftSize = leftNode->m_key;
        void* leftEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize);

        ASSERT(leftEnd == start);

        m_freeSpaceSizeMap.remove(leftNode);
        m_freeSpaceEndAddressMap.remove(leftEnd);

        // Now check if there is also something to coalesce with on the right.
        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
            // Freeing something in the middle of free blocks. Coalesce both left and
            // right, whilst removing the right neighbor from the maps.

            ASSERT(rightNeighbor->second->m_value == rightNeighbor->first);

            FreeSpaceNode* rightNode = rightNeighbor->second;
            void* rightStart = rightNeighbor->first;
            size_t rightSize = rightNode->m_key;
            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);

            ASSERT(rightStart == end);
            ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize + sizeInBytes + rightSize) == rightEnd);

            m_freeSpaceSizeMap.remove(rightNode);
            m_freeSpaceStartAddressMap.remove(rightStart);
            m_freeSpaceEndAddressMap.remove(rightEnd);

            freeFreeSpaceNode(rightNode);

            leftNode->m_key += sizeInBytes + rightSize;

            m_freeSpaceSizeMap.insert(leftNode);
            m_freeSpaceEndAddressMap.add(rightEnd, leftNode);
        } else {
            // Only a left neighbor: grow it to cover the newly freed range.
            leftNode->m_key += sizeInBytes;

            m_freeSpaceSizeMap.insert(leftNode);
            m_freeSpaceEndAddressMap.add(end, leftNode);
        }
    } else {
        // Cannot coalesce with left; try to see if we can coalesce with right.

        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
            FreeSpaceNode* rightNode = rightNeighbor->second;
            void* rightStart = rightNeighbor->first;
            size_t rightSize = rightNode->m_key;
            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);

            ASSERT(rightStart == end);
            ASSERT_UNUSED(rightEnd, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes + rightSize) == rightEnd);

            m_freeSpaceSizeMap.remove(rightNode);
            m_freeSpaceStartAddressMap.remove(rightStart);

            rightNode->m_key += sizeInBytes;
            rightNode->m_value = start;

            m_freeSpaceSizeMap.insert(rightNode);
            m_freeSpaceStartAddressMap.add(start, rightNode);
        } else {
            // Nothing to coalesce with, so create a new free space node and add it.

            FreeSpaceNode* node = allocFreeSpaceNode();

            node->m_key = sizeInBytes;
            node->m_value = start;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceStartAddressMap.add(start, node);
            m_freeSpaceEndAddressMap.add(end, node);
        }
    }
}

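// Illustrative example (editor's note, not part of the original source): if the
// maps already hold free ranges [0x1000, 0x2000) and [0x3000, 0x4000), then
// adding [0x2000, 0x3000) finds the first range through m_freeSpaceEndAddressMap
// (its end equals the new start) and the second through m_freeSpaceStartAddressMap
// (its start equals the new end), so all three merge into one node covering
// [0x1000, 0x4000).
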
void MetaAllocator::incrementPageOccupancy(void* address, size_t sizeInBytes)
{
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;

    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
        if (iter == m_pageOccupancyMap.end()) {
            // This is the first allocation to touch the page, so commit it.
            m_pageOccupancyMap.add(page, 1);
            m_bytesCommitted += m_pageSize;
            notifyNeedPage(reinterpret_cast<void*>(page << m_logPageSize));
        } else
            ++iter->second;
    }
}

void MetaAllocator::decrementPageOccupancy(void* address, size_t sizeInBytes)
{
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;

    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
        ASSERT(iter != m_pageOccupancyMap.end());
        if (!--(iter->second)) {
            // No allocation touches this page any more, so decommit it.
            m_pageOccupancyMap.remove(iter);
            m_bytesCommitted -= m_pageSize;
            notifyPageIsFree(reinterpret_cast<void*>(page << m_logPageSize));
        }
    }
}

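// Illustrative example (editor's note, not part of the original source): with
// 4096-byte pages, two 2048-byte allocations that share one page raise that
// page's occupancy count to 2; notifyPageIsFree() fires only after both are
// released and the count drops back to zero.
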
size_t MetaAllocator::roundUp(size_t sizeInBytes)
{
    // Guard against overflow before rounding up to the next multiple of the
    // allocation granule, which the constructor asserts is a power of two.
    if (std::numeric_limits<size_t>::max() - m_allocationGranule <= sizeInBytes)
        CRASH();
    return (sizeInBytes + m_allocationGranule - 1) & ~(m_allocationGranule - 1);
}

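// Illustrative example (editor's note, not part of the original source): with a
// 32-byte allocation granule, roundUp(1) through roundUp(32) all return 32 and
// roundUp(33) returns 64.
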
MetaAllocator::FreeSpaceNode* MetaAllocator::allocFreeSpaceNode()
{
    return new (fastMalloc(sizeof(FreeSpaceNode))) FreeSpaceNode(0, 0);
}

void MetaAllocator::freeFreeSpaceNode(FreeSpaceNode* node)
{
    fastFree(node);
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void MetaAllocator::dumpProfile()
{
    printf("num allocations = %u, num frees = %u\n", m_numAllocations, m_numFrees);
}
#endif

} // namespace WTF