2 * Copyright (C) 2015 The Android Open Source Project
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include "linker_allocator.h"
30 #include "linker_debug.h"
40 #include <async_safe/log.h>
42 #include "private/bionic_prctl.h"
// LinkerMemoryAllocator is a general-purpose allocator
// designed to provide the same functionality as the malloc/free/realloc
50 // If size is >= 1k allocator proxies malloc call directly to mmap
51 // If size < 1k allocator uses SmallObjectAllocator for the size
52 // rounded up to the nearest power of two.
56 // For a pointer allocated using proxy-to-mmap allocator unmaps
59 // For a pointer allocated using SmallObjectAllocator it adds
60 // the block to free_blocks_list_. If the number of free pages reaches 2,
61 // SmallObjectAllocator munmaps one of the pages keeping the other one
// Magic bytes written at the start of every page this allocator maps;
// get_page_info() checks them to reject pointers that were not allocated here.
static const char kSignature[4] = {'L', 'M', 'A', 1};

// Largest size served by the small-object allocators; bigger requests go to mmap.
static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2;

// This type is used for large allocations (with size >1k)
static const uint32_t kLargeObject = 111;
71 bool operator<(const small_object_page_record& one, const small_object_page_record& two) {
72 return one.page_addr < two.page_addr;
75 static inline uint16_t log2(size_t number) {
87 LinkerSmallObjectAllocator::LinkerSmallObjectAllocator(uint32_t type, size_t block_size)
88 : type_(type), block_size_(block_size), free_pages_cnt_(0), free_blocks_list_(nullptr) {}
// Hands out one block of block_size_ bytes from the free list; the
// returned memory is zero-filled (see the memset below).
void* LinkerSmallObjectAllocator::alloc() {
  CHECK(block_size_ != 0);

  // Empty free list — presumably a fresh page is mapped here (alloc_page
  // pushes its blocks onto free_blocks_list_) — TODO confirm elided branch.
  if (free_blocks_list_ == nullptr) {

  // Pop the head of the free list. A record with free_blocks_cnt > 1
  // describes a run of adjacent free blocks: take only the first block and
  // write a new record into the next block of the run.
  small_object_block_record* block_record = free_blocks_list_;
  if (block_record->free_blocks_cnt > 1) {
    small_object_block_record* next_free = reinterpret_cast<small_object_block_record*>(
        reinterpret_cast<uint8_t*>(block_record) + block_size_);
    next_free->next = block_record->next;
    next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1;
    free_blocks_list_ = next_free;
    // Single-block record: simply advance past it.
    free_blocks_list_ = block_record->next;

  // Keep the per-page counters in sync with the free list.
  auto page_record = find_page_record(block_record);

  if (page_record->allocated_blocks_cnt == 0) {

  page_record->free_blocks_cnt--;
  page_record->allocated_blocks_cnt++;

  // Wipe the free-list record that lived inside the block before handing
  // the block out to the caller.
  memset(block_record, 0, block_size_);
// Unmaps the page described by page_record after unlinking every
// free-list record whose storage lives inside that page.
void LinkerSmallObjectAllocator::free_page(linker_vector_t::iterator page_record) {
  void* page_start = reinterpret_cast<void*>(page_record->page_addr);
  void* page_end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(page_start) + PAGE_SIZE);

  // First drop any leading list entries that are inside this page.
  while (free_blocks_list_ != nullptr &&
      free_blocks_list_ > page_start &&
      free_blocks_list_ < page_end) {
    free_blocks_list_ = free_blocks_list_->next;

  small_object_block_record* current = free_blocks_list_;

  // Then splice out the remaining in-page entries from the middle of the
  // list. The inner loop skips consecutive in-page records in one pass.
  while (current != nullptr) {
    while (current->next > page_start && current->next < page_end) {
      current->next = current->next->next;

    current = current->next;

  // Nothing points into the page any more — release it and forget it.
  munmap(page_start, PAGE_SIZE);
  page_records_.erase(page_record);
// Returns ptr's block to the free list; once a page holds no live
// allocations it may be unmapped (a small number of free pages is cached).
void LinkerSmallObjectAllocator::free(void* ptr) {
  auto page_record = find_page_record(ptr);

  // Blocks start sizeof(page_info) past a page-aligned base, so a valid
  // pointer must be block_size_-aligned after removing the header offset
  // (page alignment makes the absolute address usable for the check).
  ssize_t offset = reinterpret_cast<uintptr_t>(ptr) - sizeof(page_info);

  if (offset % block_size_ != 0) {
    async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_);

  // Scrub the block, then reuse its first bytes as the free-list record.
  memset(ptr, 0, block_size_);
  small_object_block_record* block_record = reinterpret_cast<small_object_block_record*>(ptr);

  block_record->next = free_blocks_list_;
  block_record->free_blocks_cnt = 1;

  free_blocks_list_ = block_record;

  page_record->free_blocks_cnt++;
  page_record->allocated_blocks_cnt--;

  if (page_record->allocated_blocks_cnt == 0) {
    if (free_pages_cnt_++ > 1) {
      // if we already have a free page - unmap this one.
      // NOTE(review): with `> 1` the unmap only fires once a *third* page
      // becomes fully free, i.e. up to two free pages stay cached, while
      // the file-top comment says one is kept — confirm which is intended.
      free_page(page_record);
// Binary-searches page_records_ (kept sorted by page address, see
// operator< above) for the record of the page containing ptr; aborts via
// async_safe_fatal when no such page is tracked by this allocator.
linker_vector_t::iterator LinkerSmallObjectAllocator::find_page_record(void* ptr) {
  // Round ptr down to the start of its page — that is the lookup key.
  void* addr = reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(ptr)));
  small_object_page_record boundary;
  boundary.page_addr = addr;
  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), boundary);

  // lower_bound gives the first record >= key; it matches only if the
  // page address is exactly equal.
  if (it == page_records_.end() || it->page_addr != addr) {
    async_safe_fatal("page record for %p was not found (block_size=%zd)", ptr, block_size_);
// Registers a freshly mapped page in page_records_, inserting at the
// position that keeps the vector sorted by page address so that
// find_page_record() can keep using std::lower_bound.
void LinkerSmallObjectAllocator::create_page_record(void* page_addr, size_t free_blocks_cnt) {
  small_object_page_record record;
  record.page_addr = page_addr;
  record.free_blocks_cnt = free_blocks_cnt;
  record.allocated_blocks_cnt = 0;  // brand-new page: nothing handed out yet

  linker_vector_t::iterator it = std::lower_bound(
      page_records_.begin(), page_records_.end(), record);
  page_records_.insert(it, record);
202 void LinkerSmallObjectAllocator::alloc_page() {
203 static_assert(sizeof(page_info) % 16 == 0,
204 "sizeof(page_info) is not multiple of 16");
205 void* map_ptr = mmap(nullptr, PAGE_SIZE,
206 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
207 if (map_ptr == MAP_FAILED) {
208 async_safe_fatal("mmap failed");
211 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE, "linker_alloc_small_objects");
213 page_info* info = reinterpret_cast<page_info*>(map_ptr);
214 memcpy(info->signature, kSignature, sizeof(kSignature));
216 info->allocator_addr = this;
218 size_t free_blocks_cnt = (PAGE_SIZE - sizeof(page_info))/block_size_;
220 create_page_record(map_ptr, free_blocks_cnt);
222 small_object_block_record* first_block = reinterpret_cast<small_object_block_record*>(info + 1);
224 first_block->next = free_blocks_list_;
225 first_block->free_blocks_cnt = free_blocks_cnt;
227 free_blocks_list_ = first_block;
// Lazily constructs one LinkerSmallObjectAllocator per power-of-two size
// class, placement-new'd into the preallocated allocators_buf_ storage.
// Idempotent: does nothing once allocators_ is set.
void LinkerMemoryAllocator::initialize_allocators() {
  if (allocators_ != nullptr) {

  LinkerSmallObjectAllocator* allocators =
      reinterpret_cast<LinkerSmallObjectAllocator*>(allocators_buf_);

  for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) {
    // The type tag doubles as log2 of the allocator's block size.
    uint32_t type = i + kSmallObjectMinSizeLog2;
    new (allocators + i) LinkerSmallObjectAllocator(type, 1 << type);

  // Publish the table only after all entries are constructed.
  allocators_ = allocators;
247 void* LinkerMemoryAllocator::alloc_mmap(size_t size) {
248 size_t allocated_size = PAGE_END(size + sizeof(page_info));
249 void* map_ptr = mmap(nullptr, allocated_size,
250 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
252 if (map_ptr == MAP_FAILED) {
253 async_safe_fatal("mmap failed");
256 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob");
258 page_info* info = reinterpret_cast<page_info*>(map_ptr);
259 memcpy(info->signature, kSignature, sizeof(kSignature));
260 info->type = kLargeObject;
261 info->allocated_size = allocated_size;
// malloc-style entry point: requests above kSmallObjectMaxSize go straight
// to a dedicated mmap; smaller requests are rounded to a power-of-two size
// class and served by the matching small-object allocator.
void* LinkerMemoryAllocator::alloc(size_t size) {
  // treat alloc(0) as alloc(1)
  if (size > kSmallObjectMaxSize) {
    return alloc_mmap(size);

  uint16_t log2_size = log2(size);

  // Clamp to the smallest supported size class.
  if (log2_size < kSmallObjectMinSizeLog2) {
    log2_size = kSmallObjectMinSizeLog2;

  return get_small_object_allocator(log2_size)->alloc();
// Recovers the page_info header stored at the start of ptr's page and
// aborts when the signature bytes do not match kSignature (i.e. the
// pointer was not produced by this allocator).
page_info* LinkerMemoryAllocator::get_page_info(void* ptr) {
  page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr)));
  if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) {
    async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
294 void* LinkerMemoryAllocator::realloc(void* ptr, size_t size) {
295 if (ptr == nullptr) {
304 page_info* info = get_page_info(ptr);
308 if (info->type == kLargeObject) {
309 old_size = info->allocated_size - sizeof(page_info);
311 LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
312 if (allocator != info->allocator_addr) {
313 async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr);
316 old_size = allocator->get_block_size();
319 if (old_size < size) {
320 void *result = alloc(size);
321 memcpy(result, ptr, old_size);
// free-style entry point: no-op for nullptr; unmaps large objects directly
// and routes small objects back to their owning allocator.
void LinkerMemoryAllocator::free(void* ptr) {
  if (ptr == nullptr) {

  page_info* info = get_page_info(ptr);

  if (info->type == kLargeObject) {
    // The whole mapping (header included) is released in one munmap.
    munmap(info, info->allocated_size);
    LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type);
    // Cross-check the allocator recorded in the page header against the
    // one implied by the type tag to catch corrupted headers.
    if (allocator != info->allocator_addr) {
      async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr);

    allocator->free(ptr);
// Maps a type tag (log2 of the block size) to its small-object allocator,
// constructing the allocator table on first use. Aborts on a tag outside
// the supported [kSmallObjectMinSizeLog2, kSmallObjectMaxSizeLog2] range.
LinkerSmallObjectAllocator* LinkerMemoryAllocator::get_small_object_allocator(uint32_t type) {
  if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) {
    async_safe_fatal("invalid type: %u", type);

  initialize_allocators();
  return &allocators_[type - kSmallObjectMinSizeLog2];