/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/mutex-inl.h"
#include "gc/space/valgrind_settings.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;
size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_valgrind,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      running_on_valgrind_(running_on_valgrind) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}
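// Illustrative usage (not from this file): the owning space is expected to
// hand RosAlloc a page-aligned, pre-reserved range. A minimal sketch with
// hypothetical sizes, assuming the default page_release_size_threshold:
//
//   void* base = mem_map->Begin();  // page-aligned reservation
//   RosAlloc* rosalloc = new RosAlloc(base, 16 * MB, 64 * MB,
//                                     RosAlloc::kPageReleaseModeSizeAndEnd,
//                                     /* running_on_valgrind */ false);
//
// capacity is the initial footprint; max_capacity bounds later growth, which
// happens in AllocPages() via ArtRosAllocMoreCore().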

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}
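// A worked example of the first-fit-with-split policy above (illustrative
// numbers, assuming 4 KB pages): a request for num_pages = 3 (12 KB) that
// lands on an 8-page (32 KB) free run erases that run from free_page_runs_,
// carves off the first 12 KB as the result, and reinserts the trailing 20 KB
// as a new FreePageRun. If no run fits, the footprint grows by at least 2 MB
// (capped by capacity_) and the search retries the last free run.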

size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx
                 << ", pm_type=" << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}
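// Coalescing example (illustrative page indices): freeing pages [8, 11) when
// free_page_runs_ already holds [4, 8) and [11, 16) first merges the higher
// neighbor ([8, 11) + [11, 16)), then the lower one, leaving a single
// [4, 16) run. Keeping runs maximally merged is what lets AllocPages()
// satisfy large requests without growing the footprint.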

void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}
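// Size-accounting example (assuming kPageSize == 4096): a 9000-byte request
// rounds up to 3 pages, so the caller is charged *bytes_allocated = 12288
// even though only 9000 bytes were asked for. Anything at or below
// kLargeSizeThreshold goes through the run-based path instead.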

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec
                << pm_idx << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_entry) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}
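// Note on the page-map walk above: a pointer into the middle of a multi-page
// run lands on a kPageMapRunPart entry, so the code walks pm_idx backwards
// until it hits the run's kPageMapRun header page. For example, a run
// occupying pages [7, 10) stores kPageMapRun at index 7 and kPageMapRunPart
// at 8 and 9; freeing a slot on page 9 walks back to 7 to recover the Run*.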

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->SetAllocBitMapBitsForInvalidSlots();
    DCHECK(!new_run->IsThreadLocal());
    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to be a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
  }
  return new_run;
}
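// Prefetch arithmetic (illustrative, assuming 64-byte cache lines, which is
// what kPrefetchStride = 64 targets): for a hypothetical bracket with
// 128-byte slots and 32 slots, num_of_bytes = 4096, so the loop issues
// 4096 / 64 = 64 prefetches, one per cache line of the slot area. Zeroing
// (kPrefetchNewRunDataByZeroing) would warm the same lines, but it dirties
// pages that may have been madvised away.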

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}
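// Design note: dedicated_full_run_ is a statically allocated sentinel whose
// bitmap reads as permanently full, so current_runs_[idx] and the per-thread
// run slots never need a null check. An allocation attempt against it simply
// returns nullptr from AllocSlot(), which funnels the caller into the refill
// path above.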

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    DCHECK(bytes_allocated != nullptr);
    *bytes_allocated = bracket_size;
    // Caller verifies that it is all 0.
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Check that the bitmap idx is back at 0 if it's all free.
          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = bracket_size;
  // Caller verifies that it is all 0.
  return slot_addr;
}
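// Bracket example (illustrative; the DCHECK above implies 16-byte bracket
// spacing for sizes up to 512 bytes): a 40-byte request maps to the 48-byte
// bracket, so the returned slot wastes at most 15 bytes and *bytes_allocated
// reports 48. Small brackets (idx < kNumThreadLocalSizeBrackets) are served
// from per-thread runs without taking a bracket lock on the fast path.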

size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeader();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}
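// Run lifecycle sketch: a run is exactly one of (a) a thread-local run,
// (b) current_runs_[idx], (c) in non_full_runs_[idx], or (d) full (tracked
// in full_runs_[idx] only in debug builds, since full runs are otherwise
// rediscovered when one of their slots is freed). Frees on a thread-local
// run are only recorded in its thread-local free bitmap and reconciled
// later; frees on shared runs update the alloc bitmap immediately and may
// migrate the run between these sets, or release its pages once it is all
// free.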

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " first_search_vec_idx=" << first_search_vec_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

inline void* RosAlloc::Run::AllocSlot() {
  const size_t idx = size_bracket_idx_;
  while (true) {
    if (kIsDebugBuild) {
      // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
      for (size_t i = 0; i < first_search_vec_idx_; ++i) {
        CHECK_EQ(~alloc_bit_map_[i], 0U);
      }
    }
    uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
    uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
    if (LIKELY(ffz1 != 0)) {
      const uint32_t ffz = ffz1 - 1;
      const uint32_t slot_idx = ffz +
          first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
      const uint32_t mask = 1U << ffz;
      DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
      // Found an empty slot. Set the bit.
      DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
      *alloc_bitmap_ptr |= mask;
      DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) +
          headerSizes[idx] + slot_idx * bracketSizes[idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex
                  << reinterpret_cast<intptr_t>(slot_addr)
                  << ", bracket_size=" << std::dec << bracketSizes[idx]
                  << ", slot_idx=" << slot_idx;
      }
      return slot_addr;
    }
    const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
    if (first_search_vec_idx_ + 1 >= num_words) {
      DCHECK(IsFull());
      // Already at the last word, return null.
      return nullptr;
    }
    // Increase the index to the next word and try again.
    ++first_search_vec_idx_;
  }
}
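// Bit-scan example (illustrative): with alloc_bit_map_[first_search_vec_idx_]
// == 0x0000000F, the low four slots are taken, so ~vec == 0xFFFFFFF0 and
// __builtin_ffs returns 5; ffz = 4 is the first free slot in this word and
// slot_idx = 4 + 32 * first_search_vec_idx_. The slot address then falls out
// of the header-plus-(slot_idx * bracket_size) arithmetic above.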

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
  const uint32_t mask = 1U << vec_off;
  DCHECK_NE(*vec & mask, 0U);
  *vec &= ~mask;
  DCHECK_EQ(*vec & mask, 0U);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(ptr, 0, bracket_size);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Free slots in the alloc bit map based on the thread local free bit map.
  const size_t idx = size_bracket_idx_;
  const size_t num_of_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      if (v == num_vec - 1) {
        // The run is still all free only if the remaining bits are exactly the invalid-slot mask.
        is_all_free_after =
            is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
      } else {
        is_all_free_after = false;
      }
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}
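// Deferred-free design note: FreeFromRun() records a free on a thread-local
// run only in that run's thread-local free bitmap (under the bracket lock),
// leaving the alloc bitmap the owning thread allocates from untouched. The
// merge above runs when the owner finds the run "full": every bit set in the
// thread-local free bitmap is cleared out of alloc_bit_map_ in one pass,
// resurrecting those slots, and the thread-local bitmap is reset to zero.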

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK(!IsThreadLocal());
  // Free slots in the alloc bit map based on the bulk free bit map.
  const size_t num_vec = NumberOfBitmapVectors();
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK(IsThreadLocal());
  // Union the thread local bit map with the bulk free bit map.
  size_t num_vec = NumberOfBitmapVectors();
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK(IsThreadLocal());
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkFreeBitMap");
}

inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                  const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  const size_t bracket_size = bracketSizes[idx];
  memset(ptr, 0, bracket_size);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = NumberOfBitmapVectors();
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  const uint32_t mask = 1U << vec_off;
  DCHECK_EQ(*vec & mask, 0U);
  *vec |= mask;
  DCHECK_NE(*vec & mask, 0U);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
  return bracket_size;
}

inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
  const size_t kBitsPerVec = 32;
  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
  size_t remain = num_vec * kBitsPerVec - num_slots;
  DCHECK_NE(remain, kBitsPerVec);
  return ((1U << remain) - 1) << (kBitsPerVec - remain);
}
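// Mask arithmetic example: num_slots = 40 and num_vec = 2 give
// remain = 2 * 32 - 40 = 24 unused bit positions, so the mask is
// ((1 << 24) - 1) << 8 == 0xFFFFFF00: the top 24 bits of the last vector
// mark slots that don't exist. SetAllocBitMapBitsForInvalidSlots() sets
// these bits so AllocSlot() never hands them out, and IsAllFree() treats a
// last vector equal to this mask as empty.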
1040
1041inline bool RosAlloc::Run::IsAllFree() {
Ian Rogers13735952014-10-08 12:43:28 -07001042 const uint8_t idx = size_bracket_idx_;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001043 const size_t num_slots = numOfSlots[idx];
1044 const size_t num_vec = NumberOfBitmapVectors();
1045 DCHECK_NE(num_vec, 0U);
1046 // Check the last vector after the loop since it uses a special case for the masked bits.
1047 for (size_t v = 0; v < num_vec - 1; v++) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001048 uint32_t vec = alloc_bit_map_[v];
1049 if (vec != 0) {
1050 return false;
1051 }
1052 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001053 // Make sure the last word is equal to the mask, all other bits must be 0.
1054 return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001055}
1056
1057inline bool RosAlloc::Run::IsFull() {
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001058 const size_t num_vec = NumberOfBitmapVectors();
1059 for (size_t v = 0; v < num_vec; ++v) {
1060 if (~alloc_bit_map_[v] != 0) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001061 return false;
1062 }
1063 }
1064 return true;
1065}
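// Note: IsFull() may test ~vec != 0 on every word, including the last one,
// because SetAllocBitMapBitsForInvalidSlots() below pre-sets the bits that
// do not correspond to valid slots; in a full run every word is 0xFFFFFFFF.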
1066
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001067inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001068 const size_t num_vec = NumberOfBitmapVectors();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001069 for (size_t v = 0; v < num_vec; v++) {
1070 uint32_t vec = BulkFreeBitMap()[v];
1071 if (vec != 0) {
1072 return false;
1073 }
1074 }
1075 return true;
1076}
1077
1078inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001079 const size_t num_vec = NumberOfBitmapVectors();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001080 for (size_t v = 0; v < num_vec; v++) {
1081 uint32_t vec = ThreadLocalFreeBitMap()[v];
1082 if (vec != 0) {
1083 return false;
1084 }
1085 }
1086 return true;
1087}
1088
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001089inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
1090 const size_t idx = size_bracket_idx_;
1091 const size_t num_slots = numOfSlots[idx];
1092 const size_t num_vec = RoundUp(num_slots, 32) / 32;
1093 DCHECK_NE(num_vec, 0U);
1094 // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
1095 // don't represent valid slots.
1096 alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
1097}
1098
1099inline void RosAlloc::Run::ZeroHeader() {
Ian Rogers13735952014-10-08 12:43:28 -07001100 const uint8_t idx = size_bracket_idx_;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001101 memset(this, 0, headerSizes[idx]);
1102}
1103
1104inline void RosAlloc::Run::ZeroData() {
Ian Rogers13735952014-10-08 12:43:28 -07001105 const uint8_t idx = size_bracket_idx_;
1106 uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001107 memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
1108}
1109
1110inline void RosAlloc::Run::FillAllocBitMap() {
1111 size_t num_vec = NumberOfBitmapVectors();
1112 memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
1113 first_search_vec_idx_ = num_vec - 1; // No free bits in any of the bitmap words.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001114}
1115
1116void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1117 void* arg) {
1118 size_t idx = size_bracket_idx_;
Ian Rogers13735952014-10-08 12:43:28 -07001119 uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001120 size_t num_slots = numOfSlots[idx];
1121 size_t bracket_size = IndexToBracketSize(idx);
Ian Rogers13735952014-10-08 12:43:28 -07001122 DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001123 size_t num_vec = RoundUp(num_slots, 32) / 32;
1124 size_t slots = 0;
1125 for (size_t v = 0; v < num_vec; v++, slots += 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07001126 DCHECK_GE(num_slots, slots);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001127 uint32_t vec = alloc_bit_map_[v];
1128 size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
1129 for (size_t i = 0; i < end; ++i) {
1130 bool is_allocated = ((vec >> i) & 0x1) != 0;
Ian Rogers13735952014-10-08 12:43:28 -07001131 uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001132 if (is_allocated) {
1133 handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
1134 } else {
1135 handler(slot_addr, slot_addr + bracket_size, 0, arg);
1136 }
1137 }
1138 }
1139}
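// Usage sketch (hypothetical caller; 'run' must point to a live Run): the
// handler sees used_bytes == bracket_size for allocated slots and 0 for free
// ones, so tallying live bytes is just:
//   size_t live_bytes = 0;
//   run->InspectAllSlots(RosAlloc::BytesAllocatedCallback, &live_bytes);
// BytesAllocatedCallback is defined near the end of this file.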
1140
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001141// If true, read the page map entries in BulkFree() without using the
1142// lock for better performance, assuming that the existence of an
1143// allocated chunk/pointer being freed in BulkFree() guarantees that
1144 // the page map entry won't change. Currently enabled.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001145static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001146
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001147size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
1148 size_t freed_bytes = 0;
Ian Rogerscf7f1912014-10-22 22:06:39 -07001149 if ((false)) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001150 // Used only to test Free() as GC uses only BulkFree().
1151 for (size_t i = 0; i < num_ptrs; ++i) {
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001152 freed_bytes += FreeInternal(self, ptrs[i]);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001153 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001154 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001155 }
1156
1157 WriterMutexLock wmu(self, bulk_free_lock_);
1158
1159 // First mark slots to free in the bulk free bit map without locking the
Ian Rogers5fcfa7d2014-05-15 11:43:06 -07001160 // size bracket locks. On host, unordered_set is faster than vector + flag.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001161#ifdef HAVE_ANDROID_OS
1162 std::vector<Run*> runs;
1163#else
Ian Rogers700a4022014-05-19 16:49:03 -07001164 std::unordered_set<Run*, hash_run, eq_run> runs;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001165#endif
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001166 for (size_t i = 0; i < num_ptrs; i++) {
1167 void* ptr = ptrs[i];
Ian Rogers5d057052014-03-12 14:32:27 -07001168 DCHECK_LE(base_, ptr);
1169 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001170 size_t pm_idx = RoundDownToPageMapIndex(ptr);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001171 Run* run = nullptr;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001172 if (kReadPageMapEntryWithoutLockInBulkFree) {
1173 // Read the page map entries without locking the lock.
Ian Rogers13735952014-10-08 12:43:28 -07001174 uint8_t page_map_entry = page_map_[pm_idx];
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001175 if (kTraceRosAlloc) {
1176 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1177 << std::dec << pm_idx
1178 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1179 }
1180 if (LIKELY(page_map_entry == kPageMapRun)) {
1181 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001182 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1183 size_t pi = pm_idx;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001184 // Find the beginning of the run.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001185 do {
1186 --pi;
Ian Rogers5d057052014-03-12 14:32:27 -07001187 DCHECK_LT(pi, capacity_ / kPageSize);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001188 } while (page_map_[pi] != kPageMapRun);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001189 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001190 } else if (page_map_entry == kPageMapLargeObject) {
1191 MutexLock mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001192 freed_bytes += FreePages(self, ptr, false);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001193 continue;
1194 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001195 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001196 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001197 } else {
1198 // Read the page map entries with a lock.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001199 MutexLock mu(self, lock_);
1200 DCHECK_LT(pm_idx, page_map_size_);
Ian Rogers13735952014-10-08 12:43:28 -07001201 uint8_t page_map_entry = page_map_[pm_idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001202 if (kTraceRosAlloc) {
1203 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1204 << std::dec << pm_idx
1205 << ", page_map_entry=" << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001206 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001207 if (LIKELY(page_map_entry == kPageMapRun)) {
1208 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1209 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1210 size_t pi = pm_idx;
1211 // Find the beginning of the run.
1212 do {
1213 --pi;
1214 DCHECK_LT(pi, capacity_ / kPageSize);
1215 } while (page_map_[pi] != kPageMapRun);
1216 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
1217 } else if (page_map_entry == kPageMapLargeObject) {
1218 freed_bytes += FreePages(self, ptr, false);
1219 continue;
1220 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001221 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001222 }
1223 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001224 DCHECK(run != nullptr);
1225 DCHECK_EQ(run->magic_num_, kMagicNum);
1226 // Set the bit in the bulk free bit map.
1227 freed_bytes += run->MarkBulkFreeBitMap(ptr);
1228#ifdef HAVE_ANDROID_OS
1229 if (!run->to_be_bulk_freed_) {
1230 run->to_be_bulk_freed_ = true;
1231 runs.push_back(run);
1232 }
1233#else
1234 runs.insert(run);
1235#endif
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001236 }
1237
1238 // Now, iterate over the affected runs and update the alloc bit map
1239 // based on the bulk free bit map (for non-thread-local runs) and
1240 // union the bulk free bit map into the thread-local free bit map
1241 // (for thread-local runs.)
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001242 for (Run* run : runs) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001243#ifdef HAVE_ANDROID_OS
1244 DCHECK(run->to_be_bulk_freed_);
1245 run->to_be_bulk_freed_ = false;
1246#endif
1247 size_t idx = run->size_bracket_idx_;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001248 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001249 if (run->IsThreadLocal()) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001250 DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001251 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
1252 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
1253 run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
1254 if (kTraceRosAlloc) {
1255 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
1256 << std::hex << reinterpret_cast<intptr_t>(run);
1257 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001258 DCHECK(run->IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001259      // A thread local run will be kept thread local even if it
1260      // becomes all free.
1261 } else {
1262 bool run_was_full = run->IsFull();
1263 run->MergeBulkFreeBitMapIntoAllocBitMap();
1264 if (kTraceRosAlloc) {
1265 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
1266 << reinterpret_cast<intptr_t>(run);
1267 }
1268 // Check if the run should be moved to non_full_runs_ or
1269 // free_page_runs_.
Mathieu Chartier58553c72014-09-16 16:25:55 -07001270 auto* non_full_runs = &non_full_runs_[idx];
1271    auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001272 if (run->IsAllFree()) {
1273 // It has just become completely free. Free the pages of the
1274 // run.
1275 bool run_was_current = run == current_runs_[idx];
1276 if (run_was_current) {
1277 DCHECK(full_runs->find(run) == full_runs->end());
1278 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1279 // If it was a current run, reuse it.
1280 } else if (run_was_full) {
1281 // If it was full, remove it from the full run set (debug
1282 // only.)
1283 if (kIsDebugBuild) {
Ian Rogers700a4022014-05-19 16:49:03 -07001284 std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001285 DCHECK(pos != full_runs->end());
1286 full_runs->erase(pos);
1287 if (kTraceRosAlloc) {
1288 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1289 << reinterpret_cast<intptr_t>(run)
1290 << " from full_runs_";
1291 }
1292 DCHECK(full_runs->find(run) == full_runs->end());
1293 }
1294 } else {
1295        // Otherwise, it must have been in the non-full run set; remove it.
1296 DCHECK(full_runs->find(run) == full_runs->end());
1297 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1298 non_full_runs->erase(run);
1299 if (kTraceRosAlloc) {
1300 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1301 << reinterpret_cast<intptr_t>(run)
1302 << " from non_full_runs_";
1303 }
1304 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1305 }
1306 if (!run_was_current) {
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001307 run->ZeroHeader();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001308 MutexLock lock_mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001309 FreePages(self, run, true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001310 }
1311 } else {
1312      // It is not completely free. If it was neither the current run
1313      // nor already in the non-full run set (i.e., it was full), insert
1314      // it into the non-full run set.
1315 if (run == current_runs_[idx]) {
1316 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1317 DCHECK(full_runs->find(run) == full_runs->end());
1318 // If it was a current run, keep it.
1319 } else if (run_was_full) {
1320 // If it was full, remove it from the full run set (debug
1321 // only) and insert into the non-full run set.
1322 DCHECK(full_runs->find(run) != full_runs->end());
1323 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1324 if (kIsDebugBuild) {
1325 full_runs->erase(run);
1326 if (kTraceRosAlloc) {
1327 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1328 << reinterpret_cast<intptr_t>(run)
1329 << " from full_runs_";
1330 }
1331 }
1332 non_full_runs->insert(run);
1333 if (kTraceRosAlloc) {
1334 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1335 << reinterpret_cast<intptr_t>(run)
1336 << " into non_full_runs_[" << std::dec << idx;
1337 }
1338 } else {
1339          // It was not full, so leave it in the non-full run set.
1340 DCHECK(full_runs->find(run) == full_runs->end());
1341 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1342 }
1343 }
1344 }
1345 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001346 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001347}
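// In outline, BulkFree() is a two-phase protocol. Phase one marks each freed
// slot in its run's bulk free bit map while holding only the bulk_free_lock_
// writer lock (plus lock_ for large objects), collecting the set of distinct
// runs touched. Phase two takes each run's size bracket lock exactly once and
// merges the bulk free bit map into the alloc bit map (or into the
// thread-local free bit map for thread-local runs). Lock traffic is therefore
// bounded by the number of runs, not the number of pointers freed.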
1348
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001349std::string RosAlloc::DumpPageMap() {
1350 std::ostringstream stream;
1351 stream << "RosAlloc PageMap: " << std::endl;
1352 lock_.AssertHeld(Thread::Current());
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001353 size_t end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001354 FreePageRun* curr_fpr = NULL;
1355 size_t curr_fpr_size = 0;
1356 size_t remaining_curr_fpr_size = 0;
1357 size_t num_running_empty_pages = 0;
1358 for (size_t i = 0; i < end; ++i) {
Ian Rogers13735952014-10-08 12:43:28 -07001359 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001360 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001361 case kPageMapReleased:
1362 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001363 case kPageMapEmpty: {
1364 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1365 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
1366 // Encountered a fresh free page run.
1367 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1368 DCHECK(fpr->IsFree());
1369 DCHECK(curr_fpr == NULL);
1370 DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
1371 curr_fpr = fpr;
1372 curr_fpr_size = fpr->ByteSize(this);
1373 DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
1374 remaining_curr_fpr_size = curr_fpr_size - kPageSize;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001375 stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
1376 << " (FPR start) fpr_size=" << curr_fpr_size
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001377 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001378 if (remaining_curr_fpr_size == 0) {
1379 // Reset at the end of the current free page run.
1380 curr_fpr = NULL;
1381 curr_fpr_size = 0;
1382 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001383 stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001384 DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
1385 } else {
1386 // Still part of the current free page run.
1387 DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
1388 DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
1389 DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
1390 DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
1391 remaining_curr_fpr_size -= kPageSize;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001392 stream << "[" << i << "]=Empty (FPR part)"
1393 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001394 if (remaining_curr_fpr_size == 0) {
1395 // Reset at the end of the current free page run.
1396 curr_fpr = NULL;
1397 curr_fpr_size = 0;
1398 }
1399 }
1400 num_running_empty_pages++;
1401 break;
1402 }
1403 case kPageMapLargeObject: {
1404 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1405 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001406 stream << "[" << i << "]=Large (start)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001407 break;
1408 }
1409 case kPageMapLargeObjectPart:
1410 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1411 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001412 stream << "[" << i << "]=Large (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001413 break;
1414 case kPageMapRun: {
1415 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1416 num_running_empty_pages = 0;
1417 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1418 size_t idx = run->size_bracket_idx_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001419 stream << "[" << i << "]=Run (start)"
1420 << " idx=" << idx
1421 << " numOfPages=" << numOfPages[idx]
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001422 << " is_thread_local=" << run->is_thread_local_
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001423 << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
1424 << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001425 break;
1426 }
1427 case kPageMapRunPart:
1428 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1429 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001430 stream << "[" << i << "]=Run (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001431 break;
1432 default:
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001433 stream << "[" << i << "]=Unrecognizable page map type: " << pm;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001434 break;
1435 }
1436 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001437 return stream.str();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001438}
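// Shape of the output (hypothetical values; the curr_fpr debug lines are
// elided), assuming 4 KB pages:
//   RosAlloc PageMap:
//   [0]=Run (start) idx=2 numOfPages=1 is_thread_local=1 is_all_free=0
//   [1]=Run (start) idx=5 numOfPages=2 is_thread_local=0 is_all_free=0
//   [2]=Run (part)
//   [3]=Empty (FPR start) fpr_size=8192 remaining_fpr_size=4096
//   [4]=Empty (FPR part) remaining_fpr_size=0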
1439
Andreas Gamped7576322014-10-24 22:13:45 -07001440size_t RosAlloc::UsableSize(const void* ptr) {
Ian Rogers5d057052014-03-12 14:32:27 -07001441 DCHECK_LE(base_, ptr);
1442 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001443 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1444 MutexLock mu(Thread::Current(), lock_);
1445 switch (page_map_[pm_idx]) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001446 case kPageMapReleased:
1447 // Fall-through.
1448 case kPageMapEmpty:
1449 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1450 << std::hex << reinterpret_cast<intptr_t>(ptr);
1451 break;
1452 case kPageMapLargeObject: {
1453 size_t num_pages = 1;
1454 size_t idx = pm_idx + 1;
1455 size_t end = page_map_size_;
1456 while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1457 num_pages++;
1458 idx++;
1459 }
1460 return num_pages * kPageSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001461 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001462 case kPageMapLargeObjectPart:
1463 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1464 << std::hex << reinterpret_cast<intptr_t>(ptr);
1465 break;
1466 case kPageMapRun:
1467 case kPageMapRunPart: {
1468 // Find the beginning of the run.
1469 while (page_map_[pm_idx] != kPageMapRun) {
1470 pm_idx--;
1471 DCHECK_LT(pm_idx, capacity_ / kPageSize);
1472 }
1473 DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
1474 Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1475 DCHECK_EQ(run->magic_num_, kMagicNum);
1476 size_t idx = run->size_bracket_idx_;
Andreas Gamped7576322014-10-24 22:13:45 -07001477 size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
Ian Rogers13735952014-10-08 12:43:28 -07001478 - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001479 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1480 return IndexToBracketSize(idx);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001481 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001482 default: {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001483 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001484 break;
1485 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001486 }
1487 return 0;
1488}
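// Worked example (illustrative): any object carved from the idx = 6 bracket
// reports UsableSize() == 112 bytes regardless of the requested size, while a
// large object spanning 3 page map entries reports 3 * kPageSize.
static_assert(16 * (6 + 1) == 112, "bracket index 6 holds 112-byte slots");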
1489
1490bool RosAlloc::Trim() {
1491 MutexLock mu(Thread::Current(), lock_);
1492 FreePageRun* last_free_page_run;
1493 DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1494 auto it = free_page_runs_.rbegin();
1495 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1496 // Remove the last free page run, if any.
1497 DCHECK(last_free_page_run->IsFree());
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001498 DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001499 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1500 DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1501 free_page_runs_.erase(last_free_page_run);
1502 size_t decrement = last_free_page_run->ByteSize(this);
1503 size_t new_footprint = footprint_ - decrement;
1504 DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1505 size_t new_num_of_pages = new_footprint / kPageSize;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001506 DCHECK_GE(page_map_size_, new_num_of_pages);
1507 // Zero out the tail of the page map.
Ian Rogers13735952014-10-08 12:43:28 -07001508 uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
1509 uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001510 DCHECK_LE(madvise_begin, page_map_mem_map_->End());
1511 size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
1512 if (madvise_size > 0) {
1513 DCHECK_ALIGNED(madvise_begin, kPageSize);
1514 DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
Ian Rogersc5f17732014-06-05 20:48:42 -07001515 if (!kMadviseZeroes) {
1516 memset(madvise_begin, 0, madvise_size);
1517 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001518 CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
1519 }
1520    if (madvise_begin > zero_begin) {
1521 memset(zero_begin, 0, madvise_begin - zero_begin);
1522 }
1523 page_map_size_ = new_num_of_pages;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001524 free_page_run_size_map_.resize(new_num_of_pages);
1525 DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001526 ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001527 if (kTraceRosAlloc) {
1528 LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1529 << footprint_ << " to " << new_footprint;
1530 }
1531 DCHECK_LT(new_footprint, footprint_);
1532 DCHECK_LT(new_footprint, capacity_);
1533 footprint_ = new_footprint;
1534 return true;
1535 }
1536 return false;
1537}
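// Worked example (illustrative, assuming 4 KB pages): with footprint_ at 64
// pages and a trailing free page run of 16 pages, Trim() erases that run,
// madvises and zeroes the tail of the page map, resizes the bookkeeping to
// 48 entries, and hands 16 * kPageSize back through ArtRosAllocMoreCore()
// with a negative increment before shrinking footprint_ to 48 pages.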
1538
1539void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1540 void* arg) {
1541 // Note: no need to use this to release pages as we already do so in FreePages().
1542 if (handler == NULL) {
1543 return;
1544 }
1545 MutexLock mu(Thread::Current(), lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001546 size_t pm_end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001547 size_t i = 0;
1548 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001549 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001550 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001551 case kPageMapReleased:
1552 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001553 case kPageMapEmpty: {
1554 // The start of a free page run.
1555 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1556 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1557 size_t fpr_size = fpr->ByteSize(this);
1558 DCHECK(IsAligned<kPageSize>(fpr_size));
1559 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001560 if (kIsDebugBuild) {
1561 // In the debug build, the first page of a free page run
1562 // contains a magic number for debugging. Exclude it.
Ian Rogers13735952014-10-08 12:43:28 -07001563 start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001564 }
Ian Rogers13735952014-10-08 12:43:28 -07001565 void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001566 handler(start, end, 0, arg);
1567 size_t num_pages = fpr_size / kPageSize;
1568 if (kIsDebugBuild) {
1569 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001570 DCHECK(IsFreePage(j));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001571 }
1572 }
1573 i += fpr_size / kPageSize;
1574 DCHECK_LE(i, pm_end);
1575 break;
1576 }
1577 case kPageMapLargeObject: {
1578 // The start of a large object.
1579 size_t num_pages = 1;
1580 size_t idx = i + 1;
1581 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1582 num_pages++;
1583 idx++;
1584 }
1585 void* start = base_ + i * kPageSize;
1586 void* end = base_ + (i + num_pages) * kPageSize;
1587 size_t used_bytes = num_pages * kPageSize;
1588 handler(start, end, used_bytes, arg);
1589 if (kIsDebugBuild) {
1590 for (size_t j = i + 1; j < i + num_pages; ++j) {
1591 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1592 }
1593 }
1594 i += num_pages;
1595 DCHECK_LE(i, pm_end);
1596 break;
1597 }
1598 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001599 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001600 break;
1601 case kPageMapRun: {
1602 // The start of a run.
1603 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001604 DCHECK_EQ(run->magic_num_, kMagicNum);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001605 // The dedicated full run doesn't contain any real allocations, don't visit the slots in
1606 // there.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001607 run->InspectAllSlots(handler, arg);
1608 size_t num_pages = numOfPages[run->size_bracket_idx_];
1609 if (kIsDebugBuild) {
1610 for (size_t j = i + 1; j < i + num_pages; ++j) {
1611 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1612 }
1613 }
1614 i += num_pages;
1615 DCHECK_LE(i, pm_end);
1616 break;
1617 }
1618 case kPageMapRunPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001619 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001620 break;
1621 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001622 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001623 break;
1624 }
1625 }
1626}
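// For reference, the walk above decodes page_map_ ranges such as this
// hypothetical layout:
//   Run, RunPart, Empty, Empty, LargeObject, LargeObjectPart, Released
// i.e. a 2-page run, a 2-page free page run, a 2-page large object, and one
// released page. Only the first page of a range carries the start tag, which
// is what lets the loop skip ahead by the full range length.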
1627
1628size_t RosAlloc::Footprint() {
1629 MutexLock mu(Thread::Current(), lock_);
1630 return footprint_;
1631}
1632
1633size_t RosAlloc::FootprintLimit() {
1634 MutexLock mu(Thread::Current(), lock_);
1635 return capacity_;
1636}
1637
1638void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1639 MutexLock mu(Thread::Current(), lock_);
1640 DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1641 // Only growing is supported here. But Trim() is supported.
1642 if (capacity_ < new_capacity) {
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001643 CHECK_LE(new_capacity, max_capacity_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001644 capacity_ = new_capacity;
1645 VLOG(heap) << "new capacity=" << capacity_;
1646 }
1647}
1648
1649void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
1650 Thread* self = Thread::Current();
Hiroshi Yamauchi70f60042014-02-03 12:31:29 -08001651 // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001652 ReaderMutexLock wmu(self, bulk_free_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001653 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001654 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001655 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001656 CHECK(thread_local_run != nullptr);
1657 // Invalid means already revoked.
1658 DCHECK(thread_local_run->IsThreadLocal());
1659 if (thread_local_run != dedicated_full_run_) {
1660 thread->SetRosAllocRun(idx, dedicated_full_run_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001661 DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001662 // Note the thread local run may not be full here.
1663 bool dont_care;
1664 thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001665 thread_local_run->SetIsThreadLocal(false);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001666 thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
1667 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1668 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
Mathieu Chartier0651d412014-04-29 14:37:57 -07001669 RevokeRun(self, idx, thread_local_run);
1670 }
1671 }
1672}
1673
1674void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
1675 size_bracket_locks_[idx]->AssertHeld(self);
1676 DCHECK(run != dedicated_full_run_);
1677 if (run->IsFull()) {
1678 if (kIsDebugBuild) {
1679 full_runs_[idx].insert(run);
1680 DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
1681 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001682 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001683 << reinterpret_cast<intptr_t>(run)
1684 << " into full_runs_[" << std::dec << idx << "]";
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001685 }
1686 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001687 } else if (run->IsAllFree()) {
1688 run->ZeroHeader();
1689 MutexLock mu(self, lock_);
1690 FreePages(self, run, true);
1691 } else {
1692 non_full_runs_[idx].insert(run);
1693 DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
1694 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001695 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001696 << reinterpret_cast<intptr_t>(run)
1697 << " into non_full_runs_[" << std::dec << idx << "]";
1698 }
1699 }
1700}
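// Disposition summary for RevokeRun(): a full run goes into full_runs_[idx]
// in debug builds only (release builds keep no set entry; the run is
// rediscovered through the page map when one of its slots is freed), a
// completely free run is zeroed and returned to the page allocator, and
// anything in between goes into non_full_runs_[idx].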
1701
1702void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
1703 // Revoke the current runs which share the same idx as thread local runs.
1704 Thread* self = Thread::Current();
1705 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
1706 MutexLock mu(self, *size_bracket_locks_[idx]);
1707 if (current_runs_[idx] != dedicated_full_run_) {
1708 RevokeRun(self, idx, current_runs_[idx]);
1709 current_runs_[idx] = dedicated_full_run_;
1710 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001711 }
1712}
1713
1714void RosAlloc::RevokeAllThreadLocalRuns() {
1715 // This is called when a mutator thread won't allocate such as at
1716 // the Zygote creation time or during the GC pause.
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001717 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1718 MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001719 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001720 for (Thread* thread : thread_list) {
1721 RevokeThreadLocalRuns(thread);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001722 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001723 RevokeThreadUnsafeCurrentRuns();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001724}
1725
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001726void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
1727 if (kIsDebugBuild) {
1728 Thread* self = Thread::Current();
1729 // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001730 ReaderMutexLock wmu(self, bulk_free_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001731 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001732 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001733 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001734 DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001735 }
1736 }
1737}
1738
1739void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
1740 if (kIsDebugBuild) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001741 Thread* self = Thread::Current();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001742 MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
1743 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001744 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1745 for (Thread* t : thread_list) {
1746 AssertThreadLocalRunsAreRevoked(t);
1747 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001748 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001749 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001750 CHECK_EQ(current_runs_[idx], dedicated_full_run_);
1751 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001752 }
1753}
1754
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001755void RosAlloc::Initialize() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001756 // bracketSizes.
1757 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1758 if (i < kNumOfSizeBrackets - 2) {
1759 bracketSizes[i] = 16 * (i + 1);
1760 } else if (i == kNumOfSizeBrackets - 2) {
1761 bracketSizes[i] = 1 * KB;
1762 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001763 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001764 bracketSizes[i] = 2 * KB;
1765 }
1766 if (kTraceRosAlloc) {
1767 LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1768 }
1769 }
1770 // numOfPages.
1771 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1772 if (i < 4) {
1773 numOfPages[i] = 1;
1774 } else if (i < 8) {
1775 numOfPages[i] = 2;
1776 } else if (i < 16) {
1777 numOfPages[i] = 4;
1778 } else if (i < 32) {
1779 numOfPages[i] = 8;
1780 } else if (i == 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07001781 DCHECK_EQ(i, kNumOfSizeBrackets - 2);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001782 numOfPages[i] = 16;
1783 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001784 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001785 numOfPages[i] = 32;
1786 }
1787 if (kTraceRosAlloc) {
1788 LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1789 }
1790 }
1791 // Compute numOfSlots and slotOffsets.
1792 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1793 size_t bracket_size = bracketSizes[i];
1794 size_t run_size = kPageSize * numOfPages[i];
1795 size_t max_num_of_slots = run_size / bracket_size;
1796 // Compute the actual number of slots by taking the header and
1797 // alignment into account.
1798 size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
1799 DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
1800 size_t header_size = 0;
1801 size_t bulk_free_bit_map_offset = 0;
1802 size_t thread_local_free_bit_map_offset = 0;
1803 size_t num_of_slots = 0;
1804 // Search for the maximum number of slots that allows enough space
1805 // for the header (including the bit maps.)
1806 for (int s = max_num_of_slots; s >= 0; s--) {
1807 size_t tmp_slots_size = bracket_size * s;
1808 size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
1809 size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
1810 size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
1811 size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
1812 size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
1813 size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
1814 // Align up the unaligned header size. bracket_size may not be a power of two.
1815 size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1816 tmp_unaligned_header_size :
1817 tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
1818 DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
1819 DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
1820 if (tmp_slots_size + tmp_header_size <= run_size) {
1821 // Found the right number of slots, that is, there was enough
1822 // space for the header (including the bit maps.)
1823 num_of_slots = s;
1824 header_size = tmp_header_size;
1825 bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
1826 thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
1827 break;
1828 }
1829 }
1830 DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
1831 // Add the padding for the alignment remainder.
1832 header_size += run_size % bracket_size;
Ian Rogers5d057052014-03-12 14:32:27 -07001833 DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001834 numOfSlots[i] = num_of_slots;
1835 headerSizes[i] = header_size;
1836 bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
1837 threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
1838 if (kTraceRosAlloc) {
1839 LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
1840 << ", headerSizes[" << i << "]=" << headerSizes[i]
1841 << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
1842 << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];;
1843 }
1844 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001845 // Fill the alloc bitmap so nobody can successfully allocate from it.
1846 if (kIsDebugBuild) {
1847 dedicated_full_run_->magic_num_ = kMagicNum;
1848 }
1849 // It doesn't matter which size bracket we use since the main goal is to have the allocation
1850 // fail 100% of the time you attempt to allocate into the dedicated full run.
1851 dedicated_full_run_->size_bracket_idx_ = 0;
1852 dedicated_full_run_->FillAllocBitMap();
1853 dedicated_full_run_->SetIsThreadLocal(true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001854}
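// Worked example of the geometry search above (illustrative, assuming 4 KB
// pages): for the smallest bracket (16 bytes, 1 page), each of the three bit
// maps needs 32 bytes to cover up to 256 slots, so the unaligned header is
// 8 + 3 * 32 = 104 bytes, padded to 112 (a multiple of 16), which leaves
// room for exactly 249 slots.
static_assert(112 + 249 * 16 == 4096, "16-byte bracket: header + slots fill the run");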
1855
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001856void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1857 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001858 if (used_bytes == 0) {
1859 return;
1860 }
1861 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1862 *bytes_allocated += used_bytes;
1863}
1864
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001865void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1866 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001867 if (used_bytes == 0) {
1868 return;
1869 }
1870 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1871 ++(*objects_allocated);
1872}
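// Usage sketch (hypothetical caller): both callbacks are designed to be
// passed to InspectAll(), e.g.
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
// which leaves the total used bytes across all pages in 'bytes_allocated'.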
1873
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001874void RosAlloc::Verify() {
1875 Thread* self = Thread::Current();
1876 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001877      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001878 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001879 ReaderMutexLock wmu(self, bulk_free_lock_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001880 std::vector<Run*> runs;
1881 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001882 MutexLock lock_mu(self, lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001883 size_t pm_end = page_map_size_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001884 size_t i = 0;
1885 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001886 uint8_t pm = page_map_[i];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001887 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001888 case kPageMapReleased:
1889 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001890 case kPageMapEmpty: {
1891 // The start of a free page run.
1892 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001893 DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001894 CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
1895 << "An empty page must belong to the free page run set";
1896 size_t fpr_size = fpr->ByteSize(this);
1897 CHECK(IsAligned<kPageSize>(fpr_size))
1898 << "A free page run size isn't page-aligned : " << fpr_size;
1899 size_t num_pages = fpr_size / kPageSize;
1900 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1901 << "A free page run size must be > 0 : " << fpr_size;
1902 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001903 CHECK(IsFreePage(j))
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001904 << "A mismatch between the page map table for kPageMapEmpty "
1905 << " at page index " << j
1906 << " and the free page run size : page index range : "
1907 << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
1908 }
1909 i += num_pages;
1910 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1911 << std::endl << DumpPageMap();
1912 break;
1913 }
1914 case kPageMapLargeObject: {
1915 // The start of a large object.
1916 size_t num_pages = 1;
1917 size_t idx = i + 1;
1918 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1919 num_pages++;
1920 idx++;
1921 }
Andreas Gamped7576322014-10-24 22:13:45 -07001922 uint8_t* start = base_ + i * kPageSize;
1923 if (running_on_valgrind_) {
1924 start += ::art::gc::space::kDefaultValgrindRedZoneBytes;
1925 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001926 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
1927 size_t obj_size = obj->SizeOf();
Andreas Gamped7576322014-10-24 22:13:45 -07001928 CHECK_GT(obj_size +
1929 (running_on_valgrind_ ? 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes : 0),
1930 kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001931 << "A rosalloc large object size must be > " << kLargeSizeThreshold;
1932 CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
1933 << "A rosalloc large object size " << obj_size
1934 << " does not match the page map table " << (num_pages * kPageSize)
1935 << std::endl << DumpPageMap();
1936 i += num_pages;
1937 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1938 << std::endl << DumpPageMap();
1939 break;
1940 }
1941 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001942 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001943 break;
1944 case kPageMapRun: {
1945 // The start of a run.
1946 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001947 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001948 size_t idx = run->size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001949 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001950 size_t num_pages = numOfPages[idx];
1951 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1952 << "Run size must be > 0 : " << num_pages;
1953 for (size_t j = i + 1; j < i + num_pages; ++j) {
1954 CHECK_EQ(page_map_[j], kPageMapRunPart)
1955 << "A mismatch between the page map table for kPageMapRunPart "
1956 << " at page index " << j
1957 << " and the run size : page index range " << i << " to " << (i + num_pages)
1958 << std::endl << DumpPageMap();
1959 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001960 // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001961 runs.push_back(run);
1962 i += num_pages;
1963 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1964 << std::endl << DumpPageMap();
1965 break;
1966 }
1967 case kPageMapRunPart:
Mathieu Chartier0651d412014-04-29 14:37:57 -07001968 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001969 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001970 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001971 break;
1972 }
1973 }
1974 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001975 std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
1976 for (Thread* thread : threads) {
1977 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001978 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001979 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
1980 CHECK(thread_local_run != nullptr);
1981 CHECK(thread_local_run->IsThreadLocal());
1982 CHECK(thread_local_run == dedicated_full_run_ ||
1983 thread_local_run->size_bracket_idx_ == i);
1984 }
1985 }
1986 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001987 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001988 Run* current_run = current_runs_[i];
1989 CHECK(current_run != nullptr);
1990 if (current_run != dedicated_full_run_) {
1991 // The dedicated full run is currently marked as thread local.
1992 CHECK(!current_run->IsThreadLocal());
1993 CHECK_EQ(current_run->size_bracket_idx_, i);
1994 }
1995 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001996 // Call Verify() here for the lock order.
1997 for (auto& run : runs) {
Andreas Gamped7576322014-10-24 22:13:45 -07001998 run->Verify(self, this, running_on_valgrind_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001999 }
2000}
2001
Andreas Gamped7576322014-10-24 22:13:45 -07002002void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) {
Ian Rogers5d057052014-03-12 14:32:27 -07002003 DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
Mathieu Chartier73d1e172014-04-11 17:53:48 -07002004 const size_t idx = size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07002005 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
Ian Rogers13735952014-10-08 12:43:28 -07002006 uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07002007 const size_t num_slots = numOfSlots[idx];
2008 const size_t num_vec = RoundUp(num_slots, 32) / 32;
2009 CHECK_GT(num_vec, 0U);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002010 size_t bracket_size = IndexToBracketSize(idx);
2011 CHECK_EQ(slot_base + num_slots * bracket_size,
Ian Rogers13735952014-10-08 12:43:28 -07002012 reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002013 << "Mismatch in the end address of the run " << Dump();
2014 // Check that the bulk free bitmap is clean. It's only used during BulkFree().
2015 CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
Mathieu Chartier73d1e172014-04-11 17:53:48 -07002016 uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
2017 // Make sure all the bits at the end of the run are set so that we don't allocate there.
2018 CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
2019 // Ensure that the first bitmap index is valid.
2020 CHECK_LT(first_search_vec_idx_, num_vec);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002021 // Check the thread local runs, the current runs, and the run sets.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07002022 if (IsThreadLocal()) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002023 // If it's a thread local run, then it must be pointed to by an owner thread.
2024 bool owner_found = false;
2025 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2026 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
2027 Thread* thread = *it;
Mathieu Chartier0651d412014-04-29 14:37:57 -07002028 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002029 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07002030 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002031 if (thread_local_run == this) {
2032 CHECK(!owner_found)
2033 << "A thread local run has more than one owner thread " << Dump();
2034 CHECK_EQ(i, idx)
2035 << "A mismatching size bracket index in a thread local run " << Dump();
2036 owner_found = true;
2037 }
2038 }
2039 }
2040 CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
2041 } else {
2042 // If it's not thread local, check that the thread local free bitmap is clean.
2043 CHECK(IsThreadLocalFreeBitmapClean())
2044 << "A non-thread-local run's thread local free bitmap isn't clean "
2045 << Dump();
2046    // Check if it's a current run for the size bracket.
2047 bool is_current_run = false;
2048 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
2049 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
2050 Run* current_run = rosalloc->current_runs_[i];
2051 if (idx == i) {
2052 if (this == current_run) {
2053 is_current_run = true;
2054 }
2055 } else {
2056        // If the size bracket index does not match, then it must not
2057 // be a current run.
2058 CHECK_NE(this, current_run)
2059 << "A current run points to a run with a wrong size bracket index " << Dump();
2060 }
2061 }
2062    // If it's neither a thread local nor a current run, then it must be
2063 // in a run set.
2064 if (!is_current_run) {
2065 MutexLock mu(self, rosalloc->lock_);
Mathieu Chartier58553c72014-09-16 16:25:55 -07002066 auto& non_full_runs = rosalloc->non_full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002067 // If it's all free, it must be a free page run rather than a run.
2068 CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
2069 if (!IsFull()) {
2070      // If it's not full, it must be in the non-full run set.
2071 CHECK(non_full_runs.find(this) != non_full_runs.end())
2072 << "A non-full run isn't in the non-full run set " << Dump();
2073 } else {
2074      // If it's full, it must be in the full run set (debug build only).
2075 if (kIsDebugBuild) {
Mathieu Chartier58553c72014-09-16 16:25:55 -07002076 auto& full_runs = rosalloc->full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002077 CHECK(full_runs.find(this) != full_runs.end())
2078 << " A full run isn't in the full run set " << Dump();
2079 }
2080 }
2081 }
2082 }
2083 // Check each slot.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002084 size_t slots = 0;
Andreas Gamped7576322014-10-24 22:13:45 -07002085 size_t valgrind_modifier = running_on_valgrind ?
2086 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :
2087 0U;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002088 for (size_t v = 0; v < num_vec; v++, slots += 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07002089 DCHECK_GE(num_slots, slots) << "Out of bounds";
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002090 uint32_t vec = alloc_bit_map_[v];
2091 uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
2092 size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
2093 for (size_t i = 0; i < end; ++i) {
2094 bool is_allocated = ((vec >> i) & 0x1) != 0;
2095 // If a thread local run, slots may be marked freed in the
2096 // thread local free bitmap.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07002097 bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002098 if (is_allocated && !is_thread_local_freed) {
Ian Rogers13735952014-10-08 12:43:28 -07002099 uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
Andreas Gamped7576322014-10-24 22:13:45 -07002100 if (running_on_valgrind) {
2101 slot_addr += ::art::gc::space::kDefaultValgrindRedZoneBytes;
2102 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002103 mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
2104 size_t obj_size = obj->SizeOf();
Andreas Gamped7576322014-10-24 22:13:45 -07002105 CHECK_LE(obj_size + valgrind_modifier, kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002106 << "A run slot contains a large object " << Dump();
Andreas Gamped7576322014-10-24 22:13:45 -07002107 CHECK_EQ(SizeToIndex(obj_size + valgrind_modifier), idx)
Hiroshi Yamauchi4cd662e2014-04-03 16:28:10 -07002108 << PrettyTypeOf(obj) << " "
Andreas Gamped7576322014-10-24 22:13:45 -07002109 << "obj_size=" << obj_size << "(" << obj_size + valgrind_modifier << "), idx=" << idx
2110 << " A run slot contains an object with wrong size " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002111 }
2112 }
2113 }
2114}
2115
size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  // Check the page map size, which might have changed due to grow/shrink.
  while (i < page_map_size_) {
    // Reading the page map without a lock is racy, but the race is benign since it should only
    // result in occasionally not releasing pages that we could have released.
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall through.
      case kPageMapEmpty: {
        // This is currently the start of a free page run.
        // Acquire the lock to prevent other threads racing in and modifying the page map.
        MutexLock mu(self, lock_);
        // Check that it's still empty after we acquired the lock since another thread could have
        // raced in and placed an allocation here.
        if (IsFreePage(i)) {
          // Free page runs can start with a released page if we coalesced a released page free
          // page run with an empty page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          // There is a race condition where FreePages() can coalesce fpr with the previous
          // free page run before we acquire lock_. In that case free_page_runs_.find will not find
          // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
          // to the next page.
          if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
            size_t fpr_size = fpr->ByteSize(this);
            DCHECK(IsAligned<kPageSize>(fpr_size));
            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
            reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
            size_t pages = fpr_size / kPageSize;
            CHECK_GT(pages, 0U) << "Infinite loop probable";
            i += pages;
            DCHECK_LE(i, page_map_size_);
            break;
          }
        }
        FALLTHROUGH_INTENDED;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return reclaimed_bytes;
}

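// An editorial sketch, not ART code: ReleasePages() above relies on a
// double-checked pattern, i.e. a racy unlocked read of page_map_ that is
// re-validated under lock_ before any state is touched. In outline:
//
//   if (page_map_[i] == kPageMapReleased || page_map_[i] == kPageMapEmpty) {
//     MutexLock mu(self, lock_);  // Serialize with allocators and FreePages().
//     if (IsFreePage(i)) {        // Re-check: the page may have been reused.
//       // Safe to release the free page run starting at page i.
//     }
//   }
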
size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;

    // Single pages won't be released.
    if (start == end) {
      return 0;
    }
  }
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update the page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}

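// An editorial sketch, not ART code: on Linux, madvise(MADV_DONTNEED) on a
// private anonymous mapping lets the kernel reclaim the physical pages while
// the virtual range stays mapped and reads back as zeros (which is why
// kMadviseZeroes makes the memset above unnecessary there). A standalone
// illustration, assuming a page-aligned private anonymous mapping:
//
//   #include <sys/mman.h>
//   void ReleaseRange(uint8_t* begin, size_t bytes) {
//     madvise(begin, bytes, MADV_DONTNEED);  // Physical pages become reclaimable.
//   }
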
void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_pages = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_pages = std::max(largest_continuous_free_pages,
                                             fpr->ByteSize(this));
  }
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  } else {
    // Non-large allocation.
    size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes for a new buffer where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  }
}

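// An editorial note, not ART code: for a non-large request, the "required"
// figure logged above is the footprint of a whole new run for the request's
// size bracket, not the request size itself. A hypothetical helper restating
// that computation:
//
//   static inline size_t RequiredRunBytes(size_t failed_alloc_bytes) {
//     return numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
//   }
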
}  // namespace allocator
}  // namespace gc
}  // namespace art