/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
| 16 | |
| 17 | #include "allocation_record.h" |
| 18 | |
| 19 | #include "art_method-inl.h" |
| 20 | #include "base/stl_util.h" |
| 21 | #include "stack.h" |
| 22 | |
| 23 | #ifdef HAVE_ANDROID_OS |
| 24 | #include "cutils/properties.h" |
| 25 | #endif |
| 26 | |
| 27 | namespace art { |
| 28 | namespace gc { |
| 29 | |
| 30 | int32_t AllocRecordStackTraceElement::ComputeLineNumber() const { |
| 31 | DCHECK(method_ != nullptr); |
| 32 | return method_->GetLineNumFromDexPC(dex_pc_); |
| 33 | } |
| 34 | |
Man Cao | 41656de | 2015-07-06 18:53:15 -0700 | [diff] [blame] | 35 | const char* AllocRecord::GetClassDescriptor(std::string* storage) const { |
| 36 | // klass_ could contain null only if we implement class unloading. |
| 37 | if (UNLIKELY(klass_.IsNull())) { |
| 38 | return "null"; |
| 39 | } else { |
| 40 | return klass_.Read()->GetDescriptor(storage); |
| 41 | } |
| 42 | } |
| 43 | |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 44 | void AllocRecordObjectMap::SetProperties() { |
| 45 | #ifdef HAVE_ANDROID_OS |
| 46 | // Check whether there's a system property overriding the max number of records. |
| 47 | const char* propertyName = "dalvik.vm.allocTrackerMax"; |
| 48 | char allocMaxString[PROPERTY_VALUE_MAX]; |
| 49 | if (property_get(propertyName, allocMaxString, "") > 0) { |
| 50 | char* end; |
| 51 | size_t value = strtoul(allocMaxString, &end, 10); |
| 52 | if (*end != '\0') { |
| 53 | LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString |
| 54 | << "' --- invalid"; |
| 55 | } else { |
| 56 | alloc_record_max_ = value; |
Man Cao | 1ed11b9 | 2015-06-11 22:47:35 -0700 | [diff] [blame] | 57 | if (recent_record_max_ > value) { |
| 58 | recent_record_max_ = value; |
| 59 | } |
| 60 | } |
| 61 | } |
| 62 | // Check whether there's a system property overriding the number of recent records. |
| 63 | propertyName = "dalvik.vm.recentAllocMax"; |
| 64 | char recentAllocMaxString[PROPERTY_VALUE_MAX]; |
| 65 | if (property_get(propertyName, recentAllocMaxString, "") > 0) { |
| 66 | char* end; |
| 67 | size_t value = strtoul(recentAllocMaxString, &end, 10); |
| 68 | if (*end != '\0') { |
| 69 | LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString |
| 70 | << "' --- invalid"; |
| 71 | } else if (value > alloc_record_max_) { |
| 72 | LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString |
| 73 | << "' --- should be less than " << alloc_record_max_; |
| 74 | } else { |
| 75 | recent_record_max_ = value; |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 76 | } |
| 77 | } |
| 78 | // Check whether there's a system property overriding the max depth of stack trace. |
Man Cao | 1ed11b9 | 2015-06-11 22:47:35 -0700 | [diff] [blame] | 79 | propertyName = "debug.allocTracker.stackDepth"; |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 80 | char stackDepthString[PROPERTY_VALUE_MAX]; |
| 81 | if (property_get(propertyName, stackDepthString, "") > 0) { |
| 82 | char* end; |
| 83 | size_t value = strtoul(stackDepthString, &end, 10); |
| 84 | if (*end != '\0') { |
| 85 | LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString |
| 86 | << "' --- invalid"; |
Man Cao | 1ed11b9 | 2015-06-11 22:47:35 -0700 | [diff] [blame] | 87 | } else if (value > kMaxSupportedStackDepth) { |
| 88 | LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using " |
| 89 | << kMaxSupportedStackDepth; |
| 90 | max_stack_depth_ = kMaxSupportedStackDepth; |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 91 | } else { |
| 92 | max_stack_depth_ = value; |
| 93 | } |
| 94 | } |
| 95 | #endif |
| 96 | } |
| 97 | |
// The map owns its AllocRecord values (allocated with new in
// RecordAllocation); release them all on destruction.
AllocRecordObjectMap::~AllocRecordObjectMap() {
  STLDeleteValues(&entries_);
}
| 101 | |
// Reports the classes of the most recent allocation records to the GC as
// strong roots (kRootDebugger), keeping them alive for debugger queries.
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  // Buffer root reports to amortize the per-root visitor overhead.
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  // NOTE(review): reverse iteration is assumed to yield the newest entries first
  // -- confirm against the declaration/ordering of entries_.
  for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) {
    buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot());
  }
}
| 112 | |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 113 | static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 114 | SHARED_REQUIRES(Locks::mutator_lock_) |
| 115 | REQUIRES(Locks::alloc_tracker_lock_) { |
Man Cao | 42c3c33 | 2015-06-23 16:38:25 -0700 | [diff] [blame] | 116 | GcRoot<mirror::Class>& klass = record->GetClassGcRoot(); |
| 117 | // This does not need a read barrier because this is called by GC. |
| 118 | mirror::Object* old_object = klass.Read<kWithoutReadBarrier>(); |
Mathieu Chartier | 9750995 | 2015-07-13 14:35:43 -0700 | [diff] [blame] | 119 | if (old_object != nullptr) { |
| 120 | // The class object can become null if we implement class unloading. |
| 121 | // In that case we might still want to keep the class name string (not implemented). |
| 122 | mirror::Object* new_object = visitor->IsMarked(old_object); |
| 123 | DCHECK(new_object != nullptr); |
| 124 | if (UNLIKELY(old_object != new_object)) { |
| 125 | klass = GcRoot<mirror::Class>(new_object->AsClass()); |
| 126 | } |
Man Cao | 1ed11b9 | 2015-06-11 22:47:35 -0700 | [diff] [blame] | 127 | } |
| 128 | } |
| 129 | |
// Called by the GC after marking to process the weak object references held
// by the allocation records:
//  - records whose object died and that fall outside the recent window are
//    deleted outright;
//  - records whose object died but are within the recent window are kept
//    with a nulled object root (their class root is still swept);
//  - records whose object survived get the (possibly moved) object address
//    written back.
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) number of records can be deleted.
  size_t delete_bound;
  if (entries_.size() <= recent_record_max_) {
    delete_bound = 0;
  } else {
    delete_bound = entries_.size() - recent_record_max_;
  }
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord* record = it->second;
    // A null old_object means the entry was already cleared on a prior sweep.
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      // Object is dead (or already cleared).
      if (count > delete_bound) {
        // Inside the recent window: keep the record, drop the object reference.
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(record, visitor);
        ++it;
      } else {
        // Outside the recent window: the record itself can go.
        delete record;
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        // Object survived but was moved; update the stored address.
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}
| 168 | |
// Re-permits creation of new allocation records and wakes any threads
// blocked on new_record_condition_ in RecordAllocation.
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}
| 173 | |
// Blocks creation of new allocation records: RecordAllocation waits on
// new_record_condition_ while this flag is clear (non-read-barrier config).
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  allow_new_record_ = false;
}
| 177 | |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 178 | struct AllocRecordStackVisitor : public StackVisitor { |
| 179 | AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 180 | SHARED_REQUIRES(Locks::mutator_lock_) |
Man Cao | 8c2ff64 | 2015-05-27 17:25:30 -0700 | [diff] [blame] | 181 | : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), |
| 182 | trace(trace_in), |
| 183 | depth(0), |
| 184 | max_depth(max) {} |
| 185 | |
| 186 | // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses |
| 187 | // annotalysis. |
| 188 | bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS { |
| 189 | if (depth >= max_depth) { |
| 190 | return false; |
| 191 | } |
| 192 | ArtMethod* m = GetMethod(); |
| 193 | if (!m->IsRuntimeMethod()) { |
| 194 | trace->SetStackElementAt(depth, m, GetDexPc()); |
| 195 | ++depth; |
| 196 | } |
| 197 | return true; |
| 198 | } |
| 199 | |
| 200 | ~AllocRecordStackVisitor() { |
| 201 | trace->SetDepth(depth); |
| 202 | } |
| 203 | |
| 204 | AllocRecordStackTrace* trace; |
| 205 | size_t depth; |
| 206 | const size_t max_depth; |
| 207 | }; |
| 208 | |
// Turns allocation tracking on or off globally. Idempotent: enabling when
// already enabled (or disabling when already disabled) is a no-op.
// Note the instrumentation calls happen OUTSIDE alloc_tracker_lock_ in both
// directions; on disable this deliberately lets racing allocations see
// tracking already off and drop their records (see comment below).
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = new AllocRecordObjectMap();
      CHECK(records != nullptr);
      // Pick up any system-property overrides for sizes/depth before use.
      records->SetProperties();
      std::string self_name;
      self->GetThreadName(self_name);
      if (self_name == "JDWP") {
        // Remember the DDM thread id so its own allocations are not recorded
        // (see the check in RecordAllocation).
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
      records->scratch_trace_.SetDepth(records->max_stack_depth_);
      // Rough per-record footprint, used only for the log message below.
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
      // Ownership of records transfers to the heap here.
      heap->SetAllocationRecords(records);
      heap->SetAllocTrackingEnabled(true);
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  } else {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      heap->SetAllocationRecords(nullptr);
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}
| 250 | |
// Records one allocation: captures the allocating thread's stack trace and
// stores an AllocRecord for obj (size byte_count, class klass) in the
// tracker's map. Bails out early if tracking is being shut down or if the
// caller is the DDM/JDWP thread; may block while the GC has new records
// disallowed (or, with read barriers, while weak ref access is disabled).
void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
                                            size_t byte_count) {
  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  AllocRecordObjectMap* records = heap->GetAllocationRecords();
  DCHECK(records != nullptr);

  // Do not record for DDM thread
  if (records->alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for GC's sweeping to complete and allow new records
  // (the gating condition depends on whether the build uses read barriers).
  while (UNLIKELY((!kUseReadBarrier && !records->allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    records->new_record_condition_.WaitHoldingLocks(self);
  }

  DCHECK_LE(records->Size(), records->alloc_record_max_);

  // Get stack trace.
  // add scope to make "visitor" destroyed promptly, in order to set the scratch_trace_->depth_
  {
    AllocRecordStackVisitor visitor(self, &records->scratch_trace_, records->max_stack_depth_);
    visitor.WalkStack();
  }
  records->scratch_trace_.SetTid(self->GetTid());
  // Copy out of the shared scratch trace; safe because alloc_tracker_lock_ is held.
  AllocRecordStackTrace* trace = new AllocRecordStackTrace(records->scratch_trace_);

  // Fill in the basics.
  AllocRecord* record = new AllocRecord(byte_count, klass, trace);

  // The map takes ownership of record (freed in ~AllocRecordObjectMap or on sweep).
  records->Put(obj, record);
  DCHECK_LE(records->Size(), records->alloc_record_max_);
}
| 291 | |
| 292 | } // namespace gc |
| 293 | } // namespace art |