blob: ec4d626e97697115a21342c3c24dea1ca18b5e2d [file] [log] [blame]
/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "allocation_record.h"
18
19#include "art_method-inl.h"
20#include "base/stl_util.h"
21#include "stack.h"
22
23#ifdef HAVE_ANDROID_OS
24#include "cutils/properties.h"
25#endif
26
27namespace art {
28namespace gc {
29
// Resolves this recorded frame's dex pc to a source line number using the
// method's debug info. Requires that a method has been set for this element.
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}
34
Man Cao41656de2015-07-06 18:53:15 -070035const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
36 // klass_ could contain null only if we implement class unloading.
37 if (UNLIKELY(klass_.IsNull())) {
38 return "null";
39 } else {
40 return klass_.Read()->GetDescriptor(storage);
41 }
42}
43
Man Cao8c2ff642015-05-27 17:25:30 -070044void AllocRecordObjectMap::SetProperties() {
45#ifdef HAVE_ANDROID_OS
46 // Check whether there's a system property overriding the max number of records.
47 const char* propertyName = "dalvik.vm.allocTrackerMax";
48 char allocMaxString[PROPERTY_VALUE_MAX];
49 if (property_get(propertyName, allocMaxString, "") > 0) {
50 char* end;
51 size_t value = strtoul(allocMaxString, &end, 10);
52 if (*end != '\0') {
53 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString
54 << "' --- invalid";
55 } else {
56 alloc_record_max_ = value;
Man Cao1ed11b92015-06-11 22:47:35 -070057 if (recent_record_max_ > value) {
58 recent_record_max_ = value;
59 }
60 }
61 }
62 // Check whether there's a system property overriding the number of recent records.
63 propertyName = "dalvik.vm.recentAllocMax";
64 char recentAllocMaxString[PROPERTY_VALUE_MAX];
65 if (property_get(propertyName, recentAllocMaxString, "") > 0) {
66 char* end;
67 size_t value = strtoul(recentAllocMaxString, &end, 10);
68 if (*end != '\0') {
69 LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
70 << "' --- invalid";
71 } else if (value > alloc_record_max_) {
72 LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
73 << "' --- should be less than " << alloc_record_max_;
74 } else {
75 recent_record_max_ = value;
Man Cao8c2ff642015-05-27 17:25:30 -070076 }
77 }
78 // Check whether there's a system property overriding the max depth of stack trace.
Man Cao1ed11b92015-06-11 22:47:35 -070079 propertyName = "debug.allocTracker.stackDepth";
Man Cao8c2ff642015-05-27 17:25:30 -070080 char stackDepthString[PROPERTY_VALUE_MAX];
81 if (property_get(propertyName, stackDepthString, "") > 0) {
82 char* end;
83 size_t value = strtoul(stackDepthString, &end, 10);
84 if (*end != '\0') {
85 LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString
86 << "' --- invalid";
Man Cao1ed11b92015-06-11 22:47:35 -070087 } else if (value > kMaxSupportedStackDepth) {
88 LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using "
89 << kMaxSupportedStackDepth;
90 max_stack_depth_ = kMaxSupportedStackDepth;
Man Cao8c2ff642015-05-27 17:25:30 -070091 } else {
92 max_stack_depth_ = value;
93 }
94 }
95#endif
96}
97
98AllocRecordObjectMap::~AllocRecordObjectMap() {
99 STLDeleteValues(&entries_);
100}
101
// Reports the class roots of the most recent allocation records to |visitor|
// as strong roots, so the classes of recently allocated objects are kept
// alive for the debugger. Older records' classes are not pinned here.
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  // Invariant established by SetProperties(): the recent window never exceeds
  // the total record capacity.
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  // Walk backwards (newest first) and stop after at most |count| entries.
  for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) {
    buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot());
  }
}
112
Mathieu Chartier97509952015-07-13 14:35:43 -0700113static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
Mathieu Chartier90443472015-07-16 20:32:27 -0700114 SHARED_REQUIRES(Locks::mutator_lock_)
115 REQUIRES(Locks::alloc_tracker_lock_) {
Man Cao42c3c332015-06-23 16:38:25 -0700116 GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
117 // This does not need a read barrier because this is called by GC.
118 mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
Mathieu Chartier97509952015-07-13 14:35:43 -0700119 if (old_object != nullptr) {
120 // The class object can become null if we implement class unloading.
121 // In that case we might still want to keep the class name string (not implemented).
122 mirror::Object* new_object = visitor->IsMarked(old_object);
123 DCHECK(new_object != nullptr);
124 if (UNLIKELY(old_object != new_object)) {
125 klass = GcRoot<mirror::Class>(new_object->AsClass());
126 }
Man Cao1ed11b92015-06-11 22:47:35 -0700127 }
128}
129
// Called by the GC after marking: drops records whose objects died (outside
// the recent window), nulls the object root for dead-but-recent records, and
// forwards roots for objects a moving collector relocated.
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) number of records can be deleted.
  // The newest recent_record_max_ records are kept even if their objects died,
  // so the debugger can still inspect recent allocations.
  size_t delete_bound;
  if (entries_.size() <= recent_record_max_) {
    delete_bound = 0;
  } else {
    delete_bound = entries_.size() - recent_record_max_;
  }
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;  // 1-based position of this entry; compared against delete_bound below.
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord* record = it->second;
    // A null root means the object already died in a previous cycle.
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      // Object is dead (or was already null).
      if (count > delete_bound) {
        // Inside the recent window: keep the record, clear the object root,
        // but keep its class root up to date.
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(record, visitor);
        ++it;
      } else {
        // Outside the recent window: record and entry can be reclaimed.
        delete record;
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      // Object survived; update the root if the collector moved it.
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}
168
// Re-enables record creation (see the wait loop in RecordAllocation) and
// wakes all allocating threads blocked on new_record_condition_. The flag
// must be set before the broadcast so woken waiters observe it.
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}
173
// Blocks new record creation: threads entering RecordAllocation will wait on
// new_record_condition_ until AllowNewAllocationRecords() is called.
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  allow_new_record_ = false;
}
177
Man Cao8c2ff642015-05-27 17:25:30 -0700178struct AllocRecordStackVisitor : public StackVisitor {
179 AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
Mathieu Chartier90443472015-07-16 20:32:27 -0700180 SHARED_REQUIRES(Locks::mutator_lock_)
Man Cao8c2ff642015-05-27 17:25:30 -0700181 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
182 trace(trace_in),
183 depth(0),
184 max_depth(max) {}
185
186 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
187 // annotalysis.
188 bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
189 if (depth >= max_depth) {
190 return false;
191 }
192 ArtMethod* m = GetMethod();
193 if (!m->IsRuntimeMethod()) {
194 trace->SetStackElementAt(depth, m, GetDexPc());
195 ++depth;
196 }
197 return true;
198 }
199
200 ~AllocRecordStackVisitor() {
201 trace->SetDepth(depth);
202 }
203
204 AllocRecordStackTrace* trace;
205 size_t depth;
206 const size_t max_depth;
207};
208
209void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
210 Thread* self = Thread::Current();
211 Heap* heap = Runtime::Current()->GetHeap();
212 if (enable) {
213 {
214 MutexLock mu(self, *Locks::alloc_tracker_lock_);
215 if (heap->IsAllocTrackingEnabled()) {
216 return; // Already enabled, bail.
217 }
218 AllocRecordObjectMap* records = new AllocRecordObjectMap();
219 CHECK(records != nullptr);
220 records->SetProperties();
221 std::string self_name;
222 self->GetThreadName(self_name);
223 if (self_name == "JDWP") {
224 records->alloc_ddm_thread_id_ = self->GetTid();
225 }
Man Cao1ed11b92015-06-11 22:47:35 -0700226 records->scratch_trace_.SetDepth(records->max_stack_depth_);
Man Cao8c2ff642015-05-27 17:25:30 -0700227 size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
228 sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
229 LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
230 << records->max_stack_depth_ << " frames, taking up to "
231 << PrettySize(sz * records->alloc_record_max_) << ")";
232 heap->SetAllocationRecords(records);
233 heap->SetAllocTrackingEnabled(true);
234 }
235 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
236 } else {
237 {
238 MutexLock mu(self, *Locks::alloc_tracker_lock_);
239 if (!heap->IsAllocTrackingEnabled()) {
240 return; // Already disabled, bail.
241 }
242 heap->SetAllocTrackingEnabled(false);
243 LOG(INFO) << "Disabling alloc tracker";
244 heap->SetAllocationRecords(nullptr);
245 }
246 // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
247 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
248 }
249}
250
// Records one allocation of |byte_count| bytes of class |klass| at |obj|,
// together with the allocating thread's stack trace. Takes
// alloc_tracker_lock_ for the duration; may block while the GC has new
// records disallowed (or, with read barriers, while weak-ref access is
// disabled for this thread).
void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
                                            size_t byte_count) {
  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  AllocRecordObjectMap* records = heap->GetAllocationRecords();
  DCHECK(records != nullptr);

  // Do not record for DDM thread
  if (records->alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for GC's sweeping to complete and allow new records
  // (with read barriers the gate is the per-thread weak-ref access flag
  // instead of allow_new_record_). Re-check the condition after every wakeup.
  while (UNLIKELY((!kUseReadBarrier && !records->allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    records->new_record_condition_.WaitHoldingLocks(self);
  }

  DCHECK_LE(records->Size(), records->alloc_record_max_);

  // Get stack trace.
  // add scope to make "visitor" destroyed promptly, in order to set the scratch_trace_->depth_
  // (the visitor's destructor is what commits the depth to the scratch trace).
  {
    AllocRecordStackVisitor visitor(self, &records->scratch_trace_, records->max_stack_depth_);
    visitor.WalkStack();
  }
  records->scratch_trace_.SetTid(self->GetTid());
  // Copy the shared scratch trace into a heap-allocated trace owned by the record.
  AllocRecordStackTrace* trace = new AllocRecordStackTrace(records->scratch_trace_);

  // Fill in the basics.
  AllocRecord* record = new AllocRecord(byte_count, klass, trace);

  // Put() takes ownership of |record|.
  records->Put(obj, record);
  DCHECK_LE(records->Size(), records->alloc_record_max_);
}
291
292} // namespace gc
293} // namespace art