summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/gc/mark_sweep.cc32
-rw-r--r--src/gc/mark_sweep.h11
-rw-r--r--src/heap.cc4
-rw-r--r--src/heap.h2
-rw-r--r--src/stack.h5
-rw-r--r--src/thread.cc124
-rw-r--r--src/thread.h3
-rw-r--r--src/thread_list.cc7
-rw-r--r--src/thread_list.h3
9 files changed, 168 insertions, 23 deletions
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index e19e2c4f2f..03bbb6abcb 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -35,6 +35,7 @@
#include "space.h"
#include "timing_logger.h"
#include "thread.h"
+#include "thread_list.h"
static const bool kUseMarkStackPrefetch = true;
@@ -111,8 +112,12 @@ inline void MarkSweep::MarkObject0(const Object* obj, bool check_finger) {
LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
if (!large_objects->Test(obj)) {
- CHECK(large_object_space->Contains(obj)) << "Attempting to mark object " << obj
- << " not in large object space";
+ if (!large_object_space->Contains(obj)) {
+ LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
+ LOG(ERROR) << "Attempting to see if it's a bad root";
+ VerifyRoots();
+ LOG(FATAL) << "Can't mark bad root";
+ }
large_objects->Set(obj);
// Don't need to check finger since large objects never have any object references.
}
@@ -165,6 +170,29 @@ void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
mark_sweep->MarkObject0(root, true);
}
+void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
+ const AbstractMethod* method) {
+ reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, method);
+}
+
+void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const AbstractMethod* method) {
+ // See if the root is on any space bitmap.
+ if (heap_->FindSpaceFromObject(root) == NULL) {
+ LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ if (!large_object_space->Contains(root)) {
+ LOG(ERROR) << "Found invalid root: " << root;
+ LOG(ERROR) << "VReg / Shadow frame offset: " << vreg;
+ if (method != NULL) {
+ LOG(ERROR) << "In method " << PrettyMethod(method, true);
+ }
+ }
+ }
+}
+
+void MarkSweep::VerifyRoots() {
+ Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
+}
+
// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
Runtime::Current()->VisitRoots(MarkObjectVisitor, this);
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 74b2aa734d..76c5428299 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -234,6 +234,17 @@ class MarkSweep {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Verify the roots of the heap and print out information related to any invalid roots.
+ // Called in MarkObject, so we may not hold the mutator lock.
+ void VerifyRoots()
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ static void VerifyRootCallback(const Object* root, void* arg, size_t vreg,
+ const AbstractMethod* method);
+
+ void VerifyRoot(const Object* root, size_t vreg, const AbstractMethod* method)
+ NO_THREAD_SAFETY_ANALYSIS;
+
template <typename Visitor>
static void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
diff --git a/src/heap.cc b/src/heap.cc
index f91ce84d1b..6b0b0e5377 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1135,7 +1135,7 @@ void Heap::CollectGarbageMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_
// If the GC was slow, then print timings in the log.
uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
- total_paused_time_ += duration / kTimeAdjust;
+ total_paused_time_ += duration;
if (duration > MsToNs(50)) {
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetUsedMemorySize();
@@ -1763,7 +1763,7 @@ void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, G
uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
- total_paused_time_ += (pause_roots + pause_dirty) / kTimeAdjust;
+ total_paused_time_ += pause_roots + pause_dirty;
if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5) ||
(gc_cause == kGcCauseForAlloc && duration > MsToNs(20))) {
const size_t percent_free = GetPercentFree();
diff --git a/src/heap.h b/src/heap.h
index 6cd19d4cad..14d8382226 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -94,6 +94,8 @@ class Heap {
static const size_t kTimeAdjust = 1024;
typedef void (RootVisitor)(const Object* root, void* arg);
+ typedef void (VerifyRootVisitor)(const Object* root, void* arg, size_t vreg,
+ const AbstractMethod* method);
typedef bool (IsMarkedTester)(const Object* object, void* arg);
// Create a heap with the requested sizes. The possible empty
diff --git a/src/stack.h b/src/stack.h
index 70d6f9d0f7..bceadc3cd2 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -90,12 +90,13 @@ class ShadowFrame {
(shadow_frame_entry <= (&references_[number_of_references_ - 1])));
}
- void VisitRoots(Heap::RootVisitor* visitor, void* arg) {
+ template <typename Visitor>
+ void VisitRoots(const Visitor& visitor) {
size_t num_refs = NumberOfReferences();
for (size_t j = 0; j < num_refs; j++) {
Object* object = GetReference(j);
if (object != NULL) {
- visitor(object, arg);
+ visitor(object, j);
}
}
}
diff --git a/src/thread.cc b/src/thread.cc
index db8c39f207..485caab66e 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -1807,12 +1807,14 @@ bool Thread::HoldsLock(Object* object) {
return object->GetThinLockId() == thin_lock_id_;
}
+// Visitor parameters are: (const Object* obj, size_t vreg, const AbstractMethod* method).
+template <typename Visitor>
class ReferenceMapVisitor : public StackVisitor {
public:
ReferenceMapVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
- Context* context, Heap::RootVisitor* root_visitor, void* arg)
+ Context* context, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(stack, trace_stack, context), root_visitor_(root_visitor), arg_(arg) {}
+ : StackVisitor(stack, trace_stack, context), visitor_(visitor) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (false) {
@@ -1821,7 +1823,8 @@ class ReferenceMapVisitor : public StackVisitor {
}
ShadowFrame* shadow_frame = GetCurrentShadowFrame();
if (shadow_frame != NULL) {
- shadow_frame->VisitRoots(root_visitor_, arg_);
+ WrapperVisitor wrapperVisitor(visitor_, shadow_frame->GetMethod());
+ shadow_frame->VisitRoots(wrapperVisitor);
} else {
AbstractMethod* m = GetMethod();
// Process register map (which native and runtime methods don't have)
@@ -1863,15 +1866,13 @@ class ReferenceMapVisitor : public StackVisitor {
}
spill_shifts--; // wind back one as we want the last match
ref = reinterpret_cast<Object*>(GetGPR(spill_shifts));
- if (ref != NULL) {
- root_visitor_(ref, arg_);
- }
} else {
ref = reinterpret_cast<Object*>(GetVReg(cur_quick_frame, code_item, core_spills,
fp_spills, frame_size, reg));
- if (ref != NULL) {
- root_visitor_(ref, arg_);
- }
+ }
+
+ if (ref != NULL) {
+ visitor_(ref, reg, m);
}
}
}
@@ -1882,18 +1883,104 @@ class ReferenceMapVisitor : public StackVisitor {
}
private:
- bool TestBitmap(int reg, const uint8_t* reg_vector) {
+
+ class WrapperVisitor {
+ public:
+ WrapperVisitor(const Visitor& visitor, AbstractMethod* method)
+ : visitor_(visitor),
+ method_(method) {
+
+ }
+
+ void operator()(const Object* obj, size_t offset) const {
+ visitor_(obj, offset, method_);
+ }
+
+ private:
+ const Visitor& visitor_;
+ AbstractMethod* method_;
+ };
+
+ static bool TestBitmap(int reg, const uint8_t* reg_vector) {
return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
}
- // Call-back when we visit a root.
- Heap::RootVisitor* root_visitor_;
- // Argument to call-back.
- void* arg_;
+ // Visitor for when we visit a root.
+ const Visitor& visitor_;
+
// A method helper we keep around to avoid dex file/cache re-computations.
MethodHelper mh_;
};
+class RootCallbackVisitor {
+ public:
+ RootCallbackVisitor(Heap::RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {
+
+ }
+
+ void operator()(const Object* obj, size_t, const AbstractMethod*) const {
+ visitor_(obj, arg_);
+ }
+
+ private:
+ Heap::RootVisitor* visitor_;
+ void* arg_;
+};
+
+class VerifyCallbackVisitor {
+ public:
+ VerifyCallbackVisitor(Heap::VerifyRootVisitor* visitor, void* arg)
+ : visitor_(visitor),
+ arg_(arg) {
+
+ }
+
+ void operator()(const Object* obj, size_t vreg, const AbstractMethod* method) const {
+ visitor_(obj, arg_, vreg, method);
+ }
+
+ private:
+ Heap::VerifyRootVisitor* visitor_;
+ void* arg_;
+};
+
+struct VerifyRootWrapperArg {
+ Heap::VerifyRootVisitor* visitor;
+ void* arg;
+};
+
+static void VerifyRootWrapperCallback(const Object* root, void* arg) {
+ VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
+ wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
+}
+
+void Thread::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) {
+ // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
+ // don't have.
+ VerifyRootWrapperArg wrapperArg;
+ wrapperArg.arg = arg;
+ wrapperArg.visitor = visitor;
+
+ if (exception_ != NULL) {
+ VerifyRootWrapperCallback(exception_, &wrapperArg);
+ }
+ if (class_loader_override_ != NULL) {
+ VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
+ }
+ jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
+ jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
+
+ SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
+
+ // Visit roots on this thread's stack
+ Context* context = GetLongJumpContext();
+ VerifyCallbackVisitor visitorToCallback(visitor, arg);
+ ReferenceMapVisitor<VerifyCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context,
+ visitorToCallback);
+ mapper.WalkStack();
+ ReleaseLongJumpContext(context);
+}
+
void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
if (exception_ != NULL) {
visitor(exception_, arg);
@@ -1908,7 +1995,9 @@ void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
// Visit roots on this thread's stack
Context* context = GetLongJumpContext();
- ReferenceMapVisitor mapper(GetManagedStack(), GetTraceStack(), context, visitor, arg);
+ RootCallbackVisitor visitorToCallback(visitor, arg);
+ ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context,
+ visitorToCallback);
mapper.WalkStack();
ReleaseLongJumpContext(context);
}
@@ -1921,8 +2010,9 @@ static void VerifyObject(const Object* obj, void* arg) {
void Thread::VerifyStack() {
UniquePtr<Context> context(Context::Create());
- ReferenceMapVisitor mapper(GetManagedStack(), GetTraceStack(), context.get(), VerifyObject,
- Runtime::Current()->GetHeap());
+ RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
+ ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context.get(),
+ visitorToCallback);
mapper.WalkStack();
}
#endif
diff --git a/src/thread.h b/src/thread.h
index 06798cda45..3ba9f4a299 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -409,6 +409,9 @@ class PACKED Thread {
void VisitRoots(Heap::RootVisitor* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
#if VERIFY_OBJECT_ENABLED
void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#else
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 4030be6d6c..09a8de60bf 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -502,6 +502,13 @@ void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
}
}
+void ThreadList::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) const {
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
+ (*it)->VerifyRoots(visitor, arg);
+ }
+}
+
uint32_t ThreadList::AllocThreadId() {
MutexLock mu(Thread::Current(), allocated_ids_lock_);
for (size_t i = 0; i < allocated_ids_.size(); ++i) {
diff --git a/src/thread_list.h b/src/thread_list.h
index 94e987f1fd..3142fd3caf 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -81,6 +81,9 @@ class ThreadList {
void VisitRoots(Heap::RootVisitor* visitor, void* arg) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Return a copy of the thread list.
std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) {
return list_;