Diffstat (limited to 'runtime')
-rw-r--r--  runtime/base/allocator.h                  |   5
-rw-r--r--  runtime/base/mutex.cc                     |   8
-rw-r--r--  runtime/base/mutex.h                      |   2
-rw-r--r--  runtime/base/stringprintf_test.cc         |  29
-rw-r--r--  runtime/check_reference_map_visitor.h     | 111
-rw-r--r--  runtime/debugger.cc                       | 169
-rw-r--r--  runtime/debugger.h                        |  18
-rw-r--r--  runtime/dex_instruction_list.h            |   8
-rw-r--r--  runtime/gc/allocator/rosalloc.cc          |  18
-rw-r--r--  runtime/gc/allocator/rosalloc.h           |  14
-rw-r--r--  runtime/gc/collector/mark_compact.cc      |   7
-rw-r--r--  runtime/gc/collector/mark_sweep.cc        |  40
-rw-r--r--  runtime/gc/collector/semi_space.cc        |  24
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc |   8
-rw-r--r--  runtime/gc/heap.cc                        |  43
-rw-r--r--  runtime/gc/heap.h                         |  16
-rw-r--r--  runtime/gc/space/large_object_space.cc    |   9
-rw-r--r--  runtime/gc/space/large_object_space.h     |   6
-rw-r--r--  runtime/instrumentation.cc                |  17
-rw-r--r--  runtime/instrumentation.h                 |  10
-rw-r--r--  runtime/jdwp/jdwp_handler.cc              |  43
-rw-r--r--  runtime/mirror/art_method-inl.h           |   7
-rw-r--r--  runtime/mirror/art_method.h               |   5
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc   |   4
-rw-r--r--  runtime/oat_file-inl.h                    |  49
-rw-r--r--  runtime/oat_file.cc                       |  53
-rw-r--r--  runtime/oat_file.h                        |  34
-rw-r--r--  runtime/parsed_options.cc                 |  32
-rw-r--r--  runtime/parsed_options.h                  |   3
-rw-r--r--  runtime/runtime.cc                        |   8
-rw-r--r--  runtime/runtime.h                         |   2
-rw-r--r--  runtime/stack.h                           |   4
-rw-r--r--  runtime/thread.cc                         |   2
-rw-r--r--  runtime/trace.cc                          |   6
-rw-r--r--  runtime/verifier/method_verifier.cc       |  20
35 files changed, 604 insertions(+), 230 deletions(-)
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index a7adb02e29..2c3e966d32 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -66,6 +66,7 @@ enum AllocatorTag {
kAllocatorTagCompileTimeClassPath,
kAllocatorTagOatFile,
kAllocatorTagDexFileVerifier,
+ kAllocatorTagRosAlloc,
kAllocatorTagCount, // Must always be last element.
};
std::ostream& operator<<(std::ostream& os, const AllocatorTag& tag);
@@ -149,6 +150,10 @@ class AllocationTrackingMultiMap : public std::multimap<
Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
};
+template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
+class AllocationTrackingSet : public std::set<Key, Compare, TrackingAllocator<Key, kTag>> {
+};
+
} // namespace art
#endif // ART_RUNTIME_BASE_ALLOCATOR_H_
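The new AllocationTrackingSet alias rounds out the existing AllocationTrackingMultiMap pattern: a standard container whose allocator is a TrackingAllocator parameterized by an AllocatorTag, so every node allocation is charged to that tag. A minimal, self-contained sketch of the idea (the TrackingAllocator here is a simplified stand-in, not ART's implementation, which additionally records per-tag counts for dumping):

#include <cstddef>
#include <cstdio>
#include <functional>
#include <set>

// Simplified stand-in for art::TrackingAllocator: forward to operator new
// and charge the bytes to a per-tag counter.
enum AllocatorTag { kAllocatorTagRosAlloc, kAllocatorTagCount };
static size_t g_bytes_used[kAllocatorTagCount] = {0};

template <typename T, AllocatorTag kTag>
struct TrackingAllocator {
  using value_type = T;
  TrackingAllocator() = default;
  template <typename U>
  TrackingAllocator(const TrackingAllocator<U, kTag>&) {}
  T* allocate(size_t n) {
    g_bytes_used[kTag] += n * sizeof(T);
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }
  void deallocate(T* p, size_t n) {
    g_bytes_used[kTag] -= n * sizeof(T);
    ::operator delete(p);
  }
};
template <typename T, typename U, AllocatorTag kTag>
bool operator==(const TrackingAllocator<T, kTag>&, const TrackingAllocator<U, kTag>&) { return true; }
template <typename T, typename U, AllocatorTag kTag>
bool operator!=(const TrackingAllocator<T, kTag>&, const TrackingAllocator<U, kTag>&) { return false; }

// The alias this change adds: a std::set whose nodes are charged to kTag.
template <class Key, AllocatorTag kTag, class Compare = std::less<Key>>
class AllocationTrackingSet : public std::set<Key, Compare, TrackingAllocator<Key, kTag>> {
};

int main() {
  AllocationTrackingSet<int, kAllocatorTagRosAlloc> runs;
  for (int i = 0; i < 100; ++i) runs.insert(i);
  std::printf("bytes charged to kAllocatorTagRosAlloc: %zu\n",
              g_bytes_used[kAllocatorTagRosAlloc]);
  return 0;
}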
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 455680b449..2c95eded08 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -884,6 +884,10 @@ void Locks::Init() {
DCHECK(heap_bitmap_lock_ == nullptr);
heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+ DCHECK(trace_lock_ == nullptr);
+ trace_lock_ = new Mutex("trace lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
DCHECK(runtime_shutdown_lock_ == nullptr);
runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
@@ -892,10 +896,6 @@ void Locks::Init() {
DCHECK(profiler_lock_ == nullptr);
profiler_lock_ = new Mutex("profiler lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
- DCHECK(trace_lock_ == nullptr);
- trace_lock_ = new Mutex("trace lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
DCHECK(deoptimization_lock_ == nullptr);
deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 20f58de888..8d2cdce802 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -92,12 +92,12 @@ enum LockLevel {
kBreakpointInvokeLock,
kAllocTrackerLock,
kDeoptimizationLock,
- kTraceLock,
kProfilerLock,
kJdwpEventListLock,
kJdwpAttachLock,
kJdwpStartLock,
kRuntimeShutdownLock,
+ kTraceLock,
kHeapBitmapLock,
kMutatorLock,
kThreadListSuspendThreadLock,
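The reordering matters because LockLevel encodes the global lock hierarchy: in debug builds a thread acquiring a lock must not already hold any lock at the same or a lower level, so a lock listed later in the enum (a higher level) may be held while taking one listed earlier. Moving kTraceLock above kRuntimeShutdownLock therefore allows code holding the trace lock to acquire the shutdown lock. A toy illustration of that invariant (a sketch, not ART's BaseMutex machinery):

#include <cassert>
#include <mutex>
#include <vector>

enum LockLevel {  // Subset, in the new order.
  kDeoptimizationLock,
  kProfilerLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
};

thread_local std::vector<LockLevel> tl_held_levels;

class LeveledMutex {
 public:
  explicit LeveledMutex(LockLevel level) : level_(level) {}
  void Lock() {
    // Hierarchy check: everything already held must sit at a higher level.
    for (LockLevel held : tl_held_levels) {
      assert(held > level_);
    }
    mu_.lock();
    tl_held_levels.push_back(level_);
  }
  void Unlock() {
    tl_held_levels.pop_back();
    mu_.unlock();
  }
 private:
  const LockLevel level_;
  std::mutex mu_;
};

int main() {
  LeveledMutex trace_lock(kTraceLock);
  LeveledMutex shutdown_lock(kRuntimeShutdownLock);
  trace_lock.Lock();     // OK: nothing held yet.
  shutdown_lock.Lock();  // OK now; under the old ordering this would assert.
  shutdown_lock.Unlock();
  trace_lock.Unlock();
  return 0;
}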
diff --git a/runtime/base/stringprintf_test.cc b/runtime/base/stringprintf_test.cc
new file mode 100644
index 0000000000..0bfde33a3f
--- /dev/null
+++ b/runtime/base/stringprintf_test.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stringprintf.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(StringPrintfTest, HexSizeT) {
+ size_t size = 0x00107e59;
+ EXPECT_STREQ("00107e59", StringPrintf("%08zx", size).c_str());
+ EXPECT_STREQ("0x00107e59", StringPrintf("0x%08zx", size).c_str());
+}
+
+} // namespace art
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
new file mode 100644
index 0000000000..1a78d72657
--- /dev/null
+++ b/runtime/check_reference_map_visitor.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+
+#include "gc_map.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack_map.h"
+
+namespace art {
+
+// Helper class for tests checking that the compiler keeps track of dex registers
+// holding references.
+class CheckReferenceMapVisitor : public StackVisitor {
+ public:
+ explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, nullptr) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ if (m != nullptr && (m->IsCalleeSaveMethod() || m->IsNative())) {
+ CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
+ }
+
+ if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
+ return true;
+ }
+
+ LOG(INFO) << "At " << PrettyMethod(m, false);
+
+ if (m->IsCalleeSaveMethod()) {
+ LOG(WARNING) << "no PC for " << PrettyMethod(m);
+ return true;
+ }
+
+ return false;
+ }
+
+ void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (GetMethod()->IsOptimized()) {
+ CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
+ } else {
+ CheckQuickMethod(registers, number_of_references, native_pc_offset);
+ }
+ }
+
+ private:
+ void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ CodeInfo code_info = m->GetOptimizedCodeInfo();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ uint32_t register_mask = stack_map.GetRegisterMask();
+ for (int i = 0; i < number_of_references; ++i) {
+ int reg = registers[i];
+ CHECK(reg < m->GetCodeItem()->registers_size_);
+ DexRegisterMap::LocationKind location = dex_register_map.GetLocationKind(reg);
+ switch (location) {
+ case DexRegisterMap::kNone:
+ // Not set, should not be a reference.
+ CHECK(false);
+ break;
+ case DexRegisterMap::kInStack:
+ CHECK(stack_mask.LoadBit(dex_register_map.GetValue(reg) >> 2));
+ break;
+ case DexRegisterMap::kInRegister:
+ CHECK_NE(register_mask & dex_register_map.GetValue(reg), 0u);
+ break;
+ case DexRegisterMap::kConstant:
+ CHECK_EQ(dex_register_map.GetValue(reg), 0);
+ break;
+ }
+ }
+ }
+
+ void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
+ const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
+ CHECK(ref_bitmap);
+ for (int i = 0; i < number_of_references; ++i) {
+ int reg = registers[i];
+ CHECK(reg < m->GetCodeItem()->registers_size_);
+ CHECK((*((ref_bitmap) + reg / 8) >> (reg % 8) ) & 0x01)
+ << "Error: Reg @" << i << " is not in GC map";
+ }
+ }
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
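Concretely, the run-tests subclass this visitor and assert which dex registers must hold references at known points in a method; the base class then validates them against either the optimizing compiler's stack maps or Quick's GC map. A hedged sketch of that usage (the method name, register numbers, and the native-pc accessor are illustrative, not copied from the real tests):

#include "check_reference_map_visitor.h"

namespace art {

class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
 public:
  explicit TestReferenceMapVisitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : CheckReferenceMapVisitor(thread) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (CheckReferenceMapVisitor::VisitFrame()) {
      return true;  // Frame has no checkable maps; keep walking the stack.
    }
    mirror::ArtMethod* m = GetMethod();
    // Hypothetical expectation: in Main.f, dex registers 0 and 2 hold
    // references at this frame's pc, whichever compiler produced the code.
    if (PrettyMethod(m, false) == "Main.f") {
      int registers[] = {0, 2};
      CheckReferences(registers, 2, GetNativePcOffset());
    }
    return true;
  }
};

}  // namespace art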
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index cc1e0b994a..6c374029d8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1886,11 +1886,25 @@ JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value,
return SetFieldValueImpl(0, field_id, value, width, true);
}
-std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
+JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
JDWP::JdwpError error;
- mirror::String* s = gRegistry->Get<mirror::String*>(string_id, &error);
- CHECK(s != nullptr) << error;
- return s->ToModifiedUtf8();
+ mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ if (obj == nullptr) {
+ return JDWP::ERR_INVALID_OBJECT;
+ }
+ {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
+ if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
+ // This isn't a string.
+ return JDWP::ERR_INVALID_STRING;
+ }
+ }
+ *str = obj->AsString()->ToModifiedUtf8();
+ return JDWP::ERR_NONE;
}
void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
@@ -1939,7 +1953,7 @@ JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name)
}
JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
if (error != JDWP::ERR_NONE) {
@@ -1970,26 +1984,54 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
return error;
}
-std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
- ScopedObjectAccess soa(Thread::Current());
+static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
+ JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, error);
+ if (*error != JDWP::ERR_NONE) {
+ return nullptr;
+ }
+ if (thread_group == nullptr) {
+ *error = JDWP::ERR_INVALID_OBJECT;
+ return nullptr;
+ }
+ mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
+ CHECK(c != nullptr);
+ if (!c->IsAssignableFrom(thread_group->GetClass())) {
+ // This is not a java.lang.ThreadGroup.
+ *error = JDWP::ERR_INVALID_THREAD_GROUP;
+ return nullptr;
+ }
+ *error = JDWP::ERR_NONE;
+ return thread_group;
+}
+
+JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK(thread_group != nullptr) << error;
+ mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
- CHECK(c != nullptr);
mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
CHECK(f != nullptr);
mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
soa.Self()->EndAssertNoThreadSuspension(old_cause);
- return s->ToModifiedUtf8();
+
+ std::string thread_group_name(s->ToModifiedUtf8());
+ expandBufAddUtf8String(pReply, thread_group_name);
+ return JDWP::ERR_NONE;
}
-JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
+JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK(thread_group != nullptr) << error;
+ mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
CHECK(c != nullptr);
@@ -1997,19 +2039,69 @@ JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
CHECK(f != nullptr);
mirror::Object* parent = f->GetObject(thread_group);
soa.Self()->EndAssertNoThreadSuspension(old_cause);
- return gRegistry->Add(parent);
+
+ JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
+ expandBufAddObjectId(pReply, parent_group_id);
+ return JDWP::ERR_NONE;
}
-JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
+static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
+ std::vector<JDWP::ObjectId>* child_thread_group_ids)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(thread_group != nullptr);
+
+ // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
+ mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
+ mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
+
+ // Get the array and size out of the ArrayList<ThreadGroup>...
+ mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
+ mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
+ mirror::ObjectArray<mirror::Object>* groups_array =
+ array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
+ const int32_t size = size_field->GetInt(groups_array_list);
+
+ // Copy the first 'size' elements out of the array into the result.
+ for (int32_t i = 0; i < size; ++i) {
+ child_thread_group_ids->push_back(gRegistry->Add(groups_array->Get(i)));
+ }
+}
+
+JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
- mirror::Object* group = f->GetObject(f->GetDeclaringClass());
- return gRegistry->Add(group);
+ JDWP::JdwpError error;
+ mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+
+ // Add child threads.
+ {
+ std::vector<JDWP::ObjectId> child_thread_ids;
+ GetThreads(thread_group, &child_thread_ids);
+ expandBufAdd4BE(pReply, child_thread_ids.size());
+ for (JDWP::ObjectId child_thread_id : child_thread_ids) {
+ expandBufAddObjectId(pReply, child_thread_id);
+ }
+ }
+
+ // Add child thread groups.
+ {
+ std::vector<JDWP::ObjectId> child_thread_groups_ids;
+ GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
+ expandBufAdd4BE(pReply, child_thread_groups_ids.size());
+ for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
+ expandBufAddObjectId(pReply, child_thread_group_id);
+ }
+ }
+
+ return JDWP::ERR_NONE;
}
-JDWP::ObjectId Dbg::GetMainThreadGroupId() {
- ScopedObjectAccess soa(Thread::Current());
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
+JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
mirror::Object* group = f->GetObject(f->GetDeclaringClass());
return gRegistry->Add(group);
}
@@ -2111,11 +2203,8 @@ static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
return (group == desired_thread_group);
}
-void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids) {
+void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK_EQ(error, JDWP::ERR_NONE);
std::list<Thread*> all_threads_list;
{
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
@@ -2147,30 +2236,6 @@ void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>
}
}
-void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id,
- std::vector<JDWP::ObjectId>* child_thread_group_ids) {
- ScopedObjectAccess soa(Thread::Current());
- JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK(thread_group != nullptr) << error;
-
- // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
- mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
- mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
-
- // Get the array and size out of the ArrayList<ThreadGroup>...
- mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
- mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
- mirror::ObjectArray<mirror::Object>* groups_array =
- array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
- const int32_t size = size_field->GetInt(groups_array_list);
-
- // Copy the first 'size' elements out of the array into the result.
- for (int32_t i = 0; i < size; ++i) {
- child_thread_group_ids->push_back(gRegistry->Add(groups_array->Get(i)));
- }
-}
-
static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread)
@@ -4323,7 +4388,7 @@ void Dbg::SetAllocTrackingEnabled(bool enable) {
recent_allocation_records_ = new AllocRecord[alloc_record_max_];
CHECK(recent_allocation_records_ != nullptr);
}
- Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
} else {
{
ScopedObjectAccess soa(self); // For type_cache_.Clear();
@@ -4339,7 +4404,7 @@ void Dbg::SetAllocTrackingEnabled(bool enable) {
type_cache_.Clear();
}
// If an allocation comes in before we uninstrument, we will safely drop it on the floor.
- Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+ Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(false);
}
}
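The common shape of these debugger.cc changes: every JDWP entry point now decodes its ObjectId, type-checks the result against the expected well-known class, and returns a JDWP error code instead of CHECK-aborting the runtime on malformed debugger input. The out-parameter is written only on success. A reduced sketch of the pattern (stand-in types, not the real ART/JDWP declarations):

#include <string>

enum JdwpError { ERR_NONE, ERR_INVALID_OBJECT, ERR_INVALID_STRING };
struct Object {
  bool is_string;
  std::string utf8;
};

// Stand-in registry lookup; nullptr stands for a stale id.
Object* Lookup(unsigned id, JdwpError* error) {
  static Object the_string{true, "hello"};
  *error = ERR_NONE;
  return id == 1 ? &the_string : nullptr;  // Pretend only id 1 is registered.
}

JdwpError StringToUtf8(unsigned string_id, std::string* str) {
  JdwpError error = ERR_NONE;
  Object* obj = Lookup(string_id, &error);
  if (error != ERR_NONE) {
    return error;               // Registry rejected the id outright.
  }
  if (obj == nullptr) {
    return ERR_INVALID_OBJECT;  // Id no longer maps to a live object.
  }
  if (!obj->is_string) {
    return ERR_INVALID_STRING;  // Live object, but not a java.lang.String.
  }
  *str = obj->utf8;             // Only on success is the out-param written.
  return ERR_NONE;
}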
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 219210e4aa..e171d7854f 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -381,7 +381,7 @@ class Dbg {
static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static std::string StringToUtf8(JDWP::ObjectId string_id)
+ static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -393,13 +393,19 @@ class Dbg {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
- static std::string GetThreadGroupName(JDWP::ObjectId thread_group_id);
- static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId thread_group_id)
+ static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::ObjectId GetSystemThreadGroupId()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::ObjectId GetMainThreadGroupId();
static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
@@ -414,11 +420,9 @@ class Dbg {
- // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
+ // Fills 'thread_ids' with the threads in the given thread group. If thread_group == nullptr,
// returns all threads.
- static void GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids)
+ static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void GetChildThreadGroups(JDWP::ObjectId thread_group_id,
- std::vector<JDWP::ObjectId>* child_thread_group_ids);
static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 64c9185c87..6a9976acb3 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -253,10 +253,10 @@
V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
- V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kUnknown, 0, kVerifyError) \
- V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xEF, UNUSED_EF, "unused-ef", k10x, false, kUnknown, 0, kVerifyError) \
V(0xF0, UNUSED_F0, "unused-f0", k10x, false, kUnknown, 0, kVerifyError) \
V(0xF1, UNUSED_F1, "unused-f1", k10x, false, kUnknown, 0, kVerifyError) \
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index ad22a2eae4..a7e5e7487b 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -569,7 +569,7 @@ RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
// Get the lowest address non-full run from the binary tree.
- std::set<Run*>* const bt = &non_full_runs_[idx];
+ auto* const bt = &non_full_runs_[idx];
if (!bt->empty()) {
// If there's one, use it as the current run.
auto it = bt->begin();
@@ -767,7 +767,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
}
// Free the slot in the run.
run->FreeSlot(ptr);
- std::set<Run*>* non_full_runs = &non_full_runs_[idx];
+ auto* non_full_runs = &non_full_runs_[idx];
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of this run.
std::set<Run*>::iterator pos = non_full_runs->find(run);
@@ -793,9 +793,8 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- std::unordered_set<Run*, hash_run, eq_run>* full_runs =
- kIsDebugBuild ? &full_runs_[idx] : NULL;
- std::set<Run*>::iterator pos = non_full_runs->find(run);
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
DCHECK(run_was_full);
DCHECK(full_runs->find(run) != full_runs->end());
@@ -1266,9 +1265,8 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
}
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
- std::set<Run*>* non_full_runs = &non_full_runs_[idx];
- std::unordered_set<Run*, hash_run, eq_run>* full_runs =
- kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* non_full_runs = &non_full_runs_[idx];
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
// run.
@@ -2056,7 +2054,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
// in a run set.
if (!is_current_run) {
MutexLock mu(self, rosalloc->lock_);
- std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
+ auto& non_full_runs = rosalloc->non_full_runs_[idx];
// If it's all free, it must be a free page run rather than a run.
CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
if (!IsFull()) {
@@ -2066,7 +2064,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
} else {
// If it's full, it must in the full run set (debug build only.)
if (kIsDebugBuild) {
- std::unordered_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
+ auto& full_runs = rosalloc->full_runs_[idx];
CHECK(full_runs.find(this) != full_runs.end())
<< " A full run isn't in the full run set " << Dump();
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index b2a5a3c96c..2fbd97a8d5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -26,6 +26,7 @@
#include <unordered_set>
#include <vector>
+#include "base/allocator.h"
#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
@@ -53,7 +54,7 @@ class RosAlloc {
size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
DCHECK_GE(byte_size, static_cast<size_t>(0));
- DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
+ DCHECK_ALIGNED(byte_size, kPageSize);
return byte_size;
}
void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
@@ -403,6 +404,7 @@ class RosAlloc {
// We use thread-local runs for the size Brackets whose indexes
// are less than this index. We use shared (current) runs for the rest.
+
static const size_t kNumThreadLocalSizeBrackets = 11;
private:
@@ -423,12 +425,13 @@ class RosAlloc {
// The run sets that hold the runs whose slots are not all
// full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
- std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
+ AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
// The run sets that hold the runs whose slots are all full. This is
// debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
- std::unordered_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
+ std::unordered_set<Run*, hash_run, eq_run, TrackingAllocator<Run*, kAllocatorTagRosAlloc>>
+ full_runs_[kNumOfSizeBrackets];
// The set of free pages.
- std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
+ AllocationTrackingSet<FreePageRun*, kAllocatorTagRosAlloc> free_page_runs_ GUARDED_BY(lock_);
// The dedicated full run, it is always full and shared by all threads when revoking happens.
// This is an optimization since enables us to avoid a null check for revoked runs.
static Run* dedicated_full_run_;
@@ -460,7 +463,8 @@ class RosAlloc {
// The table that indicates the size of free page runs. These sizes
// are stored here to avoid storing in the free page header and
// release backing pages.
- std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
+ std::vector<size_t, TrackingAllocator<size_t, kAllocatorTagRosAlloc>> free_page_run_size_map_
+ GUARDED_BY(lock_);
// The global lock. Used to guard the page map, the free page set,
// and the footprint.
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 40448524c6..b3bed64c5e 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -547,8 +547,11 @@ void MarkCompact::Sweep(bool swap_bitmaps) {
}
void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+ space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+ if (los != nullptr) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ RecordFreeLOS(los->Sweep(swap_bitmaps));
+ }
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 95530be202..930499a2fb 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -374,7 +374,8 @@ class MarkSweepMarkObjectSlowPath {
}
space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
- (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+ (kIsDebugBuild && large_object_space != nullptr &&
+ !large_object_space->Contains(obj)))) {
LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
LOG(ERROR) << "Attempting see if it's a bad root";
mark_sweep_->VerifyRoots();
@@ -481,7 +482,7 @@ void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor*
// See if the root is on any space bitmap.
if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- if (!large_object_space->Contains(root)) {
+ if (large_object_space != nullptr && !large_object_space->Contains(root)) {
LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
if (visitor != NULL) {
LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
@@ -1074,20 +1075,22 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
}
// Handle the large object space.
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
- accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
- if (swap_bitmaps) {
- std::swap(large_live_objects, large_mark_objects);
- }
- for (size_t i = 0; i < count; ++i) {
- Object* obj = objects[i];
- // Handle large objects.
- if (kUseThreadLocalAllocationStack && obj == nullptr) {
- continue;
+ if (large_object_space != nullptr) {
+ accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
+ if (swap_bitmaps) {
+ std::swap(large_live_objects, large_mark_objects);
}
- if (!large_mark_objects->Test(obj)) {
- ++freed_los.objects;
- freed_los.bytes += large_object_space->Free(self, obj);
+ for (size_t i = 0; i < count; ++i) {
+ Object* obj = objects[i];
+ // Handle large objects.
+ if (kUseThreadLocalAllocationStack && obj == nullptr) {
+ continue;
+ }
+ if (!large_mark_objects->Test(obj)) {
+ ++freed_los.objects;
+ freed_los.bytes += large_object_space->Free(self, obj);
+ }
}
}
{
@@ -1125,8 +1128,11 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
- RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+ space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+ if (los != nullptr) {
+ TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+ RecordFreeLOS(los->Sweep(swap_bitmaps));
+ }
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8fb33cec2f..c8fa869a45 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -365,23 +365,23 @@ void SemiSpace::MarkReachableObjects() {
}
CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
- if (is_large_object_space_immune_) {
+ space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
+ if (is_large_object_space_immune_ && los != nullptr) {
TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
// be newly added to the live set above in MarkAllocStackAsLive().
- GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+ los->CopyLiveToMarked();
// When the large object space is immune, we need to scan the
// large object space as roots as they contain references to their
// classes (primitive array classes) that could move though they
// don't contain any other references.
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
- large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
- reinterpret_cast<uintptr_t>(large_object_space->End()),
+ large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
+ reinterpret_cast<uintptr_t>(los->End()),
visitor);
}
// Recursively process the mark stack.
@@ -655,8 +655,11 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
DCHECK(!is_large_object_space_immune_);
- TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
- RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+ space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+ if (los != nullptr) {
+ TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
+ RecordFreeLOS(los->Sweep(swap_bitmaps));
+ }
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
@@ -751,6 +754,7 @@ void SemiSpace::FinishPhase() {
from_space_ = nullptr;
CHECK(mark_stack_->IsEmpty());
mark_stack_->Reset();
+ space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
@@ -762,7 +766,7 @@ void SemiSpace::FinishPhase() {
bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
bool bytes_promoted_threshold_exceeded =
bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
- uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+ uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
uint64_t last_los_bytes_allocated =
large_object_bytes_allocated_at_last_whole_heap_collection_;
bool large_object_bytes_threshold_exceeded =
@@ -775,7 +779,7 @@ void SemiSpace::FinishPhase() {
// Reset the counters.
bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
large_object_bytes_allocated_at_last_whole_heap_collection_ =
- GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+ los != nullptr ? los->GetBytesAllocated() : 0U;
collect_from_space_only_ = true;
}
}
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 5a5844642b..4ed6abc386 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -16,7 +16,7 @@
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
-#include "gc/space/space.h"
+#include "gc/space/space-inl.h"
#include "sticky_mark_sweep.h"
#include "thread-inl.h"
@@ -32,7 +32,6 @@ StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::stri
void StickyMarkSweep::BindBitmaps() {
PartialMarkSweep::BindBitmaps();
-
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
// know what was allocated since the last GC. A side-effect of binding the allocation space mark
@@ -44,7 +43,10 @@ void StickyMarkSweep::BindBitmaps() {
space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
}
- GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+ for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+ CHECK(space->IsLargeObjectSpace());
+ space->AsLargeObjectSpace()->CopyLiveToMarked();
+ }
}
void StickyMarkSweep::MarkReachableObjects() {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bb7da0d13d..18441c1cdf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -83,13 +83,6 @@ static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
-// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
-// since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
-#if USE_ART_LOW_4G_ALLOCATOR
-static constexpr bool kUseFreeListSpaceForLOS = true;
-#else
-static constexpr bool kUseFreeListSpaceForLOS = false;
-#endif
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -107,8 +100,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
double target_utilization, double foreground_heap_growth_multiplier,
size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
- CollectorType background_collector_type, size_t parallel_gc_threads,
- size_t conc_gc_threads, bool low_memory_mode,
+ CollectorType background_collector_type,
+ space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
+ size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_log_threshold, size_t long_gc_log_threshold,
bool ignore_max_footprint, bool use_tlab,
bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
@@ -135,7 +129,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
ignore_max_footprint_(ignore_max_footprint),
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
zygote_space_(nullptr),
- large_object_threshold_(kDefaultLargeObjectThreshold), // Starts out disabled.
+ large_object_threshold_(large_object_threshold),
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -338,13 +332,21 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
CHECK(non_moving_space_ != nullptr);
CHECK(!non_moving_space_->CanMoveObjects());
// Allocate the large object space.
- if (kUseFreeListSpaceForLOS) {
- large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
+ if (large_object_space_type == space::kLargeObjectSpaceTypeFreeList) {
+ large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
+ capacity_);
+ CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
+ } else if (large_object_space_type == space::kLargeObjectSpaceTypeMap) {
+ large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
+ CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
} else {
- large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
+ // Disable the large object space by making the cutoff excessively large.
+ large_object_threshold_ = std::numeric_limits<size_t>::max();
+ large_object_space_ = nullptr;
+ }
+ if (large_object_space_ != nullptr) {
+ AddSpace(large_object_space_);
}
- CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
- AddSpace(large_object_space_);
// Compute heap capacity. Continuous spaces are sorted in order of Begin().
CHECK(!continuous_spaces_.empty());
// Relies on the spaces being sorted.
@@ -424,7 +426,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
if (running_on_valgrind_) {
- Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
}
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
@@ -712,7 +714,8 @@ void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
CHECK(space1 != nullptr);
CHECK(space2 != nullptr);
MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
- large_object_space_->GetLiveBitmap(), stack);
+ (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
+ stack);
}
void Heap::DeleteThreadPool() {
@@ -1002,7 +1005,10 @@ void Heap::Trim() {
total_alloc_space_size += malloc_space->Size();
}
}
- total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
+ total_alloc_space_allocated = GetBytesAllocated();
+ if (large_object_space_ != nullptr) {
+ total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
+ }
if (bump_pointer_space_ != nullptr) {
total_alloc_space_allocated -= bump_pointer_space_->Size();
}
@@ -2018,6 +2024,7 @@ void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
} else if (bitmap2->HasAddress(obj)) {
bitmap2->Set(obj);
} else {
+ DCHECK(large_objects != nullptr);
large_objects->Set(obj);
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 351e1c6b11..faaea4077c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -30,6 +30,7 @@
#include "gc/collector/garbage_collector.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "instruction_set.h"
@@ -129,8 +130,6 @@ class Heap {
public:
// If true, measure the total allocation time.
static constexpr bool kMeasureAllocationTime = false;
- // Primitive arrays larger than this size are put in the large object space.
- static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
static constexpr size_t kDefaultStartingSize = kPageSize;
static constexpr size_t kDefaultInitialSize = 2 * MB;
static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -142,7 +141,17 @@ class Heap {
static constexpr size_t kDefaultTLABSize = 256 * KB;
static constexpr double kDefaultTargetUtilization = 0.5;
static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
-
+ // Primitive arrays larger than this size are put in the large object space.
+ static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+ // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
+ // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
+#if USE_ART_LOW_4G_ALLOCATOR
+ static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
+ space::kLargeObjectSpaceTypeFreeList;
+#else
+ static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
+ space::kLargeObjectSpaceTypeMap;
+#endif
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
@@ -161,6 +170,7 @@ class Heap {
const std::string& original_image_file_name,
InstructionSet image_instruction_set,
CollectorType foreground_collector_type, CollectorType background_collector_type,
+ space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_threshold, size_t long_gc_threshold,
bool ignore_max_footprint, bool use_tlab,
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2a4371290f..dad5855748 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -120,7 +120,7 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
- size_t allocation_size = mem_map->Size();
+ const size_t allocation_size = mem_map->BaseSize();
DCHECK(bytes_allocated != nullptr);
begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
@@ -145,8 +145,9 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
}
- DCHECK_GE(num_bytes_allocated_, found->second->Size());
- size_t allocation_size = found->second->Size();
+ const size_t map_size = found->second->BaseSize();
+ DCHECK_GE(num_bytes_allocated_, map_size);
+ size_t allocation_size = map_size;
num_bytes_allocated_ -= allocation_size;
--num_objects_allocated_;
delete found->second;
@@ -158,7 +159,7 @@ size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_s
MutexLock mu(Thread::Current(), lock_);
auto found = mem_maps_.find(obj);
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
- return found->second->Size();
+ return found->second->BaseSize();
}
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 9d5e090a29..a63c5c0aae 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -31,6 +31,12 @@ namespace space {
class AllocationInfo;
+enum LargeObjectSpaceType {
+ kLargeObjectSpaceTypeDisabled,
+ kLargeObjectSpaceTypeMap,
+ kLargeObjectSpaceTypeFreeList,
+};
+
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
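This enum is what parsed_options.cc (listed in the diffstat above; its hunks are not shown here) maps a runtime option onto, and what Heap's constructor now consumes in place of the old compile-time kUseFreeListSpaceForLOS constant. A hypothetical sketch of such a mapping; the option value spellings are an assumption for illustration, not taken from this diff:

#include <cstring>

namespace space {
enum LargeObjectSpaceType {
  kLargeObjectSpaceTypeDisabled,
  kLargeObjectSpaceTypeMap,
  kLargeObjectSpaceTypeFreeList,
};
}  // namespace space

// Hypothetical parser: value spellings are illustrative only.
space::LargeObjectSpaceType ParseLargeObjectSpaceType(const char* value) {
  if (std::strcmp(value, "disabled") == 0) {
    return space::kLargeObjectSpaceTypeDisabled;
  }
  if (std::strcmp(value, "freelist") == 0) {
    return space::kLargeObjectSpaceTypeFreeList;  // FreeListSpace.
  }
  return space::kLargeObjectSpaceTypeMap;  // One anonymous MemMap per object.
}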
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 0f45b9e512..a2e88a694e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -597,10 +597,13 @@ static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
thread->ResetQuickAllocEntryPointsForThread();
}
-void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
+void Instrumentation::SetEntrypointsInstrumented(bool instrumented, bool suspended) {
Runtime* runtime = Runtime::Current();
ThreadList* tl = runtime->GetThreadList();
- if (runtime->IsStarted()) {
+ if (suspended) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ if (runtime->IsStarted() && !suspended) {
tl->SuspendAll();
}
{
@@ -608,30 +611,30 @@ void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
SetQuickAllocEntryPointsInstrumented(instrumented);
ResetQuickAllocEntryPoints();
}
- if (runtime->IsStarted()) {
+ if (runtime->IsStarted() && !suspended) {
tl->ResumeAll();
}
}
-void Instrumentation::InstrumentQuickAllocEntryPoints() {
+void Instrumentation::InstrumentQuickAllocEntryPoints(bool suspended) {
// TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
// should be guarded by a lock.
DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
const bool enable_instrumentation =
quick_alloc_entry_points_instrumentation_counter_.FetchAndAddSequentiallyConsistent(1) == 0;
if (enable_instrumentation) {
- SetEntrypointsInstrumented(true);
+ SetEntrypointsInstrumented(true, suspended);
}
}
-void Instrumentation::UninstrumentQuickAllocEntryPoints() {
+void Instrumentation::UninstrumentQuickAllocEntryPoints(bool suspended) {
// TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
// should be guarded by a lock.
DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
const bool disable_instrumentation =
quick_alloc_entry_points_instrumentation_counter_.FetchAndSubSequentiallyConsistent(1) == 1;
if (disable_instrumentation) {
- SetEntrypointsInstrumented(false);
+ SetEntrypointsInstrumented(false, suspended);
}
}
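The counter logic around these entry points is reference counting in miniature: multiple clients (the debugger's allocation tracking, valgrind support, Runtime::SetStatsEnabled) can request instrumentation independently, and only the 0->1 and 1->0 transitions actually swap the entrypoints. The new suspended flag lets a caller that already holds the mutator lock exclusively skip the SuspendAll/ResumeAll pair. The core pattern, reduced to a self-contained sketch:

#include <atomic>
#include <cstdio>

static std::atomic<int> g_counter{0};

// Stand-in for Instrumentation::SetEntrypointsInstrumented(): the expensive
// operation that swaps the allocation entrypoints (and, in ART, suspends all
// threads unless the caller is already suspended).
static void SetEntrypointsInstrumented(bool instrumented) {
  std::printf("entrypoints instrumented: %s\n", instrumented ? "yes" : "no");
}

void InstrumentQuickAllocEntryPoints() {
  // Only the 0 -> 1 transition swaps entrypoints; later callers just count.
  if (g_counter.fetch_add(1) == 0) {
    SetEntrypointsInstrumented(true);
  }
}

void UninstrumentQuickAllocEntryPoints() {
  // Only the final 1 -> 0 transition swaps them back.
  if (g_counter.fetch_sub(1) == 1) {
    SetEntrypointsInstrumented(false);
  }
}

int main() {
  InstrumentQuickAllocEntryPoints();    // Swaps in instrumented entrypoints.
  InstrumentQuickAllocEntryPoints();    // Second client: count only.
  UninstrumentQuickAllocEntryPoints();  // Still one client left.
  UninstrumentQuickAllocEntryPoints();  // Last out: swaps back.
  return 0;
}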
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d05cee5dcb..3c1c756992 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -182,10 +182,10 @@ class Instrumentation {
return interpreter_handler_table_;
}
- void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::runtime_shutdown_lock_);
- void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
- Locks::runtime_shutdown_lock_);
+ void InstrumentQuickAllocEntryPoints(bool suspended)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
+ void UninstrumentQuickAllocEntryPoints(bool suspended)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
// Update the code of a method respecting any installed stubs.
@@ -350,7 +350,7 @@ class Instrumentation {
// No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
// exclusive access to mutator lock which you can't get if the runtime isn't started.
- void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;
+ void SetEntrypointsInstrumented(bool instrumented, bool suspended) NO_THREAD_SAFETY_ANALYSIS;
void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc) const
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 35095f95ca..e0a83f607b 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -151,7 +151,12 @@ static JdwpError FinishInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
/* show detailed debug output */
if (resultTag == JT_STRING && exceptObjId == 0) {
if (resultValue != 0) {
- VLOG(jdwp) << " string '" << Dbg::StringToUtf8(resultValue) << "'";
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
+ }
} else {
VLOG(jdwp) << " string (null)";
}
@@ -220,7 +225,7 @@ static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf*
static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::vector<ObjectId> thread_ids;
- Dbg::GetThreads(0, &thread_ids);
+ Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
expandBufAdd4BE(pReply, thread_ids.size());
for (uint32_t i = 0; i < thread_ids.size(); ++i) {
@@ -919,7 +924,11 @@ static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* re
static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId stringObject = request->ReadObjectId();
- std::string str(Dbg::StringToUtf8(stringObject));
+ std::string str;
+ JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
VLOG(jdwp) << StringPrintf(" --> %s", PrintableString(str.c_str()).c_str());
@@ -1141,10 +1150,7 @@ static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* p
static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
-
- expandBufAddUtf8String(pReply, Dbg::GetThreadGroupName(thread_group_id));
-
- return ERR_NONE;
+ return Dbg::GetThreadGroupName(thread_group_id, pReply);
}
/*
@@ -1154,11 +1160,7 @@ static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
-
- ObjectId parentGroup = Dbg::GetThreadGroupParent(thread_group_id);
- expandBufAddObjectId(pReply, parentGroup);
-
- return ERR_NONE;
+ return Dbg::GetThreadGroupParent(thread_group_id, pReply);
}
/*
@@ -1168,22 +1170,7 @@ static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
-
- std::vector<ObjectId> thread_ids;
- Dbg::GetThreads(thread_group_id, &thread_ids);
- expandBufAdd4BE(pReply, thread_ids.size());
- for (uint32_t i = 0; i < thread_ids.size(); ++i) {
- expandBufAddObjectId(pReply, thread_ids[i]);
- }
-
- std::vector<ObjectId> child_thread_groups_ids;
- Dbg::GetChildThreadGroups(thread_group_id, &child_thread_groups_ids);
- expandBufAdd4BE(pReply, child_thread_groups_ids.size());
- for (uint32_t i = 0; i < child_thread_groups_ids.size(); ++i) {
- expandBufAddObjectId(pReply, child_thread_groups_ids[i]);
- }
-
- return ERR_NONE;
+ return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
}
/*
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index ae170702b5..8447616cf5 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -285,14 +285,17 @@ inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer) {
}
inline StackMap ArtMethod::GetStackMap(uint32_t native_pc_offset) {
+ return GetOptimizedCodeInfo().GetStackMapForNativePcOffset(native_pc_offset);
+}
+
+inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
DCHECK(IsOptimized());
const void* code_pointer = GetQuickOatCodePointer();
DCHECK(code_pointer != nullptr);
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
const void* data = reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
- CodeInfo code_info(data);
- return code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ return CodeInfo(data);
}
inline void ArtMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) {
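Both GetOptimizedCodeInfo() here and the new OatMethod accessors below rely on the same layout trick: an OatQuickMethodHeader is emitted immediately before a method's first instruction, so indexing [-1] from the code pointer lands on the header. In miniature (the layout and field names are simplified stand-ins):

#include <cstdint>
#include <cstdio>

// Simplified stand-in for OatQuickMethodHeader: a fixed-layout struct that
// the compiler writes directly before the emitted code.
struct MethodHeaderSketch {
  uint32_t vmap_table_offset_;
  uint32_t code_size_;
};

const MethodHeaderSketch* HeaderFromCode(const void* code) {
  // Step one whole struct backwards from the first instruction.
  return reinterpret_cast<const MethodHeaderSketch*>(code) - 1;
}

int main() {
  // Fake "compiled method": header immediately followed by code bytes.
  struct {
    MethodHeaderSketch header;
    uint8_t code[8];
  } blob = {{0x20, 8}, {0}};
  const MethodHeaderSketch* h = HeaderFromCode(blob.code);
  std::printf("code_size = %u\n", h->code_size_);  // Prints 8.
  return 0;
}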
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d37aa57688..de6ec05442 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -155,7 +155,9 @@ class MANAGED ArtMethod FINAL : public Object {
// Temporary solution for detecting if a method has been optimized: the compiler
// does not create a GC map. Instead, the vmap table contains the stack map
// (as in stack_map.h).
- return (GetEntryPointFromQuickCompiledCode() != nullptr) && (GetNativeGcMap() == nullptr);
+ return (GetEntryPointFromQuickCompiledCode() != nullptr)
+ && (GetQuickOatCodePointer() != nullptr)
+ && (GetNativeGcMap() == nullptr);
}
bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -349,6 +351,7 @@ class MANAGED ArtMethod FINAL : public Object {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
StackMap GetStackMap(uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index ceff2065ba..d8a537f948 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -60,11 +60,11 @@ static jobjectArray VMDebug_getVmFeatureList(JNIEnv* env, jclass) {
}
static void VMDebug_startAllocCounting(JNIEnv*, jclass) {
- Runtime::Current()->SetStatsEnabled(true);
+ Runtime::Current()->SetStatsEnabled(true, false);
}
static void VMDebug_stopAllocCounting(JNIEnv*, jclass) {
- Runtime::Current()->SetStatsEnabled(false);
+ Runtime::Current()->SetStatsEnabled(false, false);
}
static jint VMDebug_getAllocCount(JNIEnv*, jclass, jint kind) {
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 97ca6b2b37..9570bb501a 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -21,6 +21,39 @@
namespace art {
+inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return nullptr;
+ }
+ // Return a pointer to the packed struct before the code.
+ return reinterpret_cast<const OatQuickMethodHeader*>(code) - 1;
+}
+
+inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(method_header) - begin_;
+}
+
+inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+}
+
+inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(&method_header->code_size_) - begin_;
+}
+
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
@@ -50,11 +83,27 @@ inline uint32_t OatFile::OatMethod::GetMappingTableOffset() const {
return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
}
+inline uint32_t OatFile::OatMethod::GetMappingTableOffsetOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(&method_header->mapping_table_offset_) - begin_;
+}
+
inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
const uint8_t* vmap_table = GetVmapTable();
return static_cast<uint32_t>(vmap_table != nullptr ? vmap_table - begin_ : 0u);
}
+inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
+ const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const byte*>(&method_header->vmap_table_offset_) - begin_;
+}
+
inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
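
[Editor's note] The new *OffsetOffset accessors above all follow one pattern: locate the header, then report the distance from the oat file's mapped base to a specific field, with 0 meaning absent. A compact sketch of that computation, with illustrative names:

#include <cstdint>

uint32_t FieldFileOffset(const uint8_t* file_begin, const void* field) {
  if (field == nullptr) {
    return 0u;  // matches the 0-means-absent convention used above
  }
  // A field's file offset is plain pointer arithmetic against the base.
  return static_cast<uint32_t>(
      reinterpret_cast<const uint8_t*>(field) - file_begin);
}
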
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index c621e88111..a896f3eece 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -454,8 +454,12 @@ const DexFile* OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const {
dex_file_location_checksum_, error_msg);
}
+uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
+ return oat_class_offsets_pointer_[class_def_index];
+}
+
OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
- uint32_t oat_class_offset = oat_class_offsets_pointer_[class_def_index];
+ uint32_t oat_class_offset = GetOatClassOffset(class_def_index);
const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
@@ -531,49 +535,54 @@ OatFile::OatClass::OatClass(const OatFile* oat_file,
}
}
-const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+uint32_t OatFile::OatClass::GetOatMethodOffsetsOffset(uint32_t method_index) const {
+ const OatMethodOffsets* oat_method_offsets = GetOatMethodOffsets(method_index);
+ if (oat_method_offsets == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const uint8_t*>(oat_method_offsets) - oat_file_->Begin();
+}
+
+const OatMethodOffsets* OatFile::OatClass::GetOatMethodOffsets(uint32_t method_index) const {
// NOTE: We don't keep the number of methods and cannot do a bounds check for method_index.
- if (methods_pointer_ == NULL) {
+ if (methods_pointer_ == nullptr) {
CHECK_EQ(kOatClassNoneCompiled, type_);
- return OatMethod(NULL, 0, 0);
+ return nullptr;
}
size_t methods_pointer_index;
- if (bitmap_ == NULL) {
+ if (bitmap_ == nullptr) {
CHECK_EQ(kOatClassAllCompiled, type_);
methods_pointer_index = method_index;
} else {
CHECK_EQ(kOatClassSomeCompiled, type_);
if (!BitVector::IsBitSet(bitmap_, method_index)) {
- return OatMethod(NULL, 0, 0);
+ return nullptr;
}
size_t num_set_bits = BitVector::NumSetBits(bitmap_, method_index);
methods_pointer_index = num_set_bits;
}
const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
- if (oat_file_->IsExecutable()
- || (Runtime::Current() == nullptr)
- || Runtime::Current()->IsCompiler()) {
+ return &oat_method_offsets;
+}
+
+const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+ const OatMethodOffsets* oat_method_offsets = GetOatMethodOffsets(method_index);
+ if (oat_method_offsets == nullptr) {
+ return OatMethod(nullptr, 0, 0);
+ }
+ if (oat_file_->IsExecutable() ||
+ Runtime::Current() == nullptr || // This case applies for oatdump.
+ Runtime::Current()->IsCompiler()) {
return OatMethod(
oat_file_->Begin(),
- oat_method_offsets.code_offset_,
- oat_method_offsets.gc_map_offset_);
+ oat_method_offsets->code_offset_,
+ oat_method_offsets->gc_map_offset_);
} else {
// We aren't allowed to use the compiled code. We just force it down the interpreted version.
return OatMethod(oat_file_->Begin(), 0, 0);
}
}
-
-uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
- uintptr_t code = reinterpret_cast<uintptr_t>(GetQuickCode());
- if (code == 0) {
- return 0;
- }
- // TODO: make this Thumb2 specific
- code &= ~0x1;
- return reinterpret_cast<uint32_t*>(code)[-1];
-}
-
void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
CHECK(method != NULL);
method->SetEntryPointFromPortableCompiledCode(GetPortableCode());
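
[Editor's note] The kOatClassSomeCompiled path in GetOatMethodOffsets above is a rank query over a bitmap: only compiled methods get an OatMethodOffsets entry, and a method's dense index is the number of set bits below it. A standalone sketch of the idea; ART uses BitVector::IsBitSet/NumSetBits, while this uses GCC/Clang builtins:

#include <cstdint>
#include <vector>

const uint32_t* FindDenseEntry(const uint32_t* bitmap,
                               const std::vector<uint32_t>& entries,
                               uint32_t method_index) {
  uint32_t word = bitmap[method_index / 32];
  uint32_t bit = 1u << (method_index % 32);
  if ((word & bit) == 0) {
    return nullptr;  // method not compiled; no entry exists
  }
  // Count set bits strictly below method_index to get the dense index.
  uint32_t rank = 0;
  for (uint32_t i = 0; i < method_index / 32; ++i) {
    rank += static_cast<uint32_t>(__builtin_popcount(bitmap[i]));
  }
  rank += static_cast<uint32_t>(__builtin_popcount(word & (bit - 1u)));
  return &entries[rank];
}
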
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 2fd4f4c98a..b9d5702b0f 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -114,13 +114,22 @@ class OatFile {
}
}
+ // Returns 0.
uint32_t GetPortableCodeSize() const {
// TODO: With Quick, we store the size before the code. With Portable, the code is in a .o
// file we don't manage ourselves. ELF symbols do have a concept of size, so we could capture
// that and store it somewhere, such as the OatMethod.
return 0;
}
+
+ // Returns size of quick code.
uint32_t GetQuickCodeSize() const;
+ uint32_t GetQuickCodeSizeOffset() const;
+
+ // Returns OatQuickMethodHeader for debugging. Most callers should
+ // use more specific methods such as GetQuickCodeSize.
+ const OatQuickMethodHeader* GetOatQuickMethodHeader() const;
+ uint32_t GetOatQuickMethodHeaderOffset() const;
const uint8_t* GetNativeGcMap() const {
return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
@@ -129,10 +138,14 @@ class OatFile {
size_t GetFrameSizeInBytes() const;
uint32_t GetCoreSpillMask() const;
uint32_t GetFpSpillMask() const;
- uint32_t GetMappingTableOffset() const;
- uint32_t GetVmapTableOffset() const;
+
const uint8_t* GetMappingTable() const;
+ uint32_t GetMappingTableOffset() const;
+ uint32_t GetMappingTableOffsetOffset() const;
+
const uint8_t* GetVmapTable() const;
+ uint32_t GetVmapTableOffset() const;
+ uint32_t GetVmapTableOffsetOffset() const;
// Create an OatMethod with offsets relative to the given base address
OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
@@ -176,11 +189,21 @@ class OatFile {
}
// Get the OatMethod entry based on its index into the class
- // defintion. direct methods come first, followed by virtual
- // methods. note that runtime created methods such as miranda
+ // definition. Direct methods come first, followed by virtual
+ // methods. Note that runtime-created methods such as miranda
// methods are not included.
const OatMethod GetOatMethod(uint32_t method_index) const;
+ // Return a pointer to the OatMethodOffsets for the requested
+ // method_index, or nullptr if none is present. Note that most
+ // callers should use GetOatMethod.
+ const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
+
+ // Return the offset from the start of the OatFile to the
+ // OatMethodOffsets for the requested method_index, or 0 if none
+ // is present. Note that most callers should use GetOatMethod.
+ uint32_t GetOatMethodOffsetsOffset(uint32_t method_index) const;
+
// A representation of an invalid OatClass, used when an OatClass can't be found.
// See ClassLinker::FindOatClass.
static OatClass Invalid() {
@@ -239,6 +262,9 @@ class OatFile {
// Returns the OatClass for the class specified by the given DexFile class_def_index.
OatClass GetOatClass(uint16_t class_def_index) const;
+ // Returns the offset to the OatClass information. Most callers should use GetOatClass.
+ uint32_t GetOatClassOffset(uint16_t class_def_index) const;
+
~OatDexFile();
private:
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 37e08a57ea..6b4f764d6e 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -63,6 +63,8 @@ ParsedOptions::ParsedOptions()
heap_min_free_(gc::Heap::kDefaultMinFree),
heap_max_free_(gc::Heap::kDefaultMaxFree),
heap_non_moving_space_capacity_(gc::Heap::kDefaultNonMovingSpaceCapacity),
+ large_object_space_type_(gc::Heap::kDefaultLargeObjectSpaceType),
+ large_object_threshold_(gc::Heap::kDefaultLargeObjectThreshold),
heap_target_utilization_(gc::Heap::kDefaultTargetUtilization),
foreground_heap_growth_multiplier_(gc::Heap::kDefaultHeapGrowthMultiplier),
parallel_gc_threads_(1),
@@ -452,6 +454,32 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
if (!ParseXGcOption(option)) {
return false;
}
+ } else if (StartsWith(option, "-XX:LargeObjectSpace=")) {
+ std::string substring;
+ if (!ParseStringAfterChar(option, '=', &substring)) {
+ return false;
+ }
+ if (substring == "disabled") {
+ large_object_space_type_ = gc::space::kLargeObjectSpaceTypeDisabled;
+ } else if (substring == "freelist") {
+ large_object_space_type_ = gc::space::kLargeObjectSpaceTypeFreeList;
+ } else if (substring == "map") {
+ large_object_space_type_ = gc::space::kLargeObjectSpaceTypeMap;
+ } else {
+ Usage("Unknown -XX:LargeObjectSpace= option %s\n", substring.c_str());
+ return false;
+ }
+ } else if (StartsWith(option, "-XX:LargeObjectThreshold=")) {
+ std::string substring;
+ if (!ParseStringAfterChar(option, '=', &substring)) {
+ return false;
+ }
+ size_t size = ParseMemoryOption(substring.c_str(), 1);
+ if (size == 0) {
+ Usage("Failed to parse memory option %s\n", option.c_str());
+ return false;
+ }
+ large_object_threshold_ = size;
} else if (StartsWith(option, "-XX:BackgroundGC=")) {
std::string substring;
if (!ParseStringAfterChar(option, '=', &substring)) {
@@ -757,7 +785,6 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xstacktracefile:<filename>\n");
UsageMessage(stream, " -Xgc:[no]preverify\n");
UsageMessage(stream, " -Xgc:[no]postverify\n");
- UsageMessage(stream, " -XX:+DisableExplicitGC\n");
UsageMessage(stream, " -XX:HeapGrowthLimit=N\n");
UsageMessage(stream, " -XX:HeapMinFree=N\n");
UsageMessage(stream, " -XX:HeapMaxFree=N\n");
@@ -774,6 +801,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xgc:[no]postverify_rosalloc\n");
UsageMessage(stream, " -Xgc:[no]presweepingverify\n");
UsageMessage(stream, " -Ximage:filename\n");
+ UsageMessage(stream, " -XX:+DisableExplicitGC\n");
UsageMessage(stream, " -XX:ParallelGCThreads=integervalue\n");
UsageMessage(stream, " -XX:ConcGCThreads=integervalue\n");
UsageMessage(stream, " -XX:MaxSpinsBeforeThinLockInflation=integervalue\n");
@@ -783,6 +811,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:IgnoreMaxFootprint\n");
UsageMessage(stream, " -XX:UseTLAB\n");
UsageMessage(stream, " -XX:BackgroundGC=none\n");
+ UsageMessage(stream, " -XX:LargeObjectSpace={disabled,map,freelist}\n");
+ UsageMessage(stream, " -XX:LargeObjectThreshold=N\n");
UsageMessage(stream, " -Xmethod-trace\n");
UsageMessage(stream, " -Xmethod-trace-file:filename");
UsageMessage(stream, " -Xmethod-trace-file-size:integervalue\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 3839e19940..26a2f31b13 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -24,6 +24,7 @@
#include "globals.h"
#include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
#include "instruction_set.h"
#include "profiler_options.h"
@@ -72,6 +73,8 @@ class ParsedOptions {
size_t heap_min_free_;
size_t heap_max_free_;
size_t heap_non_moving_space_capacity_;
+ gc::space::LargeObjectSpaceType large_object_space_type_;
+ size_t large_object_threshold_;
double heap_target_utilization_;
double foreground_heap_growth_multiplier_;
unsigned int parallel_gc_threads_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a1ea3cf5bf..0e382ff65a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -697,6 +697,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
options->image_isa_,
options->collector_type_,
options->background_collector_type_,
+ options->large_object_space_type_,
+ options->large_object_threshold_,
options->parallel_gc_threads_,
options->conc_gc_threads_,
options->low_memory_mode_,
@@ -996,14 +998,14 @@ void Runtime::DumpLockHolders(std::ostream& os) {
}
}
-void Runtime::SetStatsEnabled(bool new_state) {
+void Runtime::SetStatsEnabled(bool new_state, bool suspended) {
if (new_state == true) {
GetStats()->Clear(~0);
// TODO: wouldn't it make more sense to clear _all_ threads' stats?
Thread::Current()->GetStats()->Clear(~0);
- GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ GetInstrumentation()->InstrumentQuickAllocEntryPoints(suspended);
} else {
- GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+ GetInstrumentation()->UninstrumentQuickAllocEntryPoints(suspended);
}
stats_enabled_ = new_state;
}
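
[Editor's note] A sketch (not ART's implementation) of why SetStatsEnabled now threads a 'suspended' flag down to the alloc entry-point instrumentation: swapping entry points requires every mutator thread to be suspended, and some callers (e.g. Trace::Start below) have already suspended them. All helpers here are hypothetical stand-ins:

static void SuspendAll()           { /* suspend every mutator thread */ }
static void ResumeAll()            { /* resume them */ }
static void SwapAllocEntryPoints() { /* install counting allocators */ }

void InstrumentAllocEntryPoints(bool suspended) {
  if (!suspended) {
    SuspendAll();  // callers that already suspended must not re-suspend
  }
  SwapAllocEntryPoints();
  if (!suspended) {
    ResumeAll();
  }
}
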
diff --git a/runtime/runtime.h b/runtime/runtime.h
index cfb1abc477..f9c017b278 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -390,7 +390,7 @@ class Runtime {
void ResetStats(int kinds);
- void SetStatsEnabled(bool new_state);
+ void SetStatsEnabled(bool new_state, bool suspended);
enum class NativeBridgeAction { // private
kUnload,
diff --git a/runtime/stack.h b/runtime/stack.h
index 8e5da3587d..44e36c478e 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -604,8 +604,8 @@ class StackVisitor {
* | Compiler temp region | ... (reg >= max_num_special_temps)
* | . |
* | . |
- * | V[max_num_special_temps + 1] |
- * | V[max_num_special_temps + 0] |
+ * | V[max_num_special_temps + 1] |
+ * | V[max_num_special_temps + 0] |
* +-------------------------------+
* | OUT[outs-1] |
* | OUT[outs-2] |
diff --git a/runtime/thread.cc b/runtime/thread.cc
index c54bebe1ce..650b0f95d4 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -961,7 +961,7 @@ void Thread::DumpStack(std::ostream& os) const {
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr));
+ DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
}
DumpJavaStack(os);
} else {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 6dcc5fe6bb..b32e0429b1 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -373,11 +373,9 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int
// Enable count of allocs if specified in the flags.
if ((flags && kTraceCountAllocs) != 0) {
- runtime->SetStatsEnabled(true);
+ runtime->SetStatsEnabled(true, true);
}
-
-
if (sampling_enabled) {
CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
reinterpret_cast<void*>(interval_us)),
@@ -492,7 +490,7 @@ void Trace::FinishTracing() {
size_t final_offset = cur_offset_.LoadRelaxed();
if ((flags_ & kTraceCountAllocs) != 0) {
- Runtime::Current()->SetStatsEnabled(false);
+ Runtime::Current()->SetStatsEnabled(false, true);
}
std::set<mirror::ArtMethod*> visited_methods;
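
[Editor's note] On the flag test in the hunk above: selecting a bit out of flags requires bitwise &; logical && would be true whenever flags is nonzero at all. A minimal illustration (the flag's numeric value is an assumption for this sketch):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kTraceCountAllocs = 0x04;  // assumed value, for illustration
  uint32_t flags = 0x01;  // some other flag set; alloc counting NOT requested
  assert((flags && kTraceCountAllocs) != 0);  // logical AND: true for ANY nonzero flags
  assert((flags & kTraceCountAllocs) == 0);   // bitwise AND: correctly false here
  return 0;
}
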
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f24128137e..f28d4883b2 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -311,9 +311,15 @@ MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream&
verifier->Verify();
verifier->DumpFailures(os);
os << verifier->info_messages_.str();
- verifier->Dump(os);
-
- return verifier;
+ // Only dump and return if there are no hard failures. Otherwise the verifier may not be fully
+ // initialized, and querying any info is dangerous/can abort.
+ if (verifier->have_pending_hard_failure_) {
+ delete verifier;
+ return nullptr;
+ } else {
+ verifier->Dump(os);
+ return verifier;
+ }
}
MethodVerifier::MethodVerifier(Thread* self,
@@ -3345,8 +3351,8 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
}
mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
- bool is_range) {
- DCHECK(Runtime::Current()->IsStarted());
+ bool is_range) {
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
is_range);
if (res_method == nullptr) {
@@ -3861,7 +3867,7 @@ mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
bool is_primitive) {
- DCHECK(Runtime::Current()->IsStarted());
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
if (field == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
@@ -3920,7 +3926,7 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
bool is_primitive) {
- DCHECK(Runtime::Current()->IsStarted());
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
if (field == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();