Make ART compile with GCC -O0 again.
Tidy up InstructionSetFeatures so that it has an architecture-dependent
type hierarchy (a rough sketch follows).
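A minimal sketch of the intended shape, assuming one virtual base with
per-architecture subclasses and factories probing different feature sources.
All names here (FromCppDefines, FromCpuInfo, HasDivideInstruction, etc.) are
illustrative assumptions modelled on the description above, not the final API:

  #include <memory>
  #include <string>

  enum class InstructionSet { kArm, kArm64, kX86, kX86_64, kMips };

  class InstructionSetFeatures {
   public:
    virtual ~InstructionSetFeatures() {}
    // Hypothetical probes: build-time defines vs. runtime /proc/cpuinfo.
    static std::unique_ptr<const InstructionSetFeatures> FromCppDefines();
    static std::unique_ptr<const InstructionSetFeatures> FromCpuInfo();
    virtual InstructionSet GetInstructionSet() const = 0;
    virtual bool Equals(const InstructionSetFeatures& other) const = 0;
    virtual std::string GetFeatureString() const = 0;
  };

  class ArmInstructionSetFeatures final : public InstructionSetFeatures {
   public:
    explicit ArmInstructionSetFeatures(bool has_div) : has_div_(has_div) {}
    InstructionSet GetInstructionSet() const override { return InstructionSet::kArm; }
    bool Equals(const InstructionSetFeatures& other) const override {
      return other.GetInstructionSet() == InstructionSet::kArm &&
          static_cast<const ArmInstructionSetFeatures&>(other).has_div_ == has_div_;
    }
    std::string GetFeatureString() const override { return has_div_ ? "div" : "-div"; }
    // Architecture-specific query, only available on the Arm subclass.
    bool HasDivideInstruction() const { return has_div_; }
   private:
    const bool has_div_;
  };

  inline std::unique_ptr<const InstructionSetFeatures>
      InstructionSetFeatures::FromCppDefines() {
  #if defined(__ARM_ARCH_EXT_IDIV__)
    return std::unique_ptr<const InstructionSetFeatures>(new ArmInstructionSetFeatures(true));
  #else
    return std::unique_ptr<const InstructionSetFeatures>(new ArmInstructionSetFeatures(false));
  #endif
  }

  inline std::unique_ptr<const InstructionSetFeatures>
      InstructionSetFeatures::FromCpuInfo() {
    // Real code would parse /proc/cpuinfo (and getauxval(AT_HWCAP)); this stub
    // mirrors the build-time answer so the sketch stays self-contained.
    return FromCppDefines();
  }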
Extend instruction_set_test to warn when the InstructionSetFeatures in use
disagree with those derived from system properties, AT_HWCAP and
/proc/cpuinfo (sketched below).
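Roughly what the cross-check could look like as a gtest, reusing the
hypothetical factories from the sketch above; the test name and the
plain-stderr warning are assumptions, not the actual test code:

  #include <gtest/gtest.h>
  #include <iostream>

  TEST(InstructionSetTest, FeaturesAgreeWithRuntimeProbes) {
    auto build_time = InstructionSetFeatures::FromCppDefines();
    auto run_time = InstructionSetFeatures::FromCpuInfo();
    if (!build_time->Equals(*run_time)) {
      // Warn rather than fail: a mismatch is worth investigating, but may just
      // mean the binary was built for a different device than it runs on.
      std::cerr << "Warning: compile-time features '" << build_time->GetFeatureString()
                << "' disagree with runtime features '" << run_time->GetFeatureString()
                << "'" << std::endl;
    }
  }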
Clean up the class linker entrypoint logic so that it no longer returns
entry points but instead tests whether the passed code is the particular
entrypoint. This works around image trampolines that replicate entrypoints
(see the standalone sketch below).
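A standalone sketch of the before/after, using a toy FakeLinker with made-up
member names in place of the real ClassLinker internals. The point is that a
predicate can accept every alias of a trampoline, while handing back a single
pointer for callers to compare against cannot:

  #include <cassert>

  class FakeLinker {
   public:
    FakeLinker(const void* runtime_stub, const void* image_copy)
        : stub_(runtime_stub), image_trampoline_(image_copy) {}
    // Old style: return one pointer and let callers compare; this misses the
    // image's replicated copy of the same trampoline.
    const void* GetQuickResolutionTrampoline() const { return image_trampoline_; }
    // New style: test the candidate code pointer against every known alias.
    bool IsQuickResolutionStub(const void* code) const {
      return code == stub_ || code == image_trampoline_;
    }
   private:
    const void* const stub_;
    const void* const image_trampoline_;
  };

  int main() {
    int stub_code = 0;
    int image_code = 0;  // stands in for the replicated image trampoline
    FakeLinker linker(&stub_code, &image_code);
    assert(linker.IsQuickResolutionStub(&stub_code));   // runtime stub matches
    assert(linker.IsQuickResolutionStub(&image_code));  // replicated copy matches too
    return 0;
  }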
Bug: 17993736
Change-Id: I5f4b49e88c3b02a79f9bee04f83395146ed7be23
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 1a65d99..664a412 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -24,7 +24,6 @@
#include "class_linker.h"
#include "dex_cache.h"
#include "dex_file.h"
-#include "entrypoints/entrypoint_utils.h"
#include "method_helper.h"
#include "object-inl.h"
#include "object_array.h"
@@ -176,32 +175,6 @@
}
}
-inline void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
- if (!kIsDebugBuild) {
- return;
- }
- if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
- return;
- }
- if (pc == GetQuickInstrumentationExitPc()) {
- return;
- }
- const void* code = GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) {
- return;
- }
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (code == class_linker->GetQuickResolutionTrampoline() ||
- code == class_linker->GetQuickToInterpreterBridgeTrampoline()) {
- return;
- }
- DCHECK(IsWithinQuickCode(pc))
- << PrettyMethod(this)
- << " pc=" << std::hex << pc
- << " code=" << code
- << " size=" << GetCodeSize();
-}
-
inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
DCHECK(!Runtime::Current()->IsStarted());
return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
@@ -222,27 +195,6 @@
SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
}
-inline const void* ArtMethod::GetQuickOatEntryPoint() {
- if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods.
- DCHECK(entry_point != runtime->GetClassLinker()->GetQuickToInterpreterBridgeTrampoline());
- if (UNLIKELY(entry_point == GetQuickToInterpreterBridge()) ||
- UNLIKELY(entry_point == runtime->GetClassLinker()->GetQuickGenericJniTrampoline())) {
- return nullptr;
- }
- return entry_point;
-}
-
-inline const void* ArtMethod::GetQuickOatCodePointer() {
- return EntryPointToCodePointer(GetQuickOatEntryPoint());
-}
-
inline const uint8_t* ArtMethod::GetMappingTable() {
const void* code_pointer = GetQuickOatCodePointer();
if (code_pointer == nullptr) {
@@ -341,69 +293,17 @@
return result;
}
-inline uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) {
+inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
return pc - reinterpret_cast<uintptr_t>(code);
}
-inline uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc, const void* quick_entry_point) {
- DCHECK(quick_entry_point != GetQuickToInterpreterBridge());
- DCHECK(quick_entry_point == Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this));
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-
template<VerifyObjectFlags kVerifyFlags>
inline void ArtMethod::SetNativeMethod(const void* native_method) {
SetFieldPtr<false, true, kVerifyFlags>(
OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
}
-inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
- if (UNLIKELY(IsPortableCompiled())) {
- // Portable compiled dex bytecode or jni stub.
- return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
- }
- Runtime* runtime = Runtime::Current();
- // For Proxy method we exclude direct method (there is only one direct method - constructor).
- // Direct method is cloned from original java.lang.reflect.Proxy class together with code
- // and as a result it is executed as usual quick compiled method without any stubs.
- // So the frame info should be returned as it is a quick method not a stub.
- if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod() && !IsDirect())) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
- if (UNLIKELY(IsRuntimeMethod())) {
- return runtime->GetRuntimeMethodFrameInfo(this);
- }
-
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods. And we really shouldn't see a failure for non-native methods here.
- DCHECK(entry_point != runtime->GetClassLinker()->GetQuickToInterpreterBridgeTrampoline());
- CHECK(entry_point != GetQuickToInterpreterBridge());
-
- if (UNLIKELY(entry_point == runtime->GetClassLinker()->GetQuickGenericJniTrampoline())) {
- // Generic JNI frame.
- DCHECK(IsNative());
- StackHandleScope<1> hs(Thread::Current());
- uint32_t handle_refs =
- MethodHelper(hs.NewHandle(this)).GetNumberOfReferenceArgsWithoutReceiver() + 1;
- size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
- // Callee saves + handle scope + method ref + alignment
- size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
- - sizeof(void*) // callee-save frame stores a whole method pointer
- + sizeof(StackReference<mirror::ArtMethod>),
- kStackAlignment);
-
- return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
- }
-
- const void* code_pointer = EntryPointToCodePointer(entry_point);
- return GetQuickFrameInfo(code_pointer);
-}
-
inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
DCHECK(code_pointer != nullptr);
DCHECK_EQ(code_pointer, GetQuickOatCodePointer());
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 787c767..5c72e55 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -23,6 +23,8 @@
#include "class-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "jni_internal.h"
@@ -203,7 +205,7 @@
return DexFile::kDexNoIndex;
}
-uintptr_t ArtMethod::ToNativePc(const uint32_t dex_pc) {
+uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc) {
const void* entry_point = GetQuickOatEntryPoint();
MappingTable table(
entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
@@ -281,6 +283,36 @@
return found_dex_pc;
}
+void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
+ if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
+ return;
+ }
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ return;
+ }
+ const void* code = GetEntryPointFromQuickCompiledCode();
+ if (code == GetQuickInstrumentationEntryPoint()) {
+ return;
+ }
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickResolutionStub(code)) {
+ return;
+ }
+ /*
+ * During a stack walk, a return PC may point past the end of the code
+ * when the last instruction is a call that isn't expected to return.
+ * Thus, we check pc <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb, both pc and code are offset by 1, indicating the Thumb state.
+ */
+ CHECK(PcIsWithinQuickCode(pc))
+ << PrettyMethod(this)
+ << " pc=" << std::hex << pc
+ << " code=" << code
+ << " size=" << GetCodeSize();
+}
+
bool ArtMethod::IsEntrypointInterpreter() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
@@ -294,6 +326,31 @@
}
}
+const void* ArtMethod::GetQuickOatEntryPoint() {
+ if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
+ return nullptr;
+ }
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ // On failure, instead of nullptr we get the quick-generic-jni-trampoline for
+ // native methods (indicating generic JNI) or the quick-to-interpreter-bridge
+ // (but not the trampoline) for non-native methods.
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickGenericJniStub(code)) {
+ return nullptr;
+ }
+ return code;
+}
+
+#ifndef NDEBUG
+uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
+ CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
+ CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this));
+ return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+}
+#endif
+
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -379,8 +436,53 @@
self->PopManagedStackFragment(fragment);
}
-void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_fast) {
- DCHECK(Thread::Current() == self);
+QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
+ if (UNLIKELY(IsPortableCompiled())) {
+ // Portable compiled dex bytecode or jni stub.
+ return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
+ }
+ Runtime* runtime = Runtime::Current();
+ // For proxy methods we exclude the direct method (there is only one: the constructor).
+ // The direct method is cloned from the original java.lang.reflect.Proxy class together
+ // with its code, so it executes like a normal quick-compiled method without any stubs.
+ // Its frame info should therefore be returned as for a quick method, not a stub.
+ if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod() && !IsDirect())) {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ if (UNLIKELY(IsRuntimeMethod())) {
+ return runtime->GetRuntimeMethodFrameInfo(this);
+ }
+
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native methods
+ // (indicating generic JNI) or the quick-to-interpreter-bridge (but not the trampoline) for
+ // non-native methods; we really shouldn't see a failure for non-native methods here.
+ DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
+
+ if (class_linker->IsQuickGenericJniStub(entry_point)) {
+ // Generic JNI frame.
+ DCHECK(IsNative());
+ StackHandleScope<1> hs(Thread::Current());
+ uint32_t handle_refs =
+ MethodHelper(hs.NewHandle(this)).GetNumberOfReferenceArgsWithoutReceiver() + 1;
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
+ QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+ // Callee saves + handle scope + method ref + alignment
+ size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
+ - sizeof(void*) // callee-save frame stores a whole method pointer
+ + sizeof(StackReference<mirror::ArtMethod>),
+ kStackAlignment);
+
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+ }
+
+ const void* code_pointer = EntryPointToCodePointer(entry_point);
+ return GetQuickFrameInfo(code_pointer);
+}
+
+void ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
CHECK(IsNative()) << PrettyMethod(this);
CHECK(!IsFastNative()) << PrettyMethod(this);
CHECK(native_method != NULL) << PrettyMethod(this);
@@ -390,10 +492,10 @@
SetNativeMethod(native_method);
}
-void ArtMethod::UnregisterNative(Thread* self) {
+void ArtMethod::UnregisterNative() {
CHECK(IsNative() && !IsFastNative()) << PrettyMethod(this);
// restore stub to lookup native pointer via dlsym
- RegisterNative(self, GetJniDlsymLookupStub(), false);
+ RegisterNative(GetJniDlsymLookupStub(), false);
}
} // namespace mirror
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 939d856..1dbfe5d 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -39,7 +39,7 @@
namespace mirror {
-typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper& mh,
+typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper* mh,
const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result);
// C++ mirror of java.lang.reflect.ArtMethod.
@@ -302,7 +302,10 @@
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Check whether the given PC is within the quick compiled code associated with this
+ // method's quick entrypoint. This check is not robust in the presence of instrumentation
+ // and similar mechanisms, and is only used for debugging.
+ bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
if (code == 0) {
return pc == 0;
@@ -329,16 +332,19 @@
void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetPortableOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static const void* EntryPointToCodePointer(const void* entry_point) ALWAYS_INLINE {
+ ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
code &= ~0x1; // TODO: Make this Thumb2 specific.
return reinterpret_cast<const void*>(code);
}
- // Actual entry point pointer to compiled oat code or nullptr.
+ // Actual entry point pointer to compiled oat code, or nullptr if the method has none.
const void* GetQuickOatEntryPoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Actual pointer to compiled oat code or nullptr.
- const void* GetQuickOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const void* GetQuickOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return EntryPointToCodePointer(GetQuickOatEntryPoint());
+ }
// Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
const uint8_t* GetMappingTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -378,24 +384,25 @@
QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t GetReturnPcOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetReturnPcOffsetInBytes(GetFrameSizeInBytes());
+ FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetReturnPcOffset(GetFrameSizeInBytes());
}
- size_t GetReturnPcOffsetInBytes(uint32_t frame_size_in_bytes)
+ FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
- return frame_size_in_bytes - sizeof(void*);
+ return FrameOffset(frame_size_in_bytes - sizeof(void*));
}
- size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return sizeof(void*);
+ FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(sizeof(void*), GetFrameSizeInBytes());
+ return FrameOffset(sizeof(void*));
}
- void RegisterNative(Thread* self, const void* native_method, bool is_fast)
+ void RegisterNative(const void* native_method, bool is_fast)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset NativeMethodOffset() {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
@@ -423,16 +430,23 @@
bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uintptr_t NativePcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uintptr_t NativePcOffset(const uintptr_t pc, const void* quick_entry_point)
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#ifdef NDEBUG
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+ }
+#else
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#endif
// Converts a native PC to a dex PC.
uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
- uintptr_t ToNativePc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t ToNativeQuickPc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 1320ab7..64408a6 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -17,8 +17,6 @@
#ifndef ART_RUNTIME_MIRROR_STRING_H_
#define ART_RUNTIME_MIRROR_STRING_H_
-#include <gtest/gtest.h>
-
#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
@@ -163,7 +161,8 @@
static GcRoot<Class> java_lang_String_;
friend struct art::StringOffsets; // for verifying offset information
- FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount
+ ART_FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount
+
DISALLOW_IMPLICIT_CONSTRUCTORS(String);
};