summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.common.mk29
-rw-r--r--build/Android.libart-compiler.mk1
-rw-r--r--src/base/macros.h6
-rw-r--r--src/base/stringpiece.cc6
-rw-r--r--src/base/stringpiece.h6
-rw-r--r--src/class_linker.cc103
-rw-r--r--src/class_linker.h25
-rw-r--r--src/compiler/dex/arena_bit_vector.cc49
-rw-r--r--src/compiler/dex/arena_bit_vector.h36
-rw-r--r--src/compiler/dex/dataflow_iterator-inl.h68
-rw-r--r--src/compiler/dex/dataflow_iterator.cc63
-rw-r--r--src/compiler/dex/dataflow_iterator.h11
-rw-r--r--src/compiler/dex/frontend.cc2
-rw-r--r--src/compiler/dex/mir_dataflow.cc26
-rw-r--r--src/compiler/dex/mir_graph.cc4
-rw-r--r--src/compiler/dex/mir_graph.h5
-rw-r--r--src/compiler/dex/mir_optimization.cc9
-rw-r--r--src/compiler/dex/portable/mir_to_gbc.cc4
-rw-r--r--src/compiler/dex/quick/arm/assemble_arm.cc1
-rw-r--r--src/compiler/dex/quick/arm/call_arm.cc1
-rw-r--r--src/compiler/dex/quick/arm/codegen_arm.h245
-rw-r--r--src/compiler/dex/quick/arm/fp_arm.cc1
-rw-r--r--src/compiler/dex/quick/arm/int_arm.cc1
-rw-r--r--src/compiler/dex/quick/arm/target_arm.cc1
-rw-r--r--src/compiler/dex/quick/arm/utility_arm.cc2
-rw-r--r--src/compiler/dex/quick/codegen_util.cc169
-rw-r--r--src/compiler/dex/quick/gen_common.cc202
-rw-r--r--src/compiler/dex/quick/gen_invoke.cc197
-rw-r--r--src/compiler/dex/quick/gen_loadstore.cc1
-rw-r--r--src/compiler/dex/quick/mips/assemble_mips.cc1
-rw-r--r--src/compiler/dex/quick/mips/call_mips.cc1
-rw-r--r--src/compiler/dex/quick/mips/codegen_mips.h222
-rw-r--r--src/compiler/dex/quick/mips/fp_mips.cc1
-rw-r--r--src/compiler/dex/quick/mips/int_mips.cc1
-rw-r--r--src/compiler/dex/quick/mips/target_mips.cc1
-rw-r--r--src/compiler/dex/quick/mips/utility_mips.cc1
-rw-r--r--src/compiler/dex/quick/mir_to_lir-inl.h201
-rw-r--r--src/compiler/dex/quick/mir_to_lir.cc12
-rw-r--r--src/compiler/dex/quick/mir_to_lir.h59
-rw-r--r--src/compiler/dex/quick/ralloc_util.cc29
-rw-r--r--src/compiler/dex/quick/x86/assemble_x86.cc1
-rw-r--r--src/compiler/dex/quick/x86/call_x86.cc1
-rw-r--r--src/compiler/dex/quick/x86/codegen_x86.h222
-rw-r--r--src/compiler/dex/quick/x86/fp_x86.cc1
-rw-r--r--src/compiler/dex/quick/x86/int_x86.cc1
-rw-r--r--src/compiler/dex/quick/x86/target_x86.cc1
-rw-r--r--src/compiler/dex/quick/x86/utility_x86.cc1
-rw-r--r--src/compiler/dex/ssa_transformation.cc2
-rw-r--r--src/compiler/dex/vreg_analysis.cc18
-rw-r--r--src/compiler/driver/compiler_driver.cc391
-rw-r--r--src/compiler/driver/compiler_driver.h47
-rw-r--r--src/compiler/driver/compiler_driver_test.cc2
-rw-r--r--src/compiler/driver/dex_compilation_unit.cc30
-rw-r--r--src/compiler/driver/dex_compilation_unit.h6
-rw-r--r--src/compiler/llvm/gbc_expander.cc22
-rw-r--r--src/compiler/llvm/llvm_compilation_unit.h6
-rw-r--r--src/compiler/llvm/runtime_support_llvm.cc1
-rw-r--r--src/compiler/stubs/portable/stubs.cc136
-rw-r--r--src/compiler/stubs/quick/stubs.cc262
-rw-r--r--src/compiler/stubs/stubs.h59
-rw-r--r--src/dex2oat.cc15
-rw-r--r--src/dex_file.cc56
-rw-r--r--src/dex_file.h9
-rw-r--r--src/dex_file_verifier.cc36
-rw-r--r--src/dex_instruction-inl.h12
-rw-r--r--src/dex_instruction.cc14
-rw-r--r--src/dex_instruction.h39
-rw-r--r--src/image_test.cc3
-rw-r--r--src/image_writer.cc35
-rw-r--r--src/image_writer.h12
-rw-r--r--src/instrumentation.cc4
-rw-r--r--src/interpreter/interpreter.cc255
-rw-r--r--src/interpreter/interpreter.h4
-rw-r--r--src/locks.cc5
-rw-r--r--src/locks.h2
-rw-r--r--src/mirror/abstract_method-inl.h3
-rw-r--r--src/mirror/abstract_method.cc51
-rw-r--r--src/mirror/abstract_method.h5
-rw-r--r--src/mirror/dex_cache-inl.h40
-rw-r--r--src/mirror/dex_cache.cc12
-rw-r--r--src/oat.cc102
-rw-r--r--src/oat.h18
-rw-r--r--src/oat/runtime/arm/oat_support_entrypoints_arm.cc20
-rw-r--r--src/oat/runtime/arm/runtime_support_arm.S42
-rw-r--r--src/oat/runtime/mips/oat_support_entrypoints_mips.cc20
-rw-r--r--src/oat/runtime/mips/runtime_support_mips.S65
-rw-r--r--src/oat/runtime/oat_support_entrypoints.h28
-rw-r--r--src/oat/runtime/support_interpreter.cc7
-rw-r--r--src/oat/runtime/support_invoke.cc1
-rw-r--r--src/oat/runtime/support_stubs.cc34
-rw-r--r--src/oat/runtime/x86/oat_support_entrypoints_x86.cc20
-rw-r--r--src/oat/runtime/x86/runtime_support_x86.S49
-rw-r--r--src/oat_test.cc16
-rw-r--r--src/oat_writer.cc192
-rw-r--r--src/oat_writer.h37
-rw-r--r--src/oatdump.cc2
-rw-r--r--src/output_stream_test.cc20
-rw-r--r--src/runtime.cc4
-rw-r--r--src/runtime_support.h18
-rw-r--r--src/stack.cc9
-rw-r--r--src/stack.h10
-rw-r--r--src/thread.cc7
-rw-r--r--src/utf.cc17
-rw-r--r--src/utf.h6
-rw-r--r--src/vector_output_stream.cc16
-rw-r--r--src/vector_output_stream.h23
-rw-r--r--src/verifier/method_verifier.cc879
-rw-r--r--src/verifier/method_verifier.h67
-rw-r--r--src/verifier/reg_type.cc313
-rw-r--r--src/verifier/reg_type.h618
-rw-r--r--src/verifier/reg_type_cache-inl.h8
-rw-r--r--src/verifier/reg_type_cache.cc129
-rw-r--r--src/verifier/reg_type_cache.h11
-rw-r--r--src/verifier/reg_type_test.cc12
-rw-r--r--src/verifier/register_line-inl.h35
-rw-r--r--src/verifier/register_line.cc128
-rw-r--r--src/verifier/register_line.h29
-rw-r--r--test/045-reflect-array/expected.txt3
-rw-r--r--test/045-reflect-array/src/Main.java11
-rw-r--r--test/108-check-cast/expected.txt1
-rw-r--r--test/108-check-cast/info.txt10
-rw-r--r--test/108-check-cast/src/Main.java48
122 files changed, 4248 insertions, 2462 deletions
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 508ff1bb80..ba5fe9f2ed 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -19,6 +19,9 @@ ifneq ($(wildcard art/SMALL_ART),)
$(info Enabling ART_SMALL_MODE because of existence of art/SMALL_ART)
ART_SMALL_MODE := true
endif
+ifeq ($(WITH_ART_SMALL_MODE), true)
+ART_SMALL_MODE := true
+endif
ART_USE_PORTABLE_COMPILER := false
ifneq ($(wildcard art/USE_PORTABLE_COMPILER),)
@@ -54,6 +57,7 @@ ART_C_INCLUDES := \
art/src
art_cflags := \
+ -fno-rtti \
-O2 \
-ggdb3 \
-Wall \
@@ -62,13 +66,6 @@ art_cflags := \
-Wstrict-aliasing=3 \
-fstrict-aliasing
-# Enable thread-safety for GCC 4.6 but not for GCC 4.7 where this feature was removed.
-# Enable GCC 4.6 builds with 'export TARGET_GCC_VERSION_EXP=4.6'
-ifneq ($(filter 4.6 4.6.%, $(TARGET_GCC_VERSION)),)
- $(info Enabling thread-safety for GCC $(TARGET_GCC_VERSION))
- art_cflags += -Wthread-safety
-endif
-
ifeq ($(ART_SMALL_MODE),true)
art_cflags += -DART_SMALL_MODE=1
endif
@@ -108,6 +105,22 @@ else
ART_TARGET_CFLAGS += -DANDROID_SMP=0
endif
+# Enable thread-safety for GCC 4.6 on the target but not for GCC 4.7 where this feature was removed.
+ifneq ($(filter 4.6 4.6.%, $(TARGET_GCC_VERSION)),)
+ ART_TARGET_CFLAGS += -Wthread-safety
+else
+ # Warn if not using GCC 4.6 for target builds when not doing a top-level or 'mma' build.
+ ifneq ($(ONE_SHOT_MAKEFILE),)
+ # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6
+ $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.)
+ endif
+endif
+# We build with GCC 4.6 on the host.
+ART_HOST_CFLAGS += -Wthread-safety
+
+# Make host builds easier to debug and profile by not omitting the frame pointer.
+ART_HOST_CFLAGS += -fno-omit-frame-pointer
+
# To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
# ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
@@ -159,6 +172,8 @@ LIBART_COMMON_SRC_FILES := \
src/compiled_method.cc \
src/compiler/driver/compiler_driver.cc \
src/compiler/llvm/runtime_support_llvm.cc \
+ src/compiler/stubs/portable/stubs.cc \
+ src/compiler/stubs/quick/stubs.cc \
src/debugger.cc \
src/dex_file.cc \
src/dex_file_verifier.cc \
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index 4452f05f52..b73a32961e 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -45,7 +45,6 @@ LIBART_COMPILER_SRC_FILES := \
src/compiler/dex/quick/x86/utility_x86.cc \
src/compiler/dex/portable/mir_to_gbc.cc \
src/compiler/dex/mir_dataflow.cc \
- src/compiler/dex/dataflow_iterator.cc \
src/compiler/dex/mir_optimization.cc \
src/compiler/dex/frontend.cc \
src/compiler/dex/mir_graph.cc \
diff --git a/src/base/macros.h b/src/base/macros.h
index 8579872d58..847105d20c 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -136,6 +136,12 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define ALWAYS_INLINE __attribute__ ((always_inline))
#endif
+#if defined (__APPLE__)
+#define HOT_ATTR
+#else
+#define HOT_ATTR __attribute__ ((hot))
+#endif
+
// bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(exp) ({ \
diff --git a/src/base/stringpiece.cc b/src/base/stringpiece.cc
index 715d964a12..47140e3247 100644
--- a/src/base/stringpiece.cc
+++ b/src/base/stringpiece.cc
@@ -21,12 +21,6 @@
namespace art {
-bool operator<(const StringPiece& x, const StringPiece& y) {
- const int r = memcmp(x.data(), y.data(),
- std::min(x.size(), y.size()));
- return ((r < 0) || ((r == 0) && (x.size() < y.size())));
-}
-
void StringPiece::CopyToString(std::string* target) const {
target->assign(ptr_, length_);
}
diff --git a/src/base/stringpiece.h b/src/base/stringpiece.h
index 193f5f7e7b..3664218860 100644
--- a/src/base/stringpiece.h
+++ b/src/base/stringpiece.h
@@ -188,7 +188,11 @@ inline bool operator!=(const StringPiece& x, const StringPiece& y) {
return !(x == y);
}
-bool operator<(const StringPiece& x, const StringPiece& y);
+inline bool operator<(const StringPiece& x, const StringPiece& y) {
+ const int r = memcmp(x.data(), y.data(),
+ std::min(x.size(), y.size()));
+ return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
inline bool operator>(const StringPiece& x, const StringPiece& y) {
return y < x;
diff --git a/src/class_linker.cc b/src/class_linker.cc
index cfad9d1b7d..0fae4248e9 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -44,7 +44,7 @@
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/field-inl.h"
#include "mirror/iftable-inl.h"
#include "mirror/abstract_method.h"
@@ -74,7 +74,9 @@
namespace art {
-void artInterpreterToQuickEntry(Thread* self, ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
@@ -195,12 +197,14 @@ ClassLinker* ClassLinker::CreateFromImage(InternTable* intern_table) {
ClassLinker::ClassLinker(InternTable* intern_table)
// dex_lock_ is recursive as it may be used in stack dumping.
- : dex_lock_("ClassLinker dex lock", kDefaultMutexLevel, true),
+ : dex_lock_("ClassLinker dex lock", kDefaultMutexLevel),
class_roots_(NULL),
array_iftable_(NULL),
init_done_(false),
is_dirty_(false),
- intern_table_(intern_table) {
+ intern_table_(intern_table),
+ portable_resolution_trampoline_(NULL),
+ quick_resolution_trampoline_(NULL) {
CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
}
@@ -662,22 +666,22 @@ bool ClassLinker::GenerateOatFile(const std::string& dex_filename,
}
void ClassLinker::RegisterOatFile(const OatFile& oat_file) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ WriterMutexLock mu(Thread::Current(), dex_lock_);
RegisterOatFileLocked(oat_file);
}
void ClassLinker::RegisterOatFileLocked(const OatFile& oat_file) {
- dex_lock_.AssertHeld(Thread::Current());
-#ifndef NDEBUG
- for (size_t i = 0; i < oat_files_.size(); ++i) {
- CHECK_NE(&oat_file, oat_files_[i]) << oat_file.GetLocation();
+ dex_lock_.AssertExclusiveHeld(Thread::Current());
+ if (kIsDebugBuild) {
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ CHECK_NE(&oat_file, oat_files_[i]) << oat_file.GetLocation();
+ }
}
-#endif
oat_files_.push_back(&oat_file);
}
OatFile* ClassLinker::OpenOat(const ImageSpace* space) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ WriterMutexLock mu(Thread::Current(), dex_lock_);
const Runtime* runtime = Runtime::Current();
const ImageHeader& image_header = space->GetImageHeader();
// Grab location but don't use Object::AsString as we haven't yet initialized the roots to
@@ -708,7 +712,7 @@ OatFile* ClassLinker::OpenOat(const ImageSpace* space) {
}
const OatFile* ClassLinker::FindOpenedOatFileForDexFile(const DexFile& dex_file) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
return FindOpenedOatFileFromDexLocation(dex_file.GetLocation());
}
@@ -754,7 +758,7 @@ static const DexFile* FindDexFileInOatLocation(const std::string& dex_location,
const DexFile* ClassLinker::FindOrCreateOatFileForDexLocation(const std::string& dex_location,
const std::string& oat_location) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ WriterMutexLock mu(Thread::Current(), dex_lock_);
return FindOrCreateOatFileForDexLocationLocked(dex_location, oat_location);
}
@@ -856,7 +860,7 @@ const DexFile* ClassLinker::VerifyAndOpenDexFileFromOatFile(const OatFile* oat_f
}
const DexFile* ClassLinker::FindDexFileInOatFileFromDexLocation(const std::string& dex_location) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ WriterMutexLock mu(Thread::Current(), dex_lock_);
const OatFile* open_oat_file = FindOpenedOatFileFromDexLocation(dex_location);
if (open_oat_file != NULL) {
@@ -923,7 +927,7 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string&
}
const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
return FindOatFileFromOatLocationLocked(oat_location);
}
@@ -951,6 +955,8 @@ void ClassLinker::InitFromImage() {
CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
CHECK(oat_file->GetOatHeader().GetImageFileLocation().empty());
+ portable_resolution_trampoline_ = oat_file->GetOatHeader().GetPortableResolutionTrampoline();
+ quick_resolution_trampoline_ = oat_file->GetOatHeader().GetQuickResolutionTrampoline();
mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
mirror::ObjectArray<mirror::DexCache>* dex_caches =
dex_caches_object->AsObjectArray<mirror::DexCache>();
@@ -1037,17 +1043,12 @@ void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) {
return;
}
- // Check if object is a method without its code set and point it to the resolution trampoline.
+ // Set entry points to interpreter for methods in interpreter only mode.
if (obj->IsMethod()) {
mirror::AbstractMethod* method = obj->AsMethod();
- // Install entry point from interpreter.
- if (method->GetEntryPointFromCompiledCode() == NULL && !method->IsNative() && !method->IsProxyMethod()) {
- method->SetEntryPointFromInterpreter(interpreter::EnterInterpreterFromInterpreter);
- } else {
- method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry);
- }
- if (method->GetEntryPointFromCompiledCode() == NULL) {
- method->SetEntryPointFromCompiledCode(GetResolutionTrampoline());
+ if (Runtime::Current()->GetInstrumentation()->InterpretOnly() && !method->IsNative()) {
+ method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
+ method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
}
}
}
@@ -1059,14 +1060,14 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg) {
visitor(class_roots_, arg);
Thread* self = Thread::Current();
{
- MutexLock mu(self, dex_lock_);
+ ReaderMutexLock mu(self, dex_lock_);
for (size_t i = 0; i < dex_caches_.size(); i++) {
visitor(dex_caches_[i], arg);
}
}
{
- MutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
visitor(it->second, arg);
@@ -1081,7 +1082,7 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg) {
}
void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const {
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
if (!visitor(it->second, arg)) {
@@ -1608,14 +1609,16 @@ static void LinkCode(SirtRef<mirror::AbstractMethod>& method, const OatFile::Oat
oat_method.LinkMethod(method.get());
// Install entry point from interpreter.
- if (method->GetEntryPointFromCompiledCode() == NULL && !method->IsNative() &&
- !method->IsProxyMethod()) {
- method->SetEntryPointFromInterpreter(interpreter::EnterInterpreterFromInterpreter);
+ Runtime* runtime = Runtime::Current();
+ bool enter_interpreter = method->GetEntryPointFromCompiledCode() == NULL ||
+ (runtime->GetInstrumentation()->InterpretOnly() &&
+ !method->IsNative() && !method->IsProxyMethod());
+ if (enter_interpreter) {
+ method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
} else {
method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry);
}
- Runtime* runtime = Runtime::Current();
if (method->IsAbstract()) {
method->SetEntryPointFromCompiledCode(GetAbstractMethodErrorStub());
return;
@@ -1623,7 +1626,7 @@ static void LinkCode(SirtRef<mirror::AbstractMethod>& method, const OatFile::Oat
if (method->IsStatic() && !method->IsConstructor()) {
// For static methods excluding the class initializer, install the trampoline.
- method->SetEntryPointFromCompiledCode(GetResolutionTrampoline());
+ method->SetEntryPointFromCompiledCode(GetResolutionTrampoline(runtime->GetClassLinker()));
}
if (method->IsNative()) {
@@ -1631,8 +1634,8 @@ static void LinkCode(SirtRef<mirror::AbstractMethod>& method, const OatFile::Oat
method->UnregisterNative(Thread::Current());
}
- if (method->GetEntryPointFromCompiledCode() == NULL) {
- // No code? You must mean to go into the interpreter.
+ if (enter_interpreter) {
+ // Set entry point from compiled code if there's no code or in interpreter only mode.
method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
}
@@ -1807,7 +1810,7 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, SirtRef<mirror:
}
bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) const {
- dex_lock_.AssertHeld(Thread::Current());
+ dex_lock_.AssertSharedHeld(Thread::Current());
for (size_t i = 0; i != dex_caches_.size(); ++i) {
if (dex_caches_[i]->GetDexFile() == &dex_file) {
return true;
@@ -1817,12 +1820,12 @@ bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) const {
}
bool ClassLinker::IsDexFileRegistered(const DexFile& dex_file) const {
- MutexLock mu(Thread::Current(), dex_lock_);
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
return IsDexFileRegisteredLocked(dex_file);
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) {
- dex_lock_.AssertHeld(Thread::Current());
+ dex_lock_.AssertExclusiveHeld(Thread::Current());
CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()));
dex_caches_.push_back(dex_cache.get());
@@ -1833,7 +1836,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror:
void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
Thread* self = Thread::Current();
{
- MutexLock mu(self, dex_lock_);
+ ReaderMutexLock mu(self, dex_lock_);
if (IsDexFileRegisteredLocked(dex_file)) {
return;
}
@@ -1843,7 +1846,7 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// get to a suspend point.
SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
{
- MutexLock mu(self, dex_lock_);
+ WriterMutexLock mu(self, dex_lock_);
if (IsDexFileRegisteredLocked(dex_file)) {
return;
}
@@ -1852,12 +1855,12 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
}
void ClassLinker::RegisterDexFile(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) {
- MutexLock mu(Thread::Current(), dex_lock_);
+ WriterMutexLock mu(Thread::Current(), dex_lock_);
RegisterDexFileLocked(dex_file, dex_cache);
}
mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const {
- MutexLock mu(Thread::Current(), dex_lock_);
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
// Search assuming unique-ness of dex file.
for (size_t i = 0; i != dex_caches_.size(); ++i) {
mirror::DexCache* dex_cache = dex_caches_[i];
@@ -1883,7 +1886,7 @@ mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file) const {
}
void ClassLinker::FixupDexCaches(mirror::AbstractMethod* resolution_method) const {
- MutexLock mu(Thread::Current(), dex_lock_);
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i != dex_caches_.size(); ++i) {
dex_caches_[i]->Fixup(resolution_method);
}
@@ -2079,7 +2082,7 @@ mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::C
LOG(INFO) << "Loaded class " << descriptor << source;
}
size_t hash = StringPieceHash()(descriptor);
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
Table& classes = image_class ? image_classes_ : classes_;
mirror::Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes);
#ifndef NDEBUG
@@ -2097,7 +2100,7 @@ mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::C
bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
typedef Table::iterator It; // TODO: C++0x auto
// TODO: determine if its better to search classes_ or image_classes_ first
ClassHelper kh;
@@ -2125,7 +2128,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader*
mirror::Class* ClassLinker::LookupClass(const char* descriptor,
const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// TODO: determine if its better to search classes_ or image_classes_ first
mirror::Class* klass = NULL;
// Use image class only if the class_loader is null.
@@ -2165,7 +2168,7 @@ mirror::Class* ClassLinker::LookupClassLocked(const char* descriptor,
void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes) {
classes.clear();
size_t hash = Hash(descriptor);
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
// TODO: determine if its better to search classes_ or image_classes_ first
ClassHelper kh(NULL, this);
@@ -2499,7 +2502,7 @@ mirror::AbstractMethod* ClassLinker::FindMethodForProxy(const mirror::Class* pro
mirror::DexCache* dex_cache = NULL;
{
mirror::ObjectArray<mirror::Class>* resolved_types = proxy_method->GetDexCacheResolvedTypes();
- MutexLock mu(Thread::Current(), dex_lock_);
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i != dex_caches_.size(); ++i) {
if (dex_caches_[i]->GetResolvedTypes() == resolved_types) {
dex_cache = dex_caches_[i];
@@ -3880,7 +3883,7 @@ void ClassLinker::DumpAllClasses(int flags) const {
// lock held, because it might need to resolve a field's type, which would try to take the lock.
std::vector<mirror::Class*> all_classes;
{
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
typedef Table::const_iterator It; // TODO: C++0x auto
for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
all_classes.push_back(it->second);
@@ -3896,13 +3899,13 @@ void ClassLinker::DumpAllClasses(int flags) const {
}
void ClassLinker::DumpForSigQuit(std::ostream& os) const {
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
os << "Loaded classes: " << image_classes_.size() << " image classes; "
<< classes_.size() << " allocated classes\n";
}
size_t ClassLinker::NumLoadedClasses() const {
- MutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
return classes_.size() + image_classes_.size();
}
diff --git a/src/class_linker.h b/src/class_linker.h
index d41373c7d7..eab1fcc814 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -240,7 +240,7 @@ class ClassLinker {
LOCKS_EXCLUDED(dex_lock_);
const OatFile* FindOatFileFromOatLocationLocked(const std::string& location)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
+ SHARED_LOCKS_REQUIRED(dex_lock_);
// Finds the oat file for a dex location, generating the oat file if
// it is missing or out of date. Returns the DexFile from within the
@@ -334,6 +334,14 @@ class ClassLinker {
is_dirty_ = true;
}
+ const void* GetPortableResolutionTrampoline() const {
+ return portable_resolution_trampoline_;
+ }
+
+ const void* GetQuickResolutionTrampoline() const {
+ return quick_resolution_trampoline_;
+ }
+
private:
explicit ClassLinker(InternTable*);
@@ -420,7 +428,7 @@ class ClassLinker {
void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
+ bool IsDexFileRegisteredLocked(const DexFile& dex_file) const SHARED_LOCKS_REQUIRED(dex_lock_);
void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
@@ -489,10 +497,9 @@ class ClassLinker {
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const OatFile* FindOpenedOatFileFromDexLocation(const std::string& dex_location)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_);
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
+ SHARED_LOCKS_REQUIRED(dex_lock_);
const DexFile* VerifyAndOpenDexFileFromOatFile(const OatFile* oat_file,
const std::string& dex_location,
uint32_t dex_location_checksum)
@@ -508,7 +515,7 @@ class ClassLinker {
std::vector<const DexFile*> boot_class_path_;
- mutable Mutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<mirror::DexCache*> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
@@ -522,8 +529,7 @@ class ClassLinker {
mirror::Class* LookupClassLocked(const char* descriptor, const mirror::ClassLoader* class_loader,
size_t hash, const Table& classes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::classlinker_classes_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
// indexes into class_roots_.
// needs to be kept in sync with class_roots_descriptors_.
@@ -595,6 +601,9 @@ class ClassLinker {
InternTable* intern_table_;
+ const void* portable_resolution_trampoline_;
+ const void* quick_resolution_trampoline_;
+
friend class CommonTest;
friend class ImageWriter; // for GetClassRoots
friend class ObjectTest;
diff --git a/src/compiler/dex/arena_bit_vector.cc b/src/compiler/dex/arena_bit_vector.cc
index 6f664e5565..1fbf7740ac 100644
--- a/src/compiler/dex/arena_bit_vector.cc
+++ b/src/compiler/dex/arena_bit_vector.cc
@@ -113,18 +113,6 @@ void ArenaBitVector::Union(const ArenaBitVector* src) {
}
}
-// Are we equal to another bit vector? Note: expandability attributes must also match.
-bool ArenaBitVector::Equal(const ArenaBitVector* src) {
- if (storage_size_ != src->GetStorageSize() ||
- expandable_ != src->IsExpandable())
- return false;
-
- for (unsigned int idx = 0; idx < storage_size_; idx++) {
- if (storage_[idx] != src->GetRawStorageWord(idx)) return false;
- }
- return true;
-}
-
// Count the number of bits that are set.
int ArenaBitVector::NumSetBits()
{
@@ -136,43 +124,6 @@ int ArenaBitVector::NumSetBits()
return count;
}
-// Return the position of the next set bit. -1 means end-of-element reached.
-// TUNING: Hot function.
-int ArenaBitVector::Iterator::Next()
-{
- // Did anything obviously change since we started?
- DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
- DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
-
- if (bit_index_ >= bit_size_) return -1;
-
- uint32_t word_index = bit_index_ >> 5;
- uint32_t end_word_index = bit_size_ >> 5;
- uint32_t word = bit_storage_[word_index++];
-
- // Mask out any bits in the first word we've already considered.
- word &= ~((1 << (bit_index_ & 0x1f))-1);
-
- for (; word_index <= end_word_index;) {
- uint32_t bit_pos = bit_index_ & 0x1f;
- if (word == 0) {
- bit_index_ += (32 - bit_pos);
- word = bit_storage_[word_index++];
- continue;
- }
- for (; bit_pos < 32; bit_pos++) {
- if (word & (1 << bit_pos)) {
- bit_index_++;
- return bit_index_ - 1;
- }
- bit_index_++;
- }
- word = bit_storage_[word_index++];
- }
- bit_index_ = bit_size_;
- return -1;
-}
-
/*
* Mark specified number of bits as "set". Cannot set all bits like ClearAll
* since there might be unused bits - setting those to one will confuse the
diff --git a/src/compiler/dex/arena_bit_vector.h b/src/compiler/dex/arena_bit_vector.h
index f5c471c5d3..a950e82498 100644
--- a/src/compiler/dex/arena_bit_vector.h
+++ b/src/compiler/dex/arena_bit_vector.h
@@ -39,7 +39,33 @@ class ArenaBitVector {
bit_index_(0),
bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {};
- int Next(); // Returns -1 when no next.
+ // Return the position of the next set bit. -1 means end-of-element reached.
+ int Next() {
+ // Did anything obviously change since we started?
+ DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
+ DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
+
+ if (bit_index_ >= bit_size_) return -1;
+
+ uint32_t word_index = bit_index_ / 32;
+ uint32_t word = bit_storage_[word_index];
+ // Mask out any bits in the first word we've already considered.
+ word >>= bit_index_ & 0x1f;
+ if (word == 0) {
+ bit_index_ &= ~0x1f;
+ do {
+ word_index++;
+ if ((word_index * 32) >= bit_size_) {
+ bit_index_ = bit_size_;
+ return -1;
+ }
+ word = bit_storage_[word_index];
+ bit_index_ += 32;
+ } while (word == 0);
+ }
+ bit_index_ += CTZ(word) + 1;
+ return bit_index_ - 1;
+ }
static void* operator new(size_t size, ArenaAllocator* arena) {
return arena->NewMem(sizeof(ArenaBitVector::Iterator), true,
@@ -73,13 +99,19 @@ class ArenaBitVector {
void Copy(ArenaBitVector* src);
void Intersect(const ArenaBitVector* src2);
void Union(const ArenaBitVector* src);
- bool Equal(const ArenaBitVector* src);
+ // Are we equal to another bit vector? Note: expandability attributes must also match.
+ bool Equal(const ArenaBitVector* src) {
+ return (storage_size_ == src->GetStorageSize()) &&
+ (expandable_ == src->IsExpandable()) &&
+ (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0);
+ }
int NumSetBits();
uint32_t GetStorageSize() const { return storage_size_; }
bool IsExpandable() const { return expandable_; }
uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
uint32_t* GetRawStorage() { return storage_; }
+ const uint32_t* GetRawStorage() const { return storage_; }
private:
ArenaAllocator* const arena_;
diff --git a/src/compiler/dex/dataflow_iterator-inl.h b/src/compiler/dex/dataflow_iterator-inl.h
new file mode 100644
index 0000000000..b20004decc
--- /dev/null
+++ b/src/compiler/dex/dataflow_iterator-inl.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
+#define ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
+
+#include "dataflow_iterator.h"
+
+namespace art {
+
+inline BasicBlock* DataflowIterator::NextBody(bool had_change) {
+ changed_ |= had_change;
+ BasicBlock* res = NULL;
+ if (reverse_) {
+ if (is_iterative_ && changed_ && (idx_ < 0)) {
+ idx_ = start_idx_;
+ changed_ = false;
+ }
+ if (idx_ >= 0) {
+ int bb_id = block_id_list_->Get(idx_--);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ } else {
+ if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
+ idx_ = start_idx_;
+ changed_ = false;
+ }
+ if (idx_ < end_idx_) {
+ int bb_id = block_id_list_->Get(idx_++);
+ res = mir_graph_->GetBasicBlock(bb_id);
+ }
+ }
+ return res;
+}
+
+// AllNodes uses the existing GrowableArray iterator, so use different NextBody().
+inline BasicBlock* AllNodesIterator::NextBody(bool had_change) {
+ changed_ |= had_change;
+ BasicBlock* res = NULL;
+ bool keep_looking = true;
+ while (keep_looking) {
+ res = all_nodes_iterator_->Next();
+ if (is_iterative_ && changed_ && (res == NULL)) {
+ all_nodes_iterator_->Reset();
+ changed_ = false;
+ } else if ((res == NULL) || (!res->hidden)) {
+ keep_looking = false;
+ }
+ }
+ return res;
+}
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
diff --git a/src/compiler/dex/dataflow_iterator.cc b/src/compiler/dex/dataflow_iterator.cc
deleted file mode 100644
index bb5b969925..0000000000
--- a/src/compiler/dex/dataflow_iterator.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dataflow_iterator.h"
-
-namespace art {
-
- BasicBlock* DataflowIterator::NextBody(bool had_change) {
- changed_ |= had_change;
- BasicBlock* res = NULL;
- if (reverse_) {
- if (is_iterative_ && changed_ && (idx_ < 0)) {
- idx_ = start_idx_;
- changed_ = false;
- }
- if (idx_ >= 0) {
- int bb_id = block_id_list_->Get(idx_--);
- res = mir_graph_->GetBasicBlock(bb_id);
- }
- } else {
- if (is_iterative_ && changed_ && (idx_ >= end_idx_)) {
- idx_ = start_idx_;
- changed_ = false;
- }
- if (idx_ < end_idx_) {
- int bb_id = block_id_list_->Get(idx_++);
- res = mir_graph_->GetBasicBlock(bb_id);
- }
- }
- return res;
- }
-
- // AllNodes uses the existing GrowableArray iterator, so use different NextBody().
- BasicBlock* AllNodesIterator::NextBody(bool had_change) {
- changed_ |= had_change;
- BasicBlock* res = NULL;
- bool keep_looking = true;
- while (keep_looking) {
- res = all_nodes_iterator_->Next();
- if (is_iterative_ && changed_ && (res == NULL)) {
- all_nodes_iterator_->Reset();
- changed_ = false;
- } else if ((res == NULL) || (!res->hidden)) {
- keep_looking = false;
- }
- }
- return res;
- }
-
-} // namespace art
diff --git a/src/compiler/dex/dataflow_iterator.h b/src/compiler/dex/dataflow_iterator.h
index a4b38bd80f..12cbf9cadf 100644
--- a/src/compiler/dex/dataflow_iterator.h
+++ b/src/compiler/dex/dataflow_iterator.h
@@ -71,7 +71,7 @@ namespace art {
idx_(0),
changed_(false) {}
- virtual BasicBlock* NextBody(bool had_change);
+ virtual BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
MIRGraph* const mir_graph_;
const bool is_iterative_;
@@ -86,7 +86,6 @@ namespace art {
class ReachableNodesIterator : public DataflowIterator {
public:
-
ReachableNodesIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative, 0,
mir_graph->GetNumReachableBlocks(), false) {
@@ -97,7 +96,6 @@ namespace art {
class PreOrderDfsIterator : public DataflowIterator {
public:
-
PreOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative, 0,
mir_graph->GetNumReachableBlocks(), false) {
@@ -119,7 +117,6 @@ namespace art {
class ReversePostOrderDfsIterator : public DataflowIterator {
public:
-
ReversePostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative,
mir_graph->GetNumReachableBlocks() -1, 0, true) {
@@ -130,7 +127,6 @@ namespace art {
class PostOrderDOMIterator : public DataflowIterator {
public:
-
PostOrderDOMIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative, 0,
mir_graph->GetNumReachableBlocks(), false) {
@@ -141,18 +137,17 @@ namespace art {
class AllNodesIterator : public DataflowIterator {
public:
-
AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
all_nodes_iterator_ =
new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator (mir_graph->GetBlockList());
}
- virtual void Reset() {
+ void Reset() {
all_nodes_iterator_->Reset();
}
- virtual BasicBlock* NextBody(bool had_change);
+ BasicBlock* NextBody(bool had_change) ALWAYS_INLINE;
private:
GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_;
diff --git a/src/compiler/dex/frontend.cc b/src/compiler/dex/frontend.cc
index ca751ab849..e015645584 100644
--- a/src/compiler/dex/frontend.cc
+++ b/src/compiler/dex/frontend.cc
@@ -18,7 +18,7 @@
#include "compiler/driver/compiler_driver.h"
#include "compiler_internals.h"
-#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
#if defined(ART_USE_PORTABLE_COMPILER)
#include "compiler/llvm/llvm_compilation_unit.h"
#include "compiler/dex/portable/mir_to_gbc.h"
diff --git a/src/compiler/dex/mir_dataflow.cc b/src/compiler/dex/mir_dataflow.cc
index 9f61d73d6b..79eac6d14c 100644
--- a/src/compiler/dex/mir_dataflow.cc
+++ b/src/compiler/dex/mir_dataflow.cc
@@ -16,7 +16,7 @@
#include "compiler_internals.h"
#include "local_value_numbering.h"
-#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
namespace art {
@@ -933,11 +933,6 @@ int MIRGraph::AddNewSReg(int v_reg)
SetNumSSARegs(ssa_reg + 1);
ssa_base_vregs_->Insert(v_reg);
ssa_subscripts_->Insert(subscript);
- std::string ssa_name = GetSSAName(ssa_reg);
- char* name = static_cast<char*>(arena_->NewMem(ssa_name.length() + 1, false,
- ArenaAllocator::kAllocDFInfo));
- strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
- ssa_strings_->Insert(name);
DCHECK_EQ(ssa_base_vregs_->Size(), ssa_subscripts_->Size());
return ssa_reg;
}
@@ -1140,8 +1135,6 @@ void MIRGraph::CompilerInitializeSSAConversion()
kGrowableArraySSAtoDalvikMap);
ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
kGrowableArraySSAtoDalvikMap);
- ssa_strings_ = new (arena_) GrowableArray<char*>(arena_, num_dalvik_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
/*
* Initial number of SSA registers is equal to the number of Dalvik
* registers.
@@ -1156,11 +1149,6 @@ void MIRGraph::CompilerInitializeSSAConversion()
for (unsigned int i = 0; i < num_dalvik_reg; i++) {
ssa_base_vregs_->Insert(i);
ssa_subscripts_->Insert(0);
- std::string ssa_name = GetSSAName(i);
- char* name = static_cast<char*>(arena_->NewMem(ssa_name.length() + 1, true,
- ArenaAllocator::kAllocDFInfo));
- strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
- ssa_strings_->Insert(name);
}
/*
@@ -1237,17 +1225,17 @@ bool MIRGraph::InvokeUsesMethodStar(MIR* mir)
return false;
}
DexCompilationUnit m_unit(cu_);
- // TODO: add a flag so we don't counts the stats for this twice
- uint32_t dex_method_idx = mir->dalvikInsn.vB;
+ CompilerDriver::MethodReference target_method(cu_->dex_file, mir->dalvikInsn.vB);
int vtable_idx;
uintptr_t direct_code;
uintptr_t direct_method;
uint32_t current_offset = static_cast<uint32_t>(current_offset_);
bool fast_path =
- cu_->compiler_driver->ComputeInvokeInfo(dex_method_idx, current_offset,
- &m_unit, type,
- vtable_idx, direct_code,
- direct_method) &&
+ cu_->compiler_driver->ComputeInvokeInfo(&m_unit, current_offset,
+ type, target_method,
+ vtable_idx,
+ direct_code, direct_method,
+ false) &&
!(cu_->enable_debug & (1 << kDebugSlowInvokePath));
return (((type == kDirect) || (type == kStatic)) &&
fast_path && ((direct_code == 0) || (direct_method == 0)));
diff --git a/src/compiler/dex/mir_graph.cc b/src/compiler/dex/mir_graph.cc
index 6154eec6ca..11e100dc61 100644
--- a/src/compiler/dex/mir_graph.cc
+++ b/src/compiler/dex/mir_graph.cc
@@ -77,7 +77,6 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
cu_(cu),
ssa_base_vregs_(NULL),
ssa_subscripts_(NULL),
- ssa_strings_(NULL),
vreg_to_ssa_map_(NULL),
ssa_last_defs_(NULL),
is_constant_v_(NULL),
@@ -1037,6 +1036,9 @@ void MIRGraph::ReplaceSpecialChars(std::string& str)
std::string MIRGraph::GetSSAName(int ssa_reg)
{
+ // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to
+ // the arena. We should be smarter and just place straight into the arena, or compute the
+ // value more lazily.
return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
}
diff --git a/src/compiler/dex/mir_graph.h b/src/compiler/dex/mir_graph.h
index 882a5088d7..2b1c21fd70 100644
--- a/src/compiler/dex/mir_graph.h
+++ b/src/compiler/dex/mir_graph.h
@@ -452,10 +452,6 @@ class MIRGraph {
return ssa_subscripts_->Get(ssa_reg);
}
- const char* GetSSAString(int ssa_reg) const {
- return ssa_strings_->Get(ssa_reg);
- }
-
RegLocation GetRawSrc(MIR* mir, int num)
{
DCHECK(num < mir->ssa_rep->num_uses);
@@ -628,7 +624,6 @@ class MIRGraph {
CompilationUnit* const cu_;
GrowableArray<int>* ssa_base_vregs_;
GrowableArray<int>* ssa_subscripts_;
- GrowableArray<char*>* ssa_strings_;
// Map original Dalvik virtual reg i to the current SSA name.
int* vreg_to_ssa_map_; // length == method->registers_size
int* ssa_last_defs_; // length == method->registers_size
diff --git a/src/compiler/dex/mir_optimization.cc b/src/compiler/dex/mir_optimization.cc
index 534550112a..d9c443e536 100644
--- a/src/compiler/dex/mir_optimization.cc
+++ b/src/compiler/dex/mir_optimization.cc
@@ -16,7 +16,7 @@
#include "compiler_internals.h"
#include "local_value_numbering.h"
-#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
namespace art {
@@ -418,6 +418,13 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb)
static_cast<bool*>(arena_->NewMem(sizeof(bool) * 1, false,
ArenaAllocator::kAllocDFInfo));
mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
+ // Match type of uses to def.
+ mir->ssa_rep->fp_use =
+ static_cast<bool*>(arena_->NewMem(sizeof(bool) * mir->ssa_rep->num_uses, false,
+ ArenaAllocator::kAllocDFInfo));
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
+ }
/*
* There is usually a Phi node in the join block for our two cases. If the
* Phi node only contains our two cases as input, we will use the result
diff --git a/src/compiler/dex/portable/mir_to_gbc.cc b/src/compiler/dex/portable/mir_to_gbc.cc
index 6fccb47d9f..1f9c92a3d2 100644
--- a/src/compiler/dex/portable/mir_to_gbc.cc
+++ b/src/compiler/dex/portable/mir_to_gbc.cc
@@ -28,7 +28,7 @@
#include <llvm/Support/ToolOutputFile.h>
#include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/dataflow_iterator.h"
+#include "compiler/dex/dataflow_iterator-inl.h"
#include "compiler/dex/frontend.h"
#include "mir_to_gbc.h"
@@ -1964,7 +1964,7 @@ void MirConverter::MethodMIR2Bitcode()
::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
irb_->getJLong(0) : irb_->getJInt(0);
val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
- val->setName(mir_graph_->GetSSAString(i));
+ val->setName(mir_graph_->GetSSAName(i));
llvm_values_.Insert(val);
} else {
// Recover previously-created argument values
diff --git a/src/compiler/dex/quick/arm/assemble_arm.cc b/src/compiler/dex/quick/arm/assemble_arm.cc
index 23a87dcec1..36038f7741 100644
--- a/src/compiler/dex/quick/arm/assemble_arm.cc
+++ b/src/compiler/dex/quick/arm/assemble_arm.cc
@@ -16,6 +16,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
namespace art {
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index 32d4ed680f..77e09b8e4e 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -18,6 +18,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "oat/runtime/oat_support_entrypoints.h"
namespace art {
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
index 9e409e6772..60111d1d06 100644
--- a/src/compiler/dex/quick/arm/codegen_arm.h
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -23,143 +23,142 @@ namespace art {
class ArmMir2Lir : public Mir2Lir {
public:
-
ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
- virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+ bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
RegLocation rl_dest, int lit);
- virtual int LoadHelper(int offset);
- virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
- virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
- int s_reg);
- virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
- virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_dest, int r_dest_hi, OpSize size, int s_reg);
- virtual LIR* LoadConstantNoClobber(int r_dest, int value);
- virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
- virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
- virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
- virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
- virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg);
- virtual void MarkGCCard(int val_reg, int tgt_addr_reg);
+ int LoadHelper(int offset);
+ LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+ LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+ int s_reg);
+ LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+ LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ int r_dest, int r_dest_hi, OpSize size, int s_reg);
+ LIR* LoadConstantNoClobber(int r_dest, int value);
+ LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+ LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+ LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+ LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+ LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi, OpSize size, int s_reg);
+ void MarkGCCard(int val_reg, int tgt_addr_reg);
// Required for target - register utilities.
- virtual bool IsFpReg(int reg);
- virtual bool SameRegType(int reg1, int reg2);
- virtual int AllocTypedTemp(bool fp_hint, int reg_class);
- virtual int AllocTypedTempPair(bool fp_hint, int reg_class);
- virtual int S2d(int low_reg, int high_reg);
- virtual int TargetReg(SpecialTargetRegister reg);
- virtual RegisterInfo* GetRegInfo(int reg);
- virtual RegLocation GetReturnAlt();
- virtual RegLocation GetReturnWideAlt();
- virtual RegLocation LocCReturn();
- virtual RegLocation LocCReturnDouble();
- virtual RegLocation LocCReturnFloat();
- virtual RegLocation LocCReturnWide();
- virtual uint32_t FpRegMask();
- virtual uint64_t GetRegMaskCommon(int reg);
- virtual void AdjustSpillMask();
- virtual void ClobberCalleeSave();
- virtual void FlushReg(int reg);
- virtual void FlushRegWide(int reg1, int reg2);
- virtual void FreeCallTemps();
- virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
- virtual void LockCallTemps();
- virtual void MarkPreservedSingle(int v_reg, int reg);
- virtual void CompilerInitializeRegAlloc();
+ bool IsFpReg(int reg);
+ bool SameRegType(int reg1, int reg2);
+ int AllocTypedTemp(bool fp_hint, int reg_class);
+ int AllocTypedTempPair(bool fp_hint, int reg_class);
+ int S2d(int low_reg, int high_reg);
+ int TargetReg(SpecialTargetRegister reg);
+ RegisterInfo* GetRegInfo(int reg);
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ uint32_t FpRegMask();
+ uint64_t GetRegMaskCommon(int reg);
+ void AdjustSpillMask();
+ void ClobberCalleeSave();
+ void FlushReg(int reg);
+ void FlushRegWide(int reg1, int reg2);
+ void FreeCallTemps();
+ void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+ void LockCallTemps();
+ void MarkPreservedSingle(int v_reg, int reg);
+ void CompilerInitializeRegAlloc();
// Required for target - miscellaneous.
- virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr);
- virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
- virtual void SetupTargetResourceMasks(LIR* lir);
- virtual const char* GetTargetInstFmt(int opcode);
- virtual const char* GetTargetInstName(int opcode);
- virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- virtual uint64_t GetPCUseDefEncoding();
- virtual uint64_t GetTargetInstFlags(int opcode);
- virtual int GetInsnSize(LIR* lir);
- virtual bool IsUnconditionalBranch(LIR* lir);
+ AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ void SetupTargetResourceMasks(LIR* lir);
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ uint64_t GetPCUseDefEncoding();
+ uint64_t GetTargetInstFlags(int opcode);
+ int GetInsnSize(LIR* lir);
+ bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
- virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_src, int scale);
- virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
- virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale);
- virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
- virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
- virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
- virtual bool GenInlinedSqrt(CallInfo* info);
- virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale);
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift);
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+ bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+ bool GenInlinedSqrt(CallInfo* info);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
ThrowKind kind);
- virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
- virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
- virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenDivZeroCheck(int reg_lo, int reg_hi);
- virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- virtual void GenExitSequence();
- virtual void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
- virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- virtual void GenSelect(BasicBlock* bb, MIR* mir);
- virtual void GenMemBarrier(MemBarrierKind barrier_kind);
- virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
- virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
- virtual void GenMoveException(RegLocation rl_dest);
- virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheck(int reg_lo, int reg_hi);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+ void GenMonitorExit(int opt_flags, RegLocation rl_src);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit);
- virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Required for target - single operation generators.
- virtual LIR* OpUnconditionalBranch(LIR* target);
- virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
- virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
- virtual LIR* OpCondBranch(ConditionCode cc, LIR* target);
- virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
- virtual LIR* OpFpRegCopy(int r_dest, int r_src);
- virtual LIR* OpIT(ConditionCode cond, const char* guide);
- virtual LIR* OpMem(OpKind op, int rBase, int disp);
- virtual LIR* OpPcRelLoad(int reg, LIR* target);
- virtual LIR* OpReg(OpKind op, int r_dest_src);
- virtual LIR* OpRegCopy(int r_dest, int r_src);
- virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src);
- virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
- virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
- virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
- virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
- virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
- virtual LIR* OpTestSuspend(LIR* target);
- virtual LIR* OpThreadMem(OpKind op, int thread_offset);
- virtual LIR* OpVldm(int rBase, int count);
- virtual LIR* OpVstm(int rBase, int count);
- virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
- virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
- virtual void OpTlsCmp(int offset, int val);
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+ LIR* OpFpRegCopy(int r_dest, int r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ LIR* OpMem(OpKind op, int rBase, int disp);
+ LIR* OpPcRelLoad(int reg, LIR* target);
+ LIR* OpReg(OpKind op, int r_dest_src);
+ LIR* OpRegCopy(int r_dest, int r_src);
+ LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+ LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+ LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+ LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+ LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpThreadMem(OpKind op, int thread_offset);
+ LIR* OpVldm(int rBase, int count);
+ LIR* OpVstm(int rBase, int count);
+ void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+ void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+ void OpTlsCmp(int offset, int val);
RegLocation ArgLoc(RegLocation loc);
LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
diff --git a/src/compiler/dex/quick/arm/fp_arm.cc b/src/compiler/dex/quick/arm/fp_arm.cc
index 4bf8738949..cd71c0798b 100644
--- a/src/compiler/dex/quick/arm/fp_arm.cc
+++ b/src/compiler/dex/quick/arm/fp_arm.cc
@@ -16,6 +16,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
namespace art {
diff --git a/src/compiler/dex/quick/arm/int_arm.cc b/src/compiler/dex/quick/arm/int_arm.cc
index 586a3a49b5..110e9f4320 100644
--- a/src/compiler/dex/quick/arm/int_arm.cc
+++ b/src/compiler/dex/quick/arm/int_arm.cc
@@ -18,6 +18,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "oat/runtime/oat_support_entrypoints.h"
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
index 0a05a3a431..ee127a8e17 100644
--- a/src/compiler/dex/quick/arm/target_arm.cc
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -19,6 +19,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
namespace art {
diff --git a/src/compiler/dex/quick/arm/utility_arm.cc b/src/compiler/dex/quick/arm/utility_arm.cc
index c689f72436..ef0cc72a5c 100644
--- a/src/compiler/dex/quick/arm/utility_arm.cc
+++ b/src/compiler/dex/quick/arm/utility_arm.cc
@@ -16,7 +16,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
-#include "compiler/dex/quick/mir_to_lir.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
namespace art {
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
index 517d1b5c03..ac2828c276 100644
--- a/src/compiler/dex/quick/codegen_util.cc
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -17,6 +17,7 @@
#include "compiler/dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
+#include "mir_to_lir-inl.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
@@ -112,81 +113,6 @@ void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
}
/*
- * Mark the corresponding bit(s).
- */
-void Mir2Lir::SetupRegMask(uint64_t* mask, int reg)
-{
- *mask |= GetRegMaskCommon(reg);
-}
-
-/*
- * Set up the proper fields in the resource mask
- */
-void Mir2Lir::SetupResourceMasks(LIR* lir)
-{
- int opcode = lir->opcode;
-
- if (opcode <= 0) {
- lir->use_mask = lir->def_mask = 0;
- return;
- }
-
- uint64_t flags = GetTargetInstFlags(opcode);
-
- if (flags & NEEDS_FIXUP) {
- lir->flags.pcRelFixup = true;
- }
-
- /* Get the starting size of the instruction's template */
- lir->flags.size = GetInsnSize(lir);
-
- /* Set up the mask for resources that are updated */
- if (flags & (IS_LOAD | IS_STORE)) {
- /* Default to heap - will catch specialized classes later */
- SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
- }
-
- /*
- * Conservatively assume the branch here will call out a function that in
- * turn will trash everything.
- */
- if (flags & IS_BRANCH) {
- lir->def_mask = lir->use_mask = ENCODE_ALL;
- return;
- }
-
- if (flags & REG_DEF0) {
- SetupRegMask(&lir->def_mask, lir->operands[0]);
- }
-
- if (flags & REG_DEF1) {
- SetupRegMask(&lir->def_mask, lir->operands[1]);
- }
-
-
- if (flags & SETS_CCODES) {
- lir->def_mask |= ENCODE_CCODE;
- }
-
- if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
- int i;
-
- for (i = 0; i < 4; i++) {
- if (flags & (1 << (kRegUse0 + i))) {
- SetupRegMask(&lir->use_mask, lir->operands[i]);
- }
- }
- }
-
- if (flags & USES_CCODES) {
- lir->use_mask |= ENCODE_CCODE;
- }
-
- // Handle target-specific actions
- SetupTargetResourceMasks(lir);
-}
-
-/*
* Debugging macros
*/
#define DUMP_RESOURCE_MASK(X)
@@ -361,99 +287,6 @@ void Mir2Lir::CodegenDump()
DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
}
-
-LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
- int op1, int op2, int op3, int op4, LIR* target)
-{
- LIR* insn = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
- insn->dalvik_offset = dalvik_offset;
- insn->opcode = opcode;
- insn->operands[0] = op0;
- insn->operands[1] = op1;
- insn->operands[2] = op2;
- insn->operands[3] = op3;
- insn->operands[4] = op4;
- insn->target = target;
- SetupResourceMasks(insn);
- if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
- (opcode == kPseudoExportedPC)) {
- // Always make labels scheduling barriers
- insn->use_mask = insn->def_mask = ENCODE_ALL;
- }
- return insn;
-}
-
-/*
- * The following are building blocks to construct low-level IRs with 0 - 4
- * operands.
- */
-LIR* Mir2Lir::NewLIR0(int opcode)
-{
- DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
- << GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
- << current_dalvik_offset_;
- LIR* insn = RawLIR(current_dalvik_offset_, opcode);
- AppendLIR(insn);
- return insn;
-}
-
-LIR* Mir2Lir::NewLIR1(int opcode, int dest)
-{
- DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
- << GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
- << current_dalvik_offset_;
- LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
- AppendLIR(insn);
- return insn;
-}
-
-LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1)
-{
- DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
- << GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
- << current_dalvik_offset_;
- LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1);
- AppendLIR(insn);
- return insn;
-}
-
-LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2)
-{
- DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
- << GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
- << current_dalvik_offset_;
- LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2);
- AppendLIR(insn);
- return insn;
-}
-
-LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info)
-{
- DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
- << GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
- << current_dalvik_offset_;
- LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info);
- AppendLIR(insn);
- return insn;
-}
-
-LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1,
- int info2)
-{
- DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
- << GetTargetInstName(opcode) << " " << opcode << " "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
- << current_dalvik_offset_;
- LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info1, info2);
- AppendLIR(insn);
- return insn;
-}
-
/*
* Search the existing constants in the literal pool for an exact or close match
* within specified delta (greater or equal to 0).
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
index 15aa904f46..7aa71cfd10 100644
--- a/src/compiler/dex/quick/gen_common.cc
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -16,8 +16,10 @@
#include "compiler/dex/compiler_ir.h"
#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "oat/runtime/oat_support_entrypoints.h"
+#include "verifier/method_verifier.h"
namespace art {
@@ -881,23 +883,81 @@ void Mir2Lir::GenThrow(RegLocation rl_src)
CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}
-void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src)
-{
+// For final classes there are no sub-classes to check and so we can answer the instance-of
+// question with simple comparisons.
+void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src) {
+ RegLocation object = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ int result_reg = rl_result.low_reg;
+ if (result_reg == object.low_reg) {
+ result_reg = AllocTypedTemp(false, kCoreReg);
+ }
+ LoadConstant(result_reg, 0); // assume false
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
+
+ int check_class = AllocTypedTemp(false, kCoreReg);
+ int object_class = AllocTypedTemp(false, kCoreReg);
+
+ LoadCurrMethodDirect(check_class);
+ if (use_declaring_class) {
+ LoadWordDisp(check_class, mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
+ check_class);
+ LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ } else {
+ LoadWordDisp(check_class, mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ check_class);
+ LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ int32_t offset_of_type =
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
+ (sizeof(mirror::Class*) * type_idx);
+ LoadWordDisp(check_class, offset_of_type, check_class);
+ }
+
+ LIR* ne_branchover = NULL;
+ if (cu_->instruction_set == kThumb2) {
+ OpRegReg(kOpCmp, check_class, object_class); // Same?
+ OpIT(kCondEq, ""); // if-convert the test
+ LoadConstant(result_reg, 1); // .eq case - load true
+ } else {
+ ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
+ LoadConstant(result_reg, 1); // eq case - load true
+ }
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ null_branchover->target = target;
+ if (ne_branchover != NULL) {
+ ne_branchover->target = target;
+ }
+ FreeTemp(object_class);
+ FreeTemp(check_class);
+ if (IsTemp(result_reg)) {
+ OpRegCopy(rl_result.low_reg, result_reg);
+ FreeTemp(result_reg);
+ }
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+ bool type_known_abstract, bool use_declaring_class,
+ bool can_assume_type_is_in_dex_cache,
+ uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src) {
FlushAllRegs();
// May generate a call - use explicit registers
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method*
int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
- if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
- *cu_->dex_file,
- type_idx)) {
+ if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
+ } else if (use_declaring_class) {
+ LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
+ LoadWordDisp(TargetReg(kArg1),
+ mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
@@ -907,8 +967,7 @@ void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
LoadWordDisp(class_reg, offset_of_type, class_reg);
- if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(
- *cu_->dex_file, type_idx)) {
+ if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Not resolved
@@ -924,65 +983,120 @@ void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
/* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
RegLocation rl_result = GetReturn(false);
if (cu_->instruction_set == kMips) {
- LoadConstant(rl_result.low_reg, 0); // store false result for if branch is taken
+ // On MIPS rArg0 != rl_result, place false in result if branch is taken.
+ LoadConstant(rl_result.low_reg, 0);
}
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
+
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* call_inst;
LIR* branchover = NULL;
- if (cu_->instruction_set == kThumb2) {
- /* Uses conditional nullification */
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
- OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
- OpIT(kCondEq, "EE"); // if-convert the test
- LoadConstant(TargetReg(kArg0), 1); // .eq case - load true
- OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
- call_inst = OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
- FreeTemp(r_tgt);
+ if (type_known_final) {
+ // rl_result == ref == null == 0.
+ if (cu_->instruction_set == kThumb2) {
+ OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
+ OpIT(kCondEq, "E"); // if-convert the test
+ LoadConstant(rl_result.low_reg, 1); // .eq case - load true
+ LoadConstant(rl_result.low_reg, 0); // .ne case - load false
+ } else {
+ LoadConstant(rl_result.low_reg, 0); // ne case - load false
+ branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
+ LoadConstant(rl_result.low_reg, 1); // eq case - load true
+ }
} else {
- /* Uses branchovers */
- LoadConstant(rl_result.low_reg, 1); // assume true
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
- if (cu_->instruction_set != kX86) {
+ if (cu_->instruction_set == kThumb2) {
int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ if (!type_known_abstract) {
+ /* Uses conditional nullification */
+ OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
+ OpIT(kCondEq, "EE"); // if-convert the test
+ LoadConstant(TargetReg(kArg0), 1); // .eq case - load true
+ }
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
- call_inst = OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
+ OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
FreeTemp(r_tgt);
} else {
- OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- call_inst = OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ if (!type_known_abstract) {
+ /* Uses branchovers */
+ LoadConstant(rl_result.low_reg, 1); // assume true
+ branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+ }
+ if (cu_->instruction_set != kX86) {
+ int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
+ OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
+ FreeTemp(r_tgt);
+ } else {
+ OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+ OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ }
}
}
- MarkSafepointPC(call_inst);
+ // TODO: only clobber when type isn't final?
ClobberCalleeSave();
/* branch targets here */
LIR* target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
branch1->target = target;
- if (cu_->instruction_set != kThumb2) {
+ if (branchover != NULL) {
branchover->target = target;
}
}
-void Mir2Lir::GenCheckCast(uint32_t type_idx, RegLocation rl_src)
+void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
+ bool type_known_final, type_known_abstract, use_declaring_class;
+ bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
+ *cu_->dex_file,
+ type_idx,
+ &type_known_final,
+ &type_known_abstract,
+ &use_declaring_class);
+ bool can_assume_type_is_in_dex_cache = !needs_access_check &&
+ cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
+
+ if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
+ GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
+ } else {
+ GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
+ use_declaring_class, can_assume_type_is_in_dex_cache,
+ type_idx, rl_dest, rl_src);
+ }
+}
+
+void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src)
{
+ bool type_known_final, type_known_abstract, use_declaring_class;
+ bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
+ *cu_->dex_file,
+ type_idx,
+ &type_known_final,
+ &type_known_abstract,
+ &use_declaring_class);
+ // Note: currently type_known_final is unused, as optimizing will only improve the performance
+ // of the exception throw path.
+ DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
+ const CompilerDriver::MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
+ if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
+ // Verifier type analysis proved this check cast would never cause an exception.
+ return;
+ }
FlushAllRegs();
// May generate a call - use explicit registers
LockCallTemps();
LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 <= current Method*
int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
- if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
- *cu_->dex_file,
- type_idx)) {
+ if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
+ } else if (use_declaring_class) {
+ LoadWordDisp(TargetReg(kArg1),
+ mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadWordDisp(TargetReg(kArg1),
@@ -991,8 +1105,7 @@ void Mir2Lir::GenCheckCast(uint32_t type_idx, RegLocation rl_src)
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
LoadWordDisp(class_reg, offset_of_type, class_reg);
- if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(
- *cu_->dex_file, type_idx)) {
+ if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Not resolved
@@ -1014,25 +1127,18 @@ void Mir2Lir::GenCheckCast(uint32_t type_idx, RegLocation rl_src)
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg1 now contains object->klass_ */
- LIR* branch2;
- if (cu_->instruction_set == kThumb2) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pCheckCastFromCode));
- OpRegReg(kOpCmp, TargetReg(kArg1), class_reg);
- branch2 = OpCondBranch(kCondEq, NULL); /* If eq, trivial yes */
- OpRegCopy(TargetReg(kArg0), TargetReg(kArg1));
- OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));
- ClobberCalleeSave();
- LIR* call_inst = OpReg(kOpBlx, r_tgt);
- MarkSafepointPC(call_inst);
- FreeTemp(r_tgt);
- } else {
+ LIR* branch2 = NULL;
+ if (!type_known_abstract) {
branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), true);
}
+ CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2),
+ true);
/* branch target here */
LIR* target = NewLIR0(kPseudoTargetLabel);
branch1->target = target;
- branch2->target = target;
+ if (branch2 != NULL) {
+ branch2->target = target;
+ }
}
void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
index afcd9efb7d..4b12bb407a 100644
--- a/src/compiler/dex/quick/gen_invoke.cc
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -15,9 +15,11 @@
*/
#include "compiler/dex/compiler_ir.h"
+#include "dex_file-inl.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
+#include "mir_to_lir-inl.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "x86/codegen_x86.h"
@@ -311,7 +313,8 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method)
* emit the next instruction in static & direct invoke sequences.
*/
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
- int state, uint32_t dex_idx, uint32_t unused,
+ int state, const CompilerDriver::MethodReference& target_method,
+ uint32_t unused,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type)
{
@@ -327,9 +330,11 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_code != static_cast<unsigned int>(-1)) {
cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
} else {
- LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_, dex_idx, 0);
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
+ target_method.dex_method_index, 0);
if (data_target == NULL) {
- data_target = cg->AddWordData(&cg->code_literal_list_, dex_idx);
+ data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = type;
}
LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
@@ -339,9 +344,11 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_method != static_cast<unsigned int>(-1)) {
cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
} else {
- LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_, dex_idx, 0);
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
+ target_method.dex_method_index, 0);
if (data_target == NULL) {
- data_target = cg->AddWordData(&cg->method_literal_list_, dex_idx);
+ data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
data_target->operands[1] = type;
}
LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
@@ -366,9 +373,11 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_code != static_cast<unsigned int>(-1)) {
cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
} else {
- LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_, dex_idx, 0);
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
+ target_method.dex_method_index, 0);
if (data_target == NULL) {
- data_target = cg->AddWordData(&cg->code_literal_list_, dex_idx);
+ data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = type;
}
LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
@@ -378,8 +387,10 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
}
break;
case 2: // Grab target method*
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
cg->LoadWordDisp(cg->TargetReg(kArg0),
- mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ (target_method.dex_method_index * 4),
cg-> TargetReg(kArg0));
break;
case 3: // Grab the code from the method*
@@ -407,8 +418,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
* kArg1 here rather than the standard LoadArgRegs.
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
- int state, uint32_t dex_idx, uint32_t method_idx,
- uintptr_t unused, uintptr_t unused2, InvokeType unused3)
+ int state, const CompilerDriver::MethodReference& target_method,
+ uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
+ InvokeType unused3)
{
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
@@ -455,7 +467,8 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
* which will locate the target and continue on via a tail call.
*/
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
- uint32_t dex_idx, uint32_t unused, uintptr_t unused2,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t unused, uintptr_t unused2,
uintptr_t direct_method, InvokeType unused4)
{
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
@@ -476,9 +489,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
if (direct_method != static_cast<unsigned int>(-1)) {
cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
} else {
- LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_, dex_idx, 0);
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
+ target_method.dex_method_index, 0);
if (data_target == NULL) {
- data_target = cg->AddWordData(&cg->method_literal_list_, dex_idx);
+ data_target = cg->AddWordData(&cg->method_literal_list_,
+ target_method.dex_method_index);
data_target->operands[1] = kInterface;
}
LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
@@ -505,8 +521,10 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
cg->TargetReg(kArg0));
break;
case 2: // Grab target method* [set/use kArg0]
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
cg->LoadWordDisp(cg->TargetReg(kArg0),
- mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ (target_method.dex_method_index * 4),
cg->TargetReg(kArg0));
break;
default:
@@ -517,7 +535,8 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
}
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
- int state, uint32_t dex_idx, uint32_t method_idx)
+ int state, const CompilerDriver::MethodReference& target_method,
+ uint32_t method_idx)
{
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
@@ -530,58 +549,66 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
}
// Load kArg0 with method index
- cg->LoadConstant(cg->TargetReg(kArg0), dex_idx);
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
return 1;
}
return -1;
}
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
- int state, uint32_t dex_idx, uint32_t method_idx,
+ int state,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t method_idx,
uintptr_t unused, uintptr_t unused2,
- InvokeType unused3)
+ InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
- uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
- uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
- uintptr_t unused2, InvokeType unused3)
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t method_idx, uintptr_t unused,
+ uintptr_t unused2, InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
- uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
- uint32_t dex_idx, uint32_t unused,
- uintptr_t unused2, uintptr_t unused3,
- InvokeType unused4)
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t unused,
+ uintptr_t unused2, uintptr_t unused3,
+ InvokeType unused4)
{
int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
- return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+ return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
- NextCallInsn next_call_insn, uint32_t dex_idx,
- uint32_t method_idx, uintptr_t direct_code,
+ NextCallInsn next_call_insn,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type, bool skip_this)
{
int last_arg_reg = TargetReg(kArg3);
@@ -605,8 +632,8 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
}
LoadValueDirectFixed(rl_arg, next_reg);
}
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
- direct_code, direct_method, type);
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
}
return call_state;
}
@@ -620,7 +647,8 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
*/
int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
- uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type, bool skip_this)
{
RegLocation rl_arg;
@@ -629,8 +657,8 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
if (info->num_arg_words == 0)
return call_state;
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
- direct_code, direct_method, type);
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
DCHECK_LE(info->num_arg_words, 5);
if (info->num_arg_words > 3) {
@@ -650,13 +678,13 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
// kArg2 & rArg3 can safely be used here
reg = TargetReg(kArg3);
LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
- call_state = next_call_insn(cu_, info, call_state, dex_idx,
- method_idx, direct_code, direct_method, type);
+ call_state = next_call_insn(cu_, info, call_state, target_method,
+ vtable_idx, direct_code, direct_method, type);
}
StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
- direct_code, direct_method, type);
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
next_use++;
}
// Loop through the rest
@@ -676,8 +704,8 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
} else {
LoadValueDirectFixed(rl_arg, low_reg);
}
- call_state = next_call_insn(cu_, info, call_state, dex_idx,
- method_idx, direct_code, direct_method, type);
+ call_state = next_call_insn(cu_, info, call_state, target_method,
+ vtable_idx, direct_code, direct_method, type);
}
int outs_offset = (next_use + 1) * 4;
if (rl_arg.wide) {
@@ -687,14 +715,14 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
next_use++;
}
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
}
}
call_state = LoadArgRegs(info, call_state, next_call_insn,
- dex_idx, method_idx, direct_code, direct_method,
- type, skip_this);
+ target_method, vtable_idx, direct_code, direct_method,
+ type, skip_this);
if (pcrLabel) {
*pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
@@ -718,15 +746,16 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
*
*/
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
- LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx,
- uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method,
+ LIR** pcrLabel, NextCallInsn next_call_insn,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
InvokeType type, bool skip_this)
{
// If we can treat it as non-range (Jumbo ops will use range form)
if (info->num_arg_words <= 5)
return GenDalvikArgsNoRange(info, call_state, pcrLabel,
- next_call_insn, dex_idx, method_idx,
+ next_call_insn, target_method, vtable_idx,
direct_code, direct_method, type, skip_this);
/*
* First load the non-register arguments. Both forms expect all
@@ -772,31 +801,31 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
} else {
// Use vldm/vstm pair using kArg3 as a temp
int regs_left = std::min(info->num_arg_words - 3, 16);
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
//TUNING: loosen barrier
ld->def_mask = ENCODE_ALL;
SetMemRefType(ld, true /* is_load */, kDalvikReg);
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
LIR* st = OpVstm(TargetReg(kArg3), regs_left);
SetMemRefType(st, false /* is_load */, kDalvikReg);
st->def_mask = ENCODE_ALL;
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
}
}
call_state = LoadArgRegs(info, call_state, next_call_insn,
- dex_idx, method_idx, direct_code, direct_method,
- type, skip_this);
+ target_method, vtable_idx, direct_code, direct_method,
+ type, skip_this);
- call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
*pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
@@ -1150,6 +1179,10 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
// TODO - add Mips implementation
return false;
}
+ if (cu_->instruction_set == kX86 && is_object) {
+ // TODO: fix X86, it exhausts registers for card marking.
+ return false;
+ }
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object
RegLocation rl_src_offset = info->args[2]; // long low
@@ -1193,20 +1226,27 @@ bool Mir2Lir::GenIntrinsic(CallInfo* info)
* method. By doing this during basic block construction, we can also
* take advantage of/generate new useful dataflow info.
*/
- std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
- if (tgt_method.find(" java.lang") != std::string::npos) {
+ StringPiece tgt_methods_declaring_class(
+ cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
+ if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
+ std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
return GenInlinedDoubleCvt(info);
}
if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
return GenInlinedDoubleCvt(info);
}
+ } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) {
+ std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
return GenInlinedFloatCvt(info);
}
if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
return GenInlinedFloatCvt(info);
}
+ } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
+ tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
+ std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "int java.lang.Math.abs(int)" ||
tgt_method == "int java.lang.StrictMath.abs(int)") {
return GenInlinedAbsInt(info);
@@ -1227,6 +1267,8 @@ bool Mir2Lir::GenIntrinsic(CallInfo* info)
tgt_method == "double java.lang.StrictMath.sqrt(double)") {
return GenInlinedSqrt(info);
}
+ } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
+ std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "char java.lang.String.charAt(int)") {
return GenInlinedCharAt(info);
}
@@ -1245,10 +1287,13 @@ bool Mir2Lir::GenIntrinsic(CallInfo* info)
if (tgt_method == "int java.lang.String.length()") {
return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
}
+ } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) {
+ std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
return GenInlinedCurrentThread(info);
}
- } else if (tgt_method.find(" sun.misc.Unsafe") != std::string::npos) {
+ } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) {
+ std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
return GenInlinedCas32(info, false);
}
@@ -1327,20 +1372,24 @@ void Mir2Lir::GenInvoke(CallInfo* info)
// Explicit register usage
LockCallTemps();
- uint32_t dex_method_idx = info->index;
+ DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
+ CompilerDriver::MethodReference target_method(cUnit->GetDexFile(), info->index);
int vtable_idx;
uintptr_t direct_code;
uintptr_t direct_method;
bool skip_this;
- bool fast_path = cu_->compiler_driver->ComputeInvokeInfo(
- dex_method_idx, current_dalvik_offset_, mir_graph_->GetCurrentDexCompilationUnit(), info->type, vtable_idx,
- direct_code, direct_method) && !SLOW_INVOKE_PATH;
+ bool fast_path =
+ cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
+ current_dalvik_offset_,
+ info->type, target_method,
+ vtable_idx,
+ direct_code, direct_method,
+ true) && !SLOW_INVOKE_PATH;
if (info->type == kInterface) {
if (fast_path) {
p_null_ck = &null_ck;
}
- next_call_insn = fast_path ? NextInterfaceCallInsn
- : NextInterfaceCallInsnWithAccessCheck;
+ next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
skip_this = false;
} else if (info->type == kDirect) {
if (fast_path) {
@@ -1362,20 +1411,20 @@ void Mir2Lir::GenInvoke(CallInfo* info)
}
if (!info->is_range) {
call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
- next_call_insn, dex_method_idx,
- vtable_idx, direct_code, direct_method,
- original_type, skip_this);
+ next_call_insn, target_method,
+ vtable_idx, direct_code, direct_method,
+ original_type, skip_this);
} else {
call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
- next_call_insn, dex_method_idx, vtable_idx,
- direct_code, direct_method, original_type,
- skip_this);
+ next_call_insn, target_method, vtable_idx,
+ direct_code, direct_method, original_type,
+ skip_this);
}
// Finish up any of the call sequence not interleaved in arg loading
while (call_state >= 0) {
- call_state = next_call_insn(cu_, info, call_state, dex_method_idx,
- vtable_idx, direct_code, direct_method,
- original_type);
+ call_state = next_call_insn(cu_, info, call_state, target_method,
+ vtable_idx, direct_code, direct_method,
+ original_type);
}
LIR* call_inst;
if (cu_->instruction_set != kX86) {
diff --git a/src/compiler/dex/quick/gen_loadstore.cc b/src/compiler/dex/quick/gen_loadstore.cc
index 1cebd31d19..085f7f518c 100644
--- a/src/compiler/dex/quick/gen_loadstore.cc
+++ b/src/compiler/dex/quick/gen_loadstore.cc
@@ -16,6 +16,7 @@
#include "compiler/dex/compiler_ir.h"
#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "invoke_type.h"
namespace art {
diff --git a/src/compiler/dex/quick/mips/assemble_mips.cc b/src/compiler/dex/quick/mips/assemble_mips.cc
index 5223a0ecfd..002a23e5ae 100644
--- a/src/compiler/dex/quick/mips/assemble_mips.cc
+++ b/src/compiler/dex/quick/mips/assemble_mips.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_mips.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
namespace art {
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index b53d1e35a5..9f1d314b55 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the Mips ISA */
#include "codegen_mips.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
#include "oat/runtime/oat_support_entrypoints.h"
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
index db262a8e96..9fa8f779fe 100644
--- a/src/compiler/dex/quick/mips/codegen_mips.h
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -28,139 +28,139 @@ class MipsMir2Lir : public Mir2Lir {
MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen utilities.
- virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+ bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
RegLocation rl_dest, int lit);
- virtual int LoadHelper(int offset);
- virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
- virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+ int LoadHelper(int offset);
+ LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+ LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
- virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
- virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+ LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
int r_dest, int r_dest_hi, OpSize size, int s_reg);
- virtual LIR* LoadConstantNoClobber(int r_dest, int value);
- virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
- virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
- virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
- virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
- virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ LIR* LoadConstantNoClobber(int r_dest, int value);
+ LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+ LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+ LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+ LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+ LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
int r_src, int r_src_hi, OpSize size, int s_reg);
- virtual void MarkGCCard(int val_reg, int tgt_addr_reg);
+ void MarkGCCard(int val_reg, int tgt_addr_reg);
// Required for target - register utilities.
- virtual bool IsFpReg(int reg);
- virtual bool SameRegType(int reg1, int reg2);
- virtual int AllocTypedTemp(bool fp_hint, int reg_class);
- virtual int AllocTypedTempPair(bool fp_hint, int reg_class);
- virtual int S2d(int low_reg, int high_reg);
- virtual int TargetReg(SpecialTargetRegister reg);
- virtual RegisterInfo* GetRegInfo(int reg);
- virtual RegLocation GetReturnAlt();
- virtual RegLocation GetReturnWideAlt();
- virtual RegLocation LocCReturn();
- virtual RegLocation LocCReturnDouble();
- virtual RegLocation LocCReturnFloat();
- virtual RegLocation LocCReturnWide();
- virtual uint32_t FpRegMask();
- virtual uint64_t GetRegMaskCommon(int reg);
- virtual void AdjustSpillMask();
- virtual void ClobberCalleeSave();
- virtual void FlushReg(int reg);
- virtual void FlushRegWide(int reg1, int reg2);
- virtual void FreeCallTemps();
- virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
- virtual void LockCallTemps();
- virtual void MarkPreservedSingle(int v_reg, int reg);
- virtual void CompilerInitializeRegAlloc();
+ bool IsFpReg(int reg);
+ bool SameRegType(int reg1, int reg2);
+ int AllocTypedTemp(bool fp_hint, int reg_class);
+ int AllocTypedTempPair(bool fp_hint, int reg_class);
+ int S2d(int low_reg, int high_reg);
+ int TargetReg(SpecialTargetRegister reg);
+ RegisterInfo* GetRegInfo(int reg);
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ uint32_t FpRegMask();
+ uint64_t GetRegMaskCommon(int reg);
+ void AdjustSpillMask();
+ void ClobberCalleeSave();
+ void FlushReg(int reg);
+ void FlushRegWide(int reg1, int reg2);
+ void FreeCallTemps();
+ void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+ void LockCallTemps();
+ void MarkPreservedSingle(int v_reg, int reg);
+ void CompilerInitializeRegAlloc();
// Required for target - miscellaneous.
- virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr);
- virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
- virtual void SetupTargetResourceMasks(LIR* lir);
- virtual const char* GetTargetInstFmt(int opcode);
- virtual const char* GetTargetInstName(int opcode);
- virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- virtual uint64_t GetPCUseDefEncoding();
- virtual uint64_t GetTargetInstFlags(int opcode);
- virtual int GetInsnSize(LIR* lir);
- virtual bool IsUnconditionalBranch(LIR* lir);
+ AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ void SetupTargetResourceMasks(LIR* lir);
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ uint64_t GetPCUseDefEncoding();
+ uint64_t GetTargetInstFlags(int opcode);
+ int GetInsnSize(LIR* lir);
+ bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
- virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
+ void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src, int scale);
- virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale);
- virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale);
- virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
- virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
- virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
- virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
- virtual bool GenInlinedSqrt(CallInfo* info);
- virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+ bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+ bool GenInlinedSqrt(CallInfo* info);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
ThrowKind kind);
- virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
- virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
- virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenDivZeroCheck(int reg_lo, int reg_hi);
- virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- virtual void GenExitSequence();
- virtual void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
- virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- virtual void GenSelect(BasicBlock* bb, MIR* mir);
- virtual void GenMemBarrier(MemBarrierKind barrier_kind);
- virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
- virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
- virtual void GenMoveException(RegLocation rl_dest);
- virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheck(int reg_lo, int reg_hi);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+ void GenMonitorExit(int opt_flags, RegLocation rl_src);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit);
- virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Required for target - single operation generators.
- virtual LIR* OpUnconditionalBranch(LIR* target);
- virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
- virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
- virtual LIR* OpCondBranch(ConditionCode cc, LIR* target);
- virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
- virtual LIR* OpFpRegCopy(int r_dest, int r_src);
- virtual LIR* OpIT(ConditionCode cond, const char* guide);
- virtual LIR* OpMem(OpKind op, int rBase, int disp);
- virtual LIR* OpPcRelLoad(int reg, LIR* target);
- virtual LIR* OpReg(OpKind op, int r_dest_src);
- virtual LIR* OpRegCopy(int r_dest, int r_src);
- virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src);
- virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
- virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
- virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
- virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
- virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
- virtual LIR* OpTestSuspend(LIR* target);
- virtual LIR* OpThreadMem(OpKind op, int thread_offset);
- virtual LIR* OpVldm(int rBase, int count);
- virtual LIR* OpVstm(int rBase, int count);
- virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
- virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
- virtual void OpTlsCmp(int offset, int val);
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+ LIR* OpFpRegCopy(int r_dest, int r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ LIR* OpMem(OpKind op, int rBase, int disp);
+ LIR* OpPcRelLoad(int reg, LIR* target);
+ LIR* OpReg(OpKind op, int r_dest_src);
+ LIR* OpRegCopy(int r_dest, int r_src);
+ LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+ LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+ LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+ LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+ LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpThreadMem(OpKind op, int thread_offset);
+ LIR* OpVldm(int rBase, int count);
+ LIR* OpVstm(int rBase, int count);
+ void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+ void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+ void OpTlsCmp(int offset, int val);
LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
int s_reg);
diff --git a/src/compiler/dex/quick/mips/fp_mips.cc b/src/compiler/dex/quick/mips/fp_mips.cc
index 5ddec00e80..f384da1a5b 100644
--- a/src/compiler/dex/quick/mips/fp_mips.cc
+++ b/src/compiler/dex/quick/mips/fp_mips.cc
@@ -16,6 +16,7 @@
#include "codegen_mips.h"
#include "mips_lir.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "oat/runtime/oat_support_entrypoints.h"
namespace art {
diff --git a/src/compiler/dex/quick/mips/int_mips.cc b/src/compiler/dex/quick/mips/int_mips.cc
index fbff397bd9..fe9e83f879 100644
--- a/src/compiler/dex/quick/mips/int_mips.cc
+++ b/src/compiler/dex/quick/mips/int_mips.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the Mips ISA */
#include "codegen_mips.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
#include "mirror/array.h"
#include "oat/runtime/oat_support_entrypoints.h"
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
index 46a625e83f..356104c86c 100644
--- a/src/compiler/dex/quick/mips/target_mips.cc
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -16,6 +16,7 @@
#include "codegen_mips.h"
#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
#include <string>
diff --git a/src/compiler/dex/quick/mips/utility_mips.cc b/src/compiler/dex/quick/mips/utility_mips.cc
index 5f9f8c58a7..257b0f6cb2 100644
--- a/src/compiler/dex/quick/mips/utility_mips.cc
+++ b/src/compiler/dex/quick/mips/utility_mips.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_mips.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
namespace art {
diff --git a/src/compiler/dex/quick/mir_to_lir-inl.h b/src/compiler/dex/quick/mir_to_lir-inl.h
new file mode 100644
index 0000000000..f7546924c8
--- /dev/null
+++ b/src/compiler/dex/quick/mir_to_lir-inl.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
+
+#include "mir_to_lir.h"
+
+#include "compiler/dex/compiler_internals.h"
+
+namespace art {
+
+/* Mark a temp register as dead. Does not affect allocation state. */
+inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
+ if (p->is_temp) {
+ DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
+ p->live = false;
+ p->s_reg = INVALID_SREG;
+ p->def_start = NULL;
+ p->def_end = NULL;
+ if (p->pair) {
+ p->pair = false;
+ Clobber(p->partner);
+ }
+ }
+}
+
+inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
+ int op1, int op2, int op3, int op4, LIR* target) {
+ LIR* insn = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+ insn->dalvik_offset = dalvik_offset;
+ insn->opcode = opcode;
+ insn->operands[0] = op0;
+ insn->operands[1] = op1;
+ insn->operands[2] = op2;
+ insn->operands[3] = op3;
+ insn->operands[4] = op4;
+ insn->target = target;
+ SetupResourceMasks(insn);
+ if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
+ (opcode == kPseudoExportedPC)) {
+ // Always make labels scheduling barriers
+ insn->use_mask = insn->def_mask = ENCODE_ALL;
+ }
+ return insn;
+}
+
+/*
+ * The following are building blocks to construct low-level IRs with 0 - 4
+ * operands.
+ */
+inline LIR* Mir2Lir::NewLIR0(int opcode) {
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
+ << GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+ << current_dalvik_offset_;
+ LIR* insn = RawLIR(current_dalvik_offset_, opcode);
+ AppendLIR(insn);
+ return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) {
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
+ << GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+ << current_dalvik_offset_;
+ LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
+ AppendLIR(insn);
+ return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) {
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
+ << GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+ << current_dalvik_offset_;
+ LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1);
+ AppendLIR(insn);
+ return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2) {
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
+ << GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+ << current_dalvik_offset_;
+ LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2);
+ AppendLIR(insn);
+ return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info) {
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
+ << GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+ << current_dalvik_offset_;
+ LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info);
+ AppendLIR(insn);
+ return insn;
+}
+
+inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1,
+ int info2) {
+ DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
+ << GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+ << current_dalvik_offset_;
+ LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info1, info2);
+ AppendLIR(insn);
+ return insn;
+}
+
+/*
+ * Mark the corresponding bit(s).
+ */
+inline void Mir2Lir::SetupRegMask(uint64_t* mask, int reg) {
+ *mask |= GetRegMaskCommon(reg);
+}
+
+/*
+ * Set up the proper fields in the resource mask
+ */
+inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
+ int opcode = lir->opcode;
+
+ if (opcode <= 0) {
+ lir->use_mask = lir->def_mask = 0;
+ return;
+ }
+
+ uint64_t flags = GetTargetInstFlags(opcode);
+
+ if (flags & NEEDS_FIXUP) {
+ lir->flags.pcRelFixup = true;
+ }
+
+ /* Get the starting size of the instruction's template */
+ lir->flags.size = GetInsnSize(lir);
+
+ /* Set up the mask for resources that are updated */
+ if (flags & (IS_LOAD | IS_STORE)) {
+ /* Default to heap - will catch specialized classes later */
+ SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
+ }
+
+ /*
+ * Conservatively assume the branch here will call out a function that in
+ * turn will trash everything.
+ */
+ if (flags & IS_BRANCH) {
+ lir->def_mask = lir->use_mask = ENCODE_ALL;
+ return;
+ }
+
+ if (flags & REG_DEF0) {
+ SetupRegMask(&lir->def_mask, lir->operands[0]);
+ }
+
+ if (flags & REG_DEF1) {
+ SetupRegMask(&lir->def_mask, lir->operands[1]);
+ }
+
+
+ if (flags & SETS_CCODES) {
+ lir->def_mask |= ENCODE_CCODE;
+ }
+
+ if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (flags & (1 << (kRegUse0 + i))) {
+ SetupRegMask(&lir->use_mask, lir->operands[i]);
+ }
+ }
+ }
+
+ if (flags & USES_CCODES) {
+ lir->use_mask |= ENCODE_CCODE;
+ }
+
+ // Handle target-specific actions
+ SetupTargetResourceMasks(lir);
+}
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
index 481078d7d5..754aae42dd 100644
--- a/src/compiler/dex/quick/mir_to_lir.cc
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#include "object_utils.h"
-
#include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/dataflow_iterator.h"
+#include "compiler/dex/dataflow_iterator-inl.h"
+#include "mir_to_lir-inl.h"
+#include "object_utils.h"
namespace art {
@@ -184,10 +184,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenMonitorExit(opt_flags, rl_src[0]);
break;
- case Instruction::CHECK_CAST:
- GenCheckCast(vB, rl_src[0]);
+ case Instruction::CHECK_CAST: {
+ GenCheckCast(mir->offset, vB, rl_src[0]);
break;
-
+ }
case Instruction::INSTANCE_OF:
GenInstanceof(vC, rl_dest, rl_src[0]);
break;
diff --git a/src/compiler/dex/quick/mir_to_lir.h b/src/compiler/dex/quick/mir_to_lir.h
index 21a0aacc13..9eb4524d0d 100644
--- a/src/compiler/dex/quick/mir_to_lir.h
+++ b/src/compiler/dex/quick/mir_to_lir.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
-#define ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#include "invoke_type.h"
#include "compiled_method.h"
@@ -24,6 +24,7 @@
#include "compiler/dex/backend.h"
#include "compiler/dex/growable_array.h"
#include "compiler/dex/arena_allocator.h"
+#include "compiler/driver/compiler_driver.h"
#include "safe_map.h"
namespace art {
@@ -98,7 +99,8 @@ struct RegisterInfo;
class MIRGraph;
class Mir2Lir;
-typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
+typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
+ const CompilerDriver::MethodReference& target_method,
uint32_t method_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type);
@@ -312,8 +314,10 @@ class Mir2Lir : public Backend {
void DumpRegPool(RegisterInfo* p, int num_regs);
void DumpCoreRegPool();
void DumpFpRegPool();
- void ClobberBody(RegisterInfo* p);
- void Clobber(int reg);
+ /* Mark a temp register as dead. Does not affect allocation state. */
+ void Clobber(int reg) {
+ ClobberBody(GetRegInfo(reg));
+ }
void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
void ClobberSReg(int s_reg);
int SRegToPMap(int s_reg);
@@ -337,7 +341,6 @@ class Mir2Lir : public Backend {
RegisterInfo* IsPromoted(int reg);
bool IsDirty(int reg);
void LockTemp(int reg);
- void ResetDefBody(RegisterInfo* p);
void ResetDef(int reg);
void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
void MarkDef(RegLocation rl, LIR *start, LIR *finish);
@@ -410,7 +413,8 @@ class Mir2Lir : public Backend {
void GenThrow(RegLocation rl_src);
void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
- void GenCheckCast(uint32_t type_idx, RegLocation rl_src);
+ void GenCheckCast(uint32_t insn_idx, uint32_t type_idx,
+ RegLocation rl_src);
void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
@@ -462,11 +466,15 @@ class Mir2Lir : public Backend {
void GenInvoke(CallInfo* info);
void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
- NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+ NextCallInsn next_call_insn,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this);
int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
- NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+ NextCallInsn next_call_insn,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this);
RegLocation InlineTarget(CallInfo* info);
@@ -486,7 +494,9 @@ class Mir2Lir : public Backend {
bool is_volatile, bool is_ordered);
bool GenIntrinsic(CallInfo* info);
int LoadArgRegs(CallInfo* info, int call_state,
- NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+ NextCallInsn next_call_insn,
+ const CompilerDriver::MethodReference& target_method,
+ uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this);
@@ -681,11 +691,6 @@ class Mir2Lir : public Backend {
// Temp workaround
void Workaround7250540(RegLocation rl_dest, int value);
- // TODO: add accessors for these.
- LIR* literal_list_; // Constants.
- LIR* method_literal_list_; // Method literals requiring patching.
- LIR* code_literal_list_; // Code literals requiring patching.
-
protected:
Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
@@ -693,6 +698,28 @@ class Mir2Lir : public Backend {
return cu_;
}
+ private:
+ void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
+ void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+ bool type_known_abstract, bool use_declaring_class,
+ bool can_assume_type_is_in_dex_cache,
+ uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
+
+ void ClobberBody(RegisterInfo* p);
+ void ResetDefBody(RegisterInfo* p) {
+ p->def_start = NULL;
+ p->def_end = NULL;
+ }
+
+ public:
+ // TODO: add accessors for these.
+ LIR* literal_list_; // Constants.
+ LIR* method_literal_list_; // Method literals requiring patching.
+ LIR* code_literal_list_; // Code literals requiring patching.
+
+ protected:
CompilationUnit* const cu_;
MIRGraph* const mir_graph_;
GrowableArray<SwitchTable*> switch_tables_;
@@ -749,4 +776,4 @@ class Mir2Lir : public Backend {
} // namespace art
-#endif // ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
+#endif  // ART_SRC_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
index 30ed1b7db2..8e0dba3a4f 100644
--- a/src/compiler/dex/quick/ralloc_util.cc
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -18,6 +18,7 @@
#include "compiler/dex/compiler_ir.h"
#include "compiler/dex/compiler_internals.h"
+#include "mir_to_lir-inl.h"
namespace art {
@@ -84,28 +85,6 @@ void Mir2Lir::DumpFpRegPool()
DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs);
}
-/* Mark a temp register as dead. Does not affect allocation state. */
-void Mir2Lir::ClobberBody(RegisterInfo* p)
-{
- if (p->is_temp) {
- DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
- p->live = false;
- p->s_reg = INVALID_SREG;
- p->def_start = NULL;
- p->def_end = NULL;
- if (p->pair) {
- p->pair = false;
- Clobber(p->partner);
- }
- }
-}
-
-/* Mark a temp register as dead. Does not affect allocation state. */
-void Mir2Lir::Clobber(int reg)
-{
- ClobberBody(GetRegInfo(reg));
-}
-
void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
{
int i;
@@ -555,12 +534,6 @@ void Mir2Lir::LockTemp(int reg)
LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
}
-void Mir2Lir::ResetDefBody(RegisterInfo* p)
-{
- p->def_start = NULL;
- p->def_end = NULL;
-}
-
void Mir2Lir::ResetDef(int reg)
{
ResetDefBody(GetRegInfo(reg));
diff --git a/src/compiler/dex/quick/x86/assemble_x86.cc b/src/compiler/dex/quick/x86/assemble_x86.cc
index f7c1594908..83dabe6c9a 100644
--- a/src/compiler/dex/quick/x86/assemble_x86.cc
+++ b/src/compiler/dex/quick/x86/assemble_x86.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_x86.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"
namespace art {
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index 614a72d6c2..1e37b2f7f0 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the X86 ISA */
#include "codegen_x86.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"
namespace art {
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
index 99e5148f9b..9050656071 100644
--- a/src/compiler/dex/quick/x86/codegen_x86.h
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -28,139 +28,139 @@ class X86Mir2Lir : public Mir2Lir {
X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
- virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+ bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
RegLocation rl_dest, int lit);
- virtual int LoadHelper(int offset);
- virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
- virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+ int LoadHelper(int offset);
+ LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+ LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
- virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
- virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+ LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
int r_dest, int r_dest_hi, OpSize size, int s_reg);
- virtual LIR* LoadConstantNoClobber(int r_dest, int value);
- virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
- virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
- virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
- virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
- virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ LIR* LoadConstantNoClobber(int r_dest, int value);
+ LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+ LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+ LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+ LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+ LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
int r_src, int r_src_hi, OpSize size, int s_reg);
- virtual void MarkGCCard(int val_reg, int tgt_addr_reg);
+ void MarkGCCard(int val_reg, int tgt_addr_reg);
// Required for target - register utilities.
- virtual bool IsFpReg(int reg);
- virtual bool SameRegType(int reg1, int reg2);
- virtual int AllocTypedTemp(bool fp_hint, int reg_class);
- virtual int AllocTypedTempPair(bool fp_hint, int reg_class);
- virtual int S2d(int low_reg, int high_reg);
- virtual int TargetReg(SpecialTargetRegister reg);
- virtual RegisterInfo* GetRegInfo(int reg);
- virtual RegLocation GetReturnAlt();
- virtual RegLocation GetReturnWideAlt();
- virtual RegLocation LocCReturn();
- virtual RegLocation LocCReturnDouble();
- virtual RegLocation LocCReturnFloat();
- virtual RegLocation LocCReturnWide();
- virtual uint32_t FpRegMask();
- virtual uint64_t GetRegMaskCommon(int reg);
- virtual void AdjustSpillMask();
- virtual void ClobberCalleeSave();
- virtual void FlushReg(int reg);
- virtual void FlushRegWide(int reg1, int reg2);
- virtual void FreeCallTemps();
- virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
- virtual void LockCallTemps();
- virtual void MarkPreservedSingle(int v_reg, int reg);
- virtual void CompilerInitializeRegAlloc();
+ bool IsFpReg(int reg);
+ bool SameRegType(int reg1, int reg2);
+ int AllocTypedTemp(bool fp_hint, int reg_class);
+ int AllocTypedTempPair(bool fp_hint, int reg_class);
+ int S2d(int low_reg, int high_reg);
+ int TargetReg(SpecialTargetRegister reg);
+ RegisterInfo* GetRegInfo(int reg);
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ uint32_t FpRegMask();
+ uint64_t GetRegMaskCommon(int reg);
+ void AdjustSpillMask();
+ void ClobberCalleeSave();
+ void FlushReg(int reg);
+ void FlushRegWide(int reg1, int reg2);
+ void FreeCallTemps();
+ void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+ void LockCallTemps();
+ void MarkPreservedSingle(int v_reg, int reg);
+ void CompilerInitializeRegAlloc();
// Required for target - miscellaneous.
- virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr);
- virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
- virtual void SetupTargetResourceMasks(LIR* lir);
- virtual const char* GetTargetInstFmt(int opcode);
- virtual const char* GetTargetInstName(int opcode);
- virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- virtual uint64_t GetPCUseDefEncoding();
- virtual uint64_t GetTargetInstFlags(int opcode);
- virtual int GetInsnSize(LIR* lir);
- virtual bool IsUnconditionalBranch(LIR* lir);
+ AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ void SetupTargetResourceMasks(LIR* lir);
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ uint64_t GetPCUseDefEncoding();
+ uint64_t GetTargetInstFlags(int opcode);
+ int GetInsnSize(LIR* lir);
+ bool IsUnconditionalBranch(LIR* lir);
// Required for target - Dalvik-level generators.
- virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array,
+ void GenArrayObjPut(int opt_flags, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale);
- virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale);
- virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale);
- virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
- virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
- virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
- virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
- virtual bool GenInlinedSqrt(CallInfo* info);
- virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+ bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+ bool GenInlinedSqrt(CallInfo* info);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
ThrowKind kind);
- virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
- virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
- virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- virtual void GenDivZeroCheck(int reg_lo, int reg_hi);
- virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- virtual void GenExitSequence();
- virtual void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
- virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- virtual void GenSelect(BasicBlock* bb, MIR* mir);
- virtual void GenMemBarrier(MemBarrierKind barrier_kind);
- virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
- virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
- virtual void GenMoveException(RegLocation rl_dest);
- virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+ RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheck(int reg_lo, int reg_hi);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+ void GenMonitorExit(int opt_flags, RegLocation rl_src);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
int lit, int first_bit, int second_bit);
- virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Single operation generators.
- virtual LIR* OpUnconditionalBranch(LIR* target);
- virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
- virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
- virtual LIR* OpCondBranch(ConditionCode cc, LIR* target);
- virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
- virtual LIR* OpFpRegCopy(int r_dest, int r_src);
- virtual LIR* OpIT(ConditionCode cond, const char* guide);
- virtual LIR* OpMem(OpKind op, int rBase, int disp);
- virtual LIR* OpPcRelLoad(int reg, LIR* target);
- virtual LIR* OpReg(OpKind op, int r_dest_src);
- virtual LIR* OpRegCopy(int r_dest, int r_src);
- virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src);
- virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
- virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
- virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
- virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
- virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
- virtual LIR* OpTestSuspend(LIR* target);
- virtual LIR* OpThreadMem(OpKind op, int thread_offset);
- virtual LIR* OpVldm(int rBase, int count);
- virtual LIR* OpVstm(int rBase, int count);
- virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
- virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
- virtual void OpTlsCmp(int offset, int val);
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+ LIR* OpFpRegCopy(int r_dest, int r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ LIR* OpMem(OpKind op, int rBase, int disp);
+ LIR* OpPcRelLoad(int reg, LIR* target);
+ LIR* OpReg(OpKind op, int r_dest_src);
+ LIR* OpRegCopy(int r_dest, int r_src);
+ LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+ LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+ LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+ LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+ LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpThreadMem(OpKind op, int thread_offset);
+ LIR* OpVldm(int rBase, int count);
+ LIR* OpVstm(int rBase, int count);
+ void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+ void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+ void OpTlsCmp(int offset, int val);
void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
void SpillCoreRegs();
diff --git a/src/compiler/dex/quick/x86/fp_x86.cc b/src/compiler/dex/quick/x86/fp_x86.cc
index db2cf289d8..3341e28ce5 100644
--- a/src/compiler/dex/quick/x86/fp_x86.cc
+++ b/src/compiler/dex/quick/x86/fp_x86.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_x86.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"
namespace art {
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
index b2ee949f22..fffb900ec9 100644
--- a/src/compiler/dex/quick/x86/int_x86.cc
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -18,6 +18,7 @@
#include "codegen_x86.h"
#include "mirror/array.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"
namespace art {
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
index e6a49f8ce3..9110b704b8 100644
--- a/src/compiler/dex/quick/x86/target_x86.cc
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -16,6 +16,7 @@
#include "codegen_x86.h"
#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"
#include <string>
diff --git a/src/compiler/dex/quick/x86/utility_x86.cc b/src/compiler/dex/quick/x86/utility_x86.cc
index 45c0e9cb41..82466d496b 100644
--- a/src/compiler/dex/quick/x86/utility_x86.cc
+++ b/src/compiler/dex/quick/x86/utility_x86.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_x86.h"
+#include "compiler/dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"
namespace art {
diff --git a/src/compiler/dex/ssa_transformation.cc b/src/compiler/dex/ssa_transformation.cc
index a90d705c6f..41820720d8 100644
--- a/src/compiler/dex/ssa_transformation.cc
+++ b/src/compiler/dex/ssa_transformation.cc
@@ -15,7 +15,7 @@
*/
#include "compiler_internals.h"
-#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
#define NOTVISITED (-1)
diff --git a/src/compiler/dex/vreg_analysis.cc b/src/compiler/dex/vreg_analysis.cc
index d4223f1ab1..b941140b00 100644
--- a/src/compiler/dex/vreg_analysis.cc
+++ b/src/compiler/dex/vreg_analysis.cc
@@ -15,7 +15,7 @@
*/
#include "compiler_internals.h"
-#include "dataflow_iterator.h"
+#include "compiler/dex/dataflow_iterator-inl.h"
namespace art {
@@ -292,18 +292,10 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb)
is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
}
/*
- * TODO: cleaner fix
- * We don't normally expect to see a Dalvik register
- * definition used both as a floating point and core
- * value. However, the instruction rewriting that occurs
- * during verification can eliminate some type information,
- * leaving us confused. The real fix here is either to
- * add explicit type information to Dalvik byte codes,
- * or to recognize THROW_VERIFICATION_ERROR as
- * an unconditional branch and support dead code elimination.
- * As a workaround we can detect this situation and
- * disable register promotion (which is the only thing that
- * relies on distinctions between core and fp usages.
+ * We don't normally expect to see a Dalvik register definition used both as a
+ * floating point and core value, though technically it could happen with constants.
+ * Until we have proper typing, detect this situation and disable register promotion
+ * (which relies on the distinction between core and fp usages).
*/
if ((defined_fp && (defined_core | defined_ref)) &&
((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
diff --git a/src/compiler/driver/compiler_driver.cc b/src/compiler/driver/compiler_driver.cc
index 698517277f..40cc4830d4 100644
--- a/src/compiler/driver/compiler_driver.cc
+++ b/src/compiler/driver/compiler_driver.cc
@@ -24,6 +24,7 @@
#include "base/stl_util.h"
#include "base/timing_logger.h"
#include "class_linker.h"
+#include "compiler/stubs/stubs.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
@@ -34,7 +35,7 @@
#include "gc/space.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/field-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object-inl.h"
@@ -72,7 +73,8 @@ class AOTCompilationStats {
resolved_types_(0), unresolved_types_(0),
resolved_instance_fields_(0), unresolved_instance_fields_(0),
resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
- type_based_devirtualization_(0) {
+ type_based_devirtualization_(0),
+ safe_casts_(0), not_safe_casts_(0) {
for (size_t i = 0; i <= kMaxInvokeType; i++) {
resolved_methods_[i] = 0;
unresolved_methods_[i] = 0;
@@ -91,8 +93,14 @@ class AOTCompilationStats {
"static fields resolved");
DumpStat(resolved_local_static_fields_, resolved_static_fields_ + unresolved_static_fields_,
"static fields local to a class");
- DumpStat(type_based_devirtualization_,virtual_made_direct_[kInterface] + virtual_made_direct_[kVirtual]
- - type_based_devirtualization_, "sharpened calls based on type information");
+ DumpStat(safe_casts_, not_safe_casts_, "check-casts removed based on type information");
+ // Note, the code below subtracts the stat value so that when added to the stat value we have
+ // 100% of samples. TODO: clean this up.
+ DumpStat(type_based_devirtualization_,
+ resolved_methods_[kVirtual] + unresolved_methods_[kVirtual] +
+ resolved_methods_[kInterface] + unresolved_methods_[kInterface] -
+ type_based_devirtualization_,
+ "virtual/interface calls made direct based on type information");
for (size_t i = 0; i <= kMaxInvokeType; i++) {
std::ostringstream oss;
@@ -184,40 +192,61 @@ class AOTCompilationStats {
unresolved_static_fields_++;
}
+ // Indicate that type information from the verifier led to devirtualization.
void PreciseTypeDevirtualization() {
STATS_LOCK();
type_based_devirtualization_++;
}
+
+ // Indicate that a method of the given type was resolved at compile time.
void ResolvedMethod(InvokeType type) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
resolved_methods_[type]++;
}
+ // Indicate that a method of the given type was unresolved at compile time as it was in an
+ // unknown dex file.
void UnresolvedMethod(InvokeType type) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
unresolved_methods_[type]++;
}
+ // Indicate that a type of virtual method dispatch has been converted into a direct method
+ // dispatch.
void VirtualMadeDirect(InvokeType type) {
- DCHECK_LE(type, kMaxInvokeType);
+ DCHECK(type == kVirtual || type == kInterface || type == kSuper);
STATS_LOCK();
virtual_made_direct_[type]++;
}
+ // Indicate that a method of the given type was able to call directly into boot.
void DirectCallsToBoot(InvokeType type) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_calls_to_boot_[type]++;
}
+ // Indicate that a method of the given type was able to be resolved directly from boot.
void DirectMethodsToBoot(InvokeType type) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_methods_to_boot_[type]++;
}
+ // A check-cast could be eliminated due to verifier type analysis.
+ void SafeCast() {
+ STATS_LOCK();
+ safe_casts_++;
+ }
+
+ // A check-cast couldn't be eliminated due to verifier type analysis.
+ void NotASafeCast() {
+ STATS_LOCK();
+ not_safe_casts_++;
+ }
+
private:
Mutex stats_lock_;
@@ -245,6 +274,9 @@ class AOTCompilationStats {
size_t direct_calls_to_boot_[kMaxInvokeType + 1];
size_t direct_methods_to_boot_[kMaxInvokeType + 1];
+ size_t safe_casts_;
+ size_t not_safe_casts_;
+
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
@@ -312,6 +344,7 @@ CompilerDriver::CompilerDriver(CompilerBackend compiler_backend, InstructionSet
compiler_(NULL),
compiler_context_(NULL),
jni_compiler_(NULL),
+ compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL)
{
std::string compiler_so_name(MakeCompilerSoName(compiler_backend_));
@@ -416,6 +449,66 @@ CompilerTls* CompilerDriver::GetTls() {
return res;
}
+const std::vector<uint8_t>* CompilerDriver::CreatePortableResolutionTrampoline() const {
+ switch (instruction_set_) {
+ case kArm:
+ case kThumb2:
+ return arm::CreatePortableResolutionTrampoline();
+ case kMips:
+ return mips::CreatePortableResolutionTrampoline();
+ case kX86:
+ return x86::CreatePortableResolutionTrampoline();
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+ return NULL;
+ }
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateQuickResolutionTrampoline() const {
+ switch (instruction_set_) {
+ case kArm:
+ case kThumb2:
+ return arm::CreateQuickResolutionTrampoline();
+ case kMips:
+ return mips::CreateQuickResolutionTrampoline();
+ case kX86:
+ return x86::CreateQuickResolutionTrampoline();
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+ return NULL;
+ }
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterEntry() const {
+ switch (instruction_set_) {
+ case kArm:
+ case kThumb2:
+ return arm::CreateInterpreterToInterpreterEntry();
+ case kMips:
+ return mips::CreateInterpreterToInterpreterEntry();
+ case kX86:
+ return x86::CreateInterpreterToInterpreterEntry();
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+ return NULL;
+ }
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToQuickEntry() const {
+ switch (instruction_set_) {
+ case kArm:
+ case kThumb2:
+ return arm::CreateInterpreterToQuickEntry();
+ case kMips:
+ return mips::CreateInterpreterToQuickEntry();
+ case kX86:
+ return x86::CreateInterpreterToQuickEntry();
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
+ return NULL;
+ }
+}
+
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files) {
DCHECK(!Runtime::Current()->IsStarted());
@@ -490,7 +583,7 @@ void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const De
InitializeClasses(class_loader, dex_files, thread_pool, timings);
}
-bool CompilerDriver::IsImageClass(const std::string& descriptor) const {
+bool CompilerDriver::IsImageClass(const char* descriptor) const {
if (image_classes_ == NULL) {
return false;
}
@@ -504,24 +597,19 @@ void CompilerDriver::RecordClassStatus(ClassReference ref, CompiledClass* compil
bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file,
uint32_t type_idx) {
- ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
- if (!IsImage()) {
- stats_->TypeNotInDexCache();
- return false;
- }
- mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- if (resolved_class == NULL) {
- stats_->TypeNotInDexCache();
- return false;
- }
- bool result = IsImageClass(ClassHelper(resolved_class).GetDescriptor());
- if (result) {
+ if (IsImage() && IsImageClass(dex_file.GetTypeDescriptor(dex_file.GetTypeId(type_idx)))) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+ CHECK(resolved_class != NULL);
+ }
stats_->TypeInDexCache();
+ return true;
} else {
stats_->TypeNotInDexCache();
+ return false;
}
- return result;
}
bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
@@ -545,7 +633,18 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
}
bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
- uint32_t type_idx) {
+ uint32_t type_idx,
+ bool* type_known_final, bool* type_known_abstract,
+ bool* equals_referrers_class) {
+ if (type_known_final != NULL) {
+ *type_known_final = false;
+ }
+ if (type_known_abstract != NULL) {
+ *type_known_abstract = false;
+ }
+ if (equals_referrers_class != NULL) {
+ *equals_referrers_class = false;
+ }
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
// Get type from dex cache assuming it was populated by the verifier
@@ -555,6 +654,9 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const Dex
return false; // Unknown class needs access checks.
}
const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
+ if (equals_referrers_class != NULL) {
+ *equals_referrers_class = (method_id.class_idx_ == type_idx);
+ }
mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
if (referrer_class == NULL) {
stats_->TypeNeedsAccessCheck();
@@ -565,6 +667,12 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const Dex
bool result = referrer_class->CanAccess(resolved_class);
if (result) {
stats_->TypeDoesntNeedAccessCheck();
+ if (type_known_final != NULL) {
+ *type_known_final = resolved_class->IsFinal() && !resolved_class->IsArrayClass();
+ }
+ if (type_known_abstract != NULL) {
+ *type_known_abstract = resolved_class->IsAbstract();
+ }
} else {
stats_->TypeNeedsAccessCheck();
}
@@ -600,9 +708,14 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
}
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
+ mirror::DexCache* dex_cache,
const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+ // The passed dex_cache is a hint, sanity check before asking the class linker that will take a
+ // lock.
+ if (dex_cache->GetDexFile() != mUnit->GetDexFile()) {
+ dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+ }
mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
const DexFile::MethodId& referrer_method_id = mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return mUnit->GetClassLinker()->ResolveType(*mUnit->GetDexFile(), referrer_method_id.class_idx_,
@@ -639,7 +752,9 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
// Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && !resolved_field->IsStatic()) {
- mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
+ mirror::Class* referrer_class =
+ ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(),
+ mUnit);
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
bool access_ok = referrer_class->CanAccess(fields_class) &&
@@ -688,7 +803,9 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
// Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
mirror::Field* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && resolved_field->IsStatic()) {
- mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
+ mirror::Class* referrer_class =
+ ComputeCompilingMethodsClass(soa, resolved_field->GetDeclaringClass()->GetDexCache(),
+ mUnit);
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
@@ -733,9 +850,8 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
}
// Search dex file for localized ssb index, may fail if field's class is a parent
// of the class mentioned in the dex file and there is no dex cache entry.
- std::string descriptor(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
const DexFile::StringId* string_id =
- mUnit->GetDexFile()->FindStringId(descriptor);
+ mUnit->GetDexFile()->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
if (string_id != NULL) {
const DexFile::TypeId* type_id =
mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
@@ -764,7 +880,8 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType s
mirror::Class* referrer_class,
mirror::AbstractMethod* method,
uintptr_t& direct_code,
- uintptr_t& direct_method) {
+ uintptr_t& direct_method,
+ bool update_stats) {
// For direct and static methods compute possible direct_code and direct_method values, ie
// an address for the Method* being invoked and an address of the code for that Method*.
// For interface calls compute a value for direct_method that is the interface method being
@@ -789,10 +906,12 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType s
// Ensure we run the clinit trampoline unless we are invoking a static method in the same class.
return;
}
- if (sharp_type != kInterface) { // Interfaces always go via a trampoline.
- stats_->DirectCallsToBoot(type);
+ if (update_stats) {
+ if (sharp_type != kInterface) { // Interfaces always go via a trampoline.
+ stats_->DirectCallsToBoot(type);
+ }
+ stats_->DirectMethodsToBoot(type);
}
- stats_->DirectMethodsToBoot(type);
bool compiling_boot = Runtime::Current()->GetHeap()->GetSpaces().size() == 1;
if (compiling_boot) {
const bool kSupportBootImageFixup = true;
@@ -813,26 +932,26 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType s
}
}
-bool CompilerDriver::ComputeInvokeInfo(uint32_t method_idx,const uint32_t dex_pc,
- const DexCompilationUnit* mUnit, InvokeType& type,
- int& vtable_idx, uintptr_t& direct_code,
- uintptr_t& direct_method) {
+bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
+ InvokeType& invoke_type,
+ MethodReference& target_method,
+ int& vtable_idx,
+ uintptr_t& direct_code, uintptr_t& direct_method,
+ bool update_stats) {
ScopedObjectAccess soa(Thread::Current());
-
- const bool kEnableVerifierBasedSharpening = true;
- const CompilerDriver::MethodReference ref_caller(mUnit->GetDexFile(), mUnit->GetDexMethodIndex());
- const CompilerDriver::MethodReference* ref_sharpen = verifier::MethodVerifier::GetDevirtMap(ref_caller, dex_pc);
- bool can_devirtualize = (dex_pc != art::kDexPCNotReady) && (ref_sharpen != NULL);
vtable_idx = -1;
direct_code = 0;
direct_method = 0;
mirror::AbstractMethod* resolved_method =
- ComputeMethodReferencedFromCompilingMethod(soa, mUnit, method_idx, type);
+ ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method.dex_method_index,
+ invoke_type);
if (resolved_method != NULL) {
// Don't try to fast-path if we don't understand the caller's class or this appears to be an
// Incompatible Class Change Error.
- mirror::Class* referrer_class = ComputeCompilingMethodsClass(soa, mUnit);
- bool icce = resolved_method->CheckIncompatibleClassChange(type);
+ mirror::Class* referrer_class =
+ ComputeCompilingMethodsClass(soa, resolved_method->GetDeclaringClass()->GetDexCache(),
+ mUnit);
+ bool icce = resolved_method->CheckIncompatibleClassChange(invoke_type);
if (referrer_class != NULL && !icce) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (!referrer_class->CanAccess(methods_class) ||
@@ -842,74 +961,168 @@ bool CompilerDriver::ComputeInvokeInfo(uint32_t method_idx,const uint32_t dex_pc
// protected method being made public by implementing an interface that re-declares the
// method public. Resort to the dex file to determine the correct class for the access
// check.
- const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
- methods_class =
- mUnit->GetClassLinker()->ResolveType(dex_file,
- dex_file.GetMethodId(method_idx).class_idx_,
- referrer_class);
+ uint16_t class_idx =
+ target_method.dex_file->GetMethodId(target_method.dex_method_index).class_idx_;
+ methods_class = mUnit->GetClassLinker()->ResolveType(*target_method.dex_file,
+ class_idx, referrer_class);
}
if (referrer_class->CanAccess(methods_class) &&
referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) {
- vtable_idx = resolved_method->GetMethodIndex();
- const bool kEnableSharpening = true;
- // Sharpen a virtual call into a direct call when the target is known.
- bool can_sharpen = type == kVirtual && (resolved_method->IsFinal() ||
- methods_class->IsFinal());
- // Ensure the vtable index will be correct to dispatch in the vtable of the super class.
- can_sharpen = can_sharpen || (type == kSuper && referrer_class != methods_class &&
- referrer_class->IsSubClass(methods_class) &&
- vtable_idx < methods_class->GetVTable()->GetLength() &&
- methods_class->GetVTable()->Get(vtable_idx) == resolved_method);
-
- if (kEnableSharpening && can_sharpen) {
- stats_->ResolvedMethod(type);
+ const bool kEnableFinalBasedSharpening = true;
+ // Sharpen a virtual call into a direct call when the target is known not to have been
+ // overridden (ie is final).
+ bool can_sharpen_virtual_based_on_type =
+ (invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
+ // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
+ // the super class.
+ bool can_sharpen_super_based_on_type = (invoke_type == kSuper) &&
+ (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
+ resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
+ (methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
+
+ if (kEnableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
+ can_sharpen_super_based_on_type)) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
- CHECK(referrer_class->GetDexCache()->GetResolvedMethod(method_idx) == resolved_method)
- << PrettyMethod(resolved_method);
- stats_->VirtualMadeDirect(type);
- GetCodeAndMethodForDirectCall(type, kDirect, referrer_class, resolved_method,
- direct_code, direct_method);
- type = kDirect;
+ CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method.dex_method_index) ==
+ resolved_method) << PrettyMethod(resolved_method);
+ if (update_stats) {
+ stats_->ResolvedMethod(invoke_type);
+ stats_->VirtualMadeDirect(invoke_type);
+ }
+ GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, resolved_method,
+ direct_code, direct_method, update_stats);
+ invoke_type = kDirect;
return true;
- } else if(can_devirtualize && kEnableSharpening && kEnableVerifierBasedSharpening) {
- // If traditional sharpening fails, try the sharpening based on type information (Devirtualization)
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*ref_sharpen->first);
- mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
- mirror::AbstractMethod* concrete_method = mUnit->GetClassLinker()->ResolveMethod(
- *ref_sharpen->first, ref_sharpen->second, dex_cache, class_loader, NULL, kVirtual);
- CHECK(concrete_method != NULL);
- CHECK(!concrete_method->IsAbstract());
- // TODO: fix breakage in image patching to be able to devirtualize cases with different
- // resolved and concrete methods.
- if(resolved_method == concrete_method) {
- GetCodeAndMethodForDirectCall(type, kDirect, referrer_class, concrete_method, direct_code, direct_method);
- stats_->VirtualMadeDirect(type);
- type = kDirect;
- stats_->PreciseTypeDevirtualization();
+ }
+ const bool kEnableVerifierBasedSharpening = true;
+ if (kEnableVerifierBasedSharpening && (invoke_type == kVirtual ||
+ invoke_type == kInterface)) {
+ // Did the verifier record a more precise invoke target based on its type information?
+ const CompilerDriver::MethodReference caller_method(mUnit->GetDexFile(),
+ mUnit->GetDexMethodIndex());
+ const CompilerDriver::MethodReference* devirt_map_target =
+ verifier::MethodVerifier::GetDevirtMap(caller_method, dex_pc);
+ if (devirt_map_target != NULL) {
+ mirror::DexCache* target_dex_cache =
+ mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file);
+ mirror::ClassLoader* class_loader =
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+ mirror::AbstractMethod* called_method =
+ mUnit->GetClassLinker()->ResolveMethod(*devirt_map_target->dex_file,
+ devirt_map_target->dex_method_index,
+ target_dex_cache, class_loader, NULL,
+ kVirtual);
+ CHECK(called_method != NULL);
+ CHECK(!called_method->IsAbstract());
+ GetCodeAndMethodForDirectCall(invoke_type, kDirect, referrer_class, called_method,
+ direct_code, direct_method, update_stats);
+ bool compiler_needs_dex_cache =
+ (GetCompilerBackend() == kPortable) ||
+ (GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) ||
+ (direct_code == 0) || (direct_code == static_cast<unsigned int>(-1)) ||
+ (direct_method == 0) || (direct_method == static_cast<unsigned int>(-1));
+ if ((devirt_map_target->dex_file != target_method.dex_file) &&
+ compiler_needs_dex_cache) {
+ // We need to use the dex cache to find either the method or code, and the dex file
+ // containing the method isn't the one expected for the target method. Try to find
+ // the method within the expected target dex file.
+              // TODO: the -1 could be handled as direct code if the patching knew the target dex
+              // file.
+ // TODO: quick only supports direct pointers with Thumb2.
+ // TODO: the following should be factored into a common helper routine to find
+ // one dex file's method within another.
+ const DexFile* dexfile = target_method.dex_file;
+ const DexFile* cm_dexfile =
+ called_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ const DexFile::MethodId& cm_method_id =
+ cm_dexfile->GetMethodId(called_method->GetDexMethodIndex());
+ const char* cm_descriptor = cm_dexfile->StringByTypeIdx(cm_method_id.class_idx_);
+ const DexFile::StringId* descriptor = dexfile->FindStringId(cm_descriptor);
+ if (descriptor != NULL) {
+ const DexFile::TypeId* type_id =
+ dexfile->FindTypeId(dexfile->GetIndexForStringId(*descriptor));
+ if (type_id != NULL) {
+ const char* cm_name = cm_dexfile->GetMethodName(cm_method_id);
+ const DexFile::StringId* name = dexfile->FindStringId(cm_name);
+ if (name != NULL) {
+ uint16_t return_type_idx;
+ std::vector<uint16_t> param_type_idxs;
+ bool success = dexfile->CreateTypeList(&return_type_idx, &param_type_idxs,
+ cm_dexfile->GetMethodSignature(cm_method_id));
+ if (success) {
+ const DexFile::ProtoId* sig =
+ dexfile->FindProtoId(return_type_idx, param_type_idxs);
+ if (sig != NULL) {
+ const DexFile::MethodId* method_id = dexfile->FindMethodId(*type_id,
+ *name, *sig);
+ if (method_id != NULL) {
+ if (update_stats) {
+ stats_->ResolvedMethod(invoke_type);
+ stats_->VirtualMadeDirect(invoke_type);
+ stats_->PreciseTypeDevirtualization();
+ }
+ target_method.dex_method_index = dexfile->GetIndexForMethodId(*method_id);
+ invoke_type = kDirect;
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ // TODO: the stats for direct code and method are off as we failed to find the direct
+ // method in the referring method's dex cache/file.
+ } else {
+ if (update_stats) {
+ stats_->ResolvedMethod(invoke_type);
+ stats_->VirtualMadeDirect(invoke_type);
+ stats_->PreciseTypeDevirtualization();
+ }
+ target_method = *devirt_map_target;
+ invoke_type = kDirect;
+ return true;
}
- stats_->ResolvedMethod(type);
- return true;
+ }
}
- else if (type == kSuper) {
+ if (invoke_type == kSuper) {
// Unsharpened super calls are suspicious so go slow-path.
} else {
- stats_->ResolvedMethod(type);
- GetCodeAndMethodForDirectCall(type, type, referrer_class, resolved_method,
- direct_code, direct_method);
+ // Sharpening failed so generate a regular resolved method dispatch.
+ if (update_stats) {
+ stats_->ResolvedMethod(invoke_type);
+ }
+ if (invoke_type == kVirtual || invoke_type == kSuper) {
+ vtable_idx = resolved_method->GetMethodIndex();
+ }
+ GetCodeAndMethodForDirectCall(invoke_type, invoke_type, referrer_class, resolved_method,
+ direct_code, direct_method, update_stats);
return true;
}
}
}
}
- // Clean up any exception left by method/type resolution
+ // Clean up any exception left by method/invoke_type resolution
if (soa.Self()->IsExceptionPending()) {
soa.Self()->ClearException();
}
- stats_->UnresolvedMethod(type);
+ if (update_stats) {
+ stats_->UnresolvedMethod(invoke_type);
+ }
return false; // Incomplete knowledge needs slow path.
}
+bool CompilerDriver::IsSafeCast(const MethodReference& mr, uint32_t dex_pc) {
+ bool result = verifier::MethodVerifier::IsSafeCast(mr, dex_pc);
+ if (result) {
+ stats_->SafeCast();
+ } else {
+ stats_->NotASafeCast();
+ }
+ return result;
+}
+
+
void CompilerDriver::AddCodePatch(const DexFile* dex_file,
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
diff --git a/src/compiler/driver/compiler_driver.h b/src/compiler/driver/compiler_driver.h
index 75d276d5b1..4f77bdb7a7 100644
--- a/src/compiler/driver/compiler_driver.h
+++ b/src/compiler/driver/compiler_driver.h
@@ -39,7 +39,6 @@ class ParallelCompilationManager;
class DexCompilationUnit;
class TimingLogger;
-const uint32_t kDexPCNotReady = 0xFFFFFF;
enum CompilerBackend {
kQuick,
kPortable,
@@ -99,6 +98,16 @@ class CompilerDriver {
CompilerTls* GetTls();
+ // Generate the trampolines that are invoked by unresolved direct methods.
+ const std::vector<uint8_t>* CreatePortableResolutionTrampoline() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const std::vector<uint8_t>* CreateInterpreterToQuickEntry() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile
typedef std::pair<const DexFile*, uint32_t> ClassReference;
@@ -106,7 +115,22 @@ class CompilerDriver {
LOCKS_EXCLUDED(compiled_classes_lock_);
// A method is uniquely located by its DexFile and the method_ids_ table index into that DexFile
- typedef std::pair<const DexFile*, uint32_t> MethodReference;
+ struct MethodReference {
+ MethodReference(const DexFile* file, uint32_t index) : dex_file(file), dex_method_index(index) {
+ }
+ const DexFile* dex_file;
+ uint32_t dex_method_index;
+ };
+
+ struct MethodReferenceComparator {
+ bool operator()(MethodReference mr1, MethodReference mr2) const {
+ if (mr1.dex_file == mr2.dex_file) {
+ return mr1.dex_method_index < mr2.dex_method_index;
+ } else {
+ return mr1.dex_file < mr2.dex_file;
+ }
+ }
+ };
CompiledMethod* GetCompiledMethod(MethodReference ref) const
LOCKS_EXCLUDED(compiled_methods_lock_);
@@ -124,7 +148,9 @@ class CompilerDriver {
// Are runtime access checks necessary in the compiled code?
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
- uint32_t type_idx)
+ uint32_t type_idx, bool* type_known_final = NULL,
+ bool* type_known_abstract = NULL,
+ bool* equals_referrers_class = NULL)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
@@ -146,11 +172,13 @@ class CompilerDriver {
// Can we fastpath a interface, super class or virtual method call? Computes method's vtable
// index.
- bool ComputeInvokeInfo(uint32_t method_idx, uint32_t dex_pc,
- const DexCompilationUnit* mUnit, InvokeType& type, int& vtable_idx,
- uintptr_t& direct_code, uintptr_t& direct_method)
+ bool ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
+ InvokeType& type, MethodReference& target_method, int& vtable_idx,
+ uintptr_t& direct_code, uintptr_t& direct_method, bool update_stats)
LOCKS_EXCLUDED(Locks::mutator_lock_);
+ bool IsSafeCast(const MethodReference& mr, uint32_t dex_pc);
+
// Record patch information for later fix up.
void AddCodePatch(const DexFile* dex_file,
uint32_t referrer_method_idx,
@@ -253,7 +281,7 @@ class CompilerDriver {
}
// Checks if class specified by type_idx is one of the image_classes_
- bool IsImageClass(const std::string& descriptor) const;
+ bool IsImageClass(const char* descriptor) const;
void RecordClassStatus(ClassReference ref, CompiledClass* compiled_class);
@@ -262,7 +290,8 @@ class CompilerDriver {
void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
mirror::Class* referrer_class,
mirror::AbstractMethod* method,
- uintptr_t& direct_code, uintptr_t& direct_method)
+ uintptr_t& direct_code, uintptr_t& direct_method,
+ bool update_stats)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
@@ -321,7 +350,7 @@ class CompilerDriver {
mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ClassTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
- typedef SafeMap<const MethodReference, CompiledMethod*> MethodTable;
+ typedef SafeMap<const MethodReference, CompiledMethod*, MethodReferenceComparator> MethodTable;
// All method references that this compiler has compiled.
mutable Mutex compiled_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_);
diff --git a/src/compiler/driver/compiler_driver_test.cc b/src/compiler/driver/compiler_driver_test.cc
index c87fefda16..a7fad6fb21 100644
--- a/src/compiler/driver/compiler_driver_test.cc
+++ b/src/compiler/driver/compiler_driver_test.cc
@@ -26,7 +26,7 @@
#include "heap.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
diff --git a/src/compiler/driver/dex_compilation_unit.cc b/src/compiler/driver/dex_compilation_unit.cc
index 962df42a21..c7a4df6ea4 100644
--- a/src/compiler/driver/dex_compilation_unit.cc
+++ b/src/compiler/driver/dex_compilation_unit.cc
@@ -31,18 +31,17 @@ DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu)
code_item_(cu->code_item),
class_def_idx_(cu->class_def_idx),
dex_method_idx_(cu->method_idx),
- access_flags_(cu->access_flags),
- symbol_(StringPrintf("dex_%s", MangleForJni(PrettyMethod(dex_method_idx_, *dex_file_)).c_str())) {
+ access_flags_(cu->access_flags) {
}
-DexCompilationUnit:: DexCompilationUnit(CompilationUnit* cu,
- jobject class_loader,
- ClassLinker* class_linker,
- const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
- uint32_t class_def_idx,
- uint32_t method_idx,
- uint32_t access_flags)
+DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu,
+ jobject class_loader,
+ ClassLinker* class_linker,
+ const DexFile& dex_file,
+ const DexFile::CodeItem* code_item,
+ uint32_t class_def_idx,
+ uint32_t method_idx,
+ uint32_t access_flags)
: cu_(cu),
class_loader_(class_loader),
class_linker_(class_linker),
@@ -50,8 +49,15 @@ DexCompilationUnit:: DexCompilationUnit(CompilationUnit* cu,
code_item_(code_item),
class_def_idx_(class_def_idx),
dex_method_idx_(method_idx),
- access_flags_(access_flags),
- symbol_(StringPrintf("dex_%s", MangleForJni(PrettyMethod(dex_method_idx_, *dex_file_)).c_str())) {
+ access_flags_(access_flags) {
+}
+
+const std::string& DexCompilationUnit::GetSymbol() {
+ if (symbol_.empty()) {
+ symbol_ = "dex_";
+ symbol_ += MangleForJni(PrettyMethod(dex_method_idx_, *dex_file_));
+ }
+ return symbol_;
}
} // namespace art
diff --git a/src/compiler/driver/dex_compilation_unit.h b/src/compiler/driver/dex_compilation_unit.h
index 0b90aaafdf..3c6129d642 100644
--- a/src/compiler/driver/dex_compilation_unit.h
+++ b/src/compiler/driver/dex_compilation_unit.h
@@ -92,9 +92,7 @@ class DexCompilationUnit {
return ((access_flags_ & kAccSynchronized) != 0);
}
- const std::string& GetSymbol() const {
- return symbol_;
- }
+ const std::string& GetSymbol();
private:
CompilationUnit* const cu_;
@@ -110,7 +108,7 @@ class DexCompilationUnit {
const uint32_t dex_method_idx_;
const uint32_t access_flags_;
- const std::string symbol_;
+ std::string symbol_;
};
} // namespace art
diff --git a/src/compiler/llvm/gbc_expander.cc b/src/compiler/llvm/gbc_expander.cc
index 99c8fd5ca7..bdf9aca68f 100644
--- a/src/compiler/llvm/gbc_expander.cc
+++ b/src/compiler/llvm/gbc_expander.cc
@@ -776,7 +776,8 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
art::InvokeType invoke_type =
static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
bool is_static = (invoke_type == art::kStatic);
- uint32_t callee_method_idx = LV2UInt(call_inst.getArgOperand(1));
+ art::CompilerDriver::MethodReference target_method(dex_compilation_unit_->GetDexFile(),
+ LV2UInt(call_inst.getArgOperand(1)));
// Load *this* actual parameter
llvm::Value* this_addr = (!is_static) ? call_inst.getArgOperand(3) : NULL;
@@ -785,18 +786,17 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
int vtable_idx = -1;
uintptr_t direct_code = 0;
uintptr_t direct_method = 0;
- // TODO: pass actual value of dex PC (instead of kDexPCNotready) needed by verifier based
- // sharpening after LLVM re-factoring is finished.
- bool is_fast_path = driver_->
- ComputeInvokeInfo(callee_method_idx, art::kDexPCNotReady, dex_compilation_unit_,
- invoke_type, vtable_idx, direct_code, direct_method);
-
+ bool is_fast_path = driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc,
+ invoke_type, target_method,
+ vtable_idx,
+ direct_code, direct_method,
+ true);
// Load the method object
llvm::Value* callee_method_object_addr = NULL;
if (!is_fast_path) {
callee_method_object_addr =
- EmitCallRuntimeForCalleeMethodObjectAddr(callee_method_idx, invoke_type,
+ EmitCallRuntimeForCalleeMethodObjectAddr(target_method.dex_method_index, invoke_type,
this_addr, dex_pc, is_fast_path);
} else {
switch (invoke_type) {
@@ -809,7 +809,7 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
irb_.getJObjectTy());
} else {
callee_method_object_addr =
- EmitLoadSDCalleeMethodObjectAddr(callee_method_idx);
+ EmitLoadSDCalleeMethodObjectAddr(target_method.dex_method_index);
}
break;
@@ -826,7 +826,7 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
case art::kInterface:
callee_method_object_addr =
- EmitCallRuntimeForCalleeMethodObjectAddr(callee_method_idx,
+ EmitCallRuntimeForCalleeMethodObjectAddr(target_method.dex_method_index,
invoke_type, this_addr,
dex_pc, is_fast_path);
break;
@@ -844,7 +844,7 @@ llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
llvm::Value* code_addr;
llvm::Type* func_type = GetFunctionType(call_inst.getType(),
- callee_method_idx, is_static);
+ target_method.dex_method_index, is_static);
if (direct_code != 0u && direct_code != static_cast<uintptr_t>(-1)) {
code_addr =
irb_.CreateIntToPtr(irb_.getPtrEquivInt(direct_code),
diff --git a/src/compiler/llvm/llvm_compilation_unit.h b/src/compiler/llvm/llvm_compilation_unit.h
index d96e778912..857d924840 100644
--- a/src/compiler/llvm/llvm_compilation_unit.h
+++ b/src/compiler/llvm/llvm_compilation_unit.h
@@ -81,10 +81,10 @@ class LlvmCompilationUnit {
void SetCompilerDriver(CompilerDriver* driver) {
driver_ = driver;
}
- const DexCompilationUnit* GetDexCompilationUnit() {
+ DexCompilationUnit* GetDexCompilationUnit() {
return dex_compilation_unit_;
}
- void SetDexCompilationUnit(const DexCompilationUnit* dex_compilation_unit) {
+ void SetDexCompilationUnit(DexCompilationUnit* dex_compilation_unit) {
dex_compilation_unit_ = dex_compilation_unit;
}
@@ -113,7 +113,7 @@ class LlvmCompilationUnit {
UniquePtr<IntrinsicHelper> intrinsic_helper_;
UniquePtr<LLVMInfo> llvm_info_;
CompilerDriver* driver_;
- const DexCompilationUnit* dex_compilation_unit_;
+ DexCompilationUnit* dex_compilation_unit_;
std::string bitcode_filename_;
diff --git a/src/compiler/llvm/runtime_support_llvm.cc b/src/compiler/llvm/runtime_support_llvm.cc
index bd6b01b211..6f2d07a94c 100644
--- a/src/compiler/llvm/runtime_support_llvm.cc
+++ b/src/compiler/llvm/runtime_support_llvm.cc
@@ -24,6 +24,7 @@
#include "dex_instruction.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
diff --git a/src/compiler/stubs/portable/stubs.cc b/src/compiler/stubs/portable/stubs.cc
new file mode 100644
index 0000000000..db551bf368
--- /dev/null
+++ b/src/compiler/stubs/portable/stubs.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler/stubs/stubs.h"
+#include "jni_internal.h"
+#include "oat/utils/arm/assembler_arm.h"
+#include "oat/utils/mips/assembler_mips.h"
+#include "oat/utils/x86/assembler_x86.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "stack_indirect_reference_table.h"
+#include "sirt_ref.h"
+
+#define __ assembler->
+
+namespace art {
+
+namespace arm {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
+ UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+ RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
+
+ __ PushList(save);
+ __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
+ __ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr
+ __ IncreaseFrameSize(12); // 3 words of space for alignment
+ // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
+ __ blx(R12);
+ __ mov(R12, ShifterOperand(R0)); // Save code address returned into R12
+ __ DecreaseFrameSize(12);
+ __ PopList(save);
+ __ cmp(R12, ShifterOperand(0));
+ __ bx(R12, NE); // If R12 != 0 tail call method's code
+ __ bx(LR); // Return to caller to handle exception
+
+ assembler->EmitSlowPaths();
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+ assembler->FinalizeInstructions(code);
+
+ return resolution_trampoline.release();
+}
+} // namespace arm
+
+namespace mips {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
+ UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+ // Build frame and save argument registers and RA.
+ __ AddConstant(SP, SP, -32);
+ __ StoreToOffset(kStoreWord, RA, SP, 28);
+ __ StoreToOffset(kStoreWord, A3, SP, 12);
+ __ StoreToOffset(kStoreWord, A2, SP, 8);
+ __ StoreToOffset(kStoreWord, A1, SP, 4);
+ __ StoreToOffset(kStoreWord, A0, SP, 0);
+
+ __ LoadFromOffset(kLoadWord, T9, S1,
+ ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ __ Move(A3, S1); // Pass Thread::Current() in A3
+ __ Move(A2, SP); // Pass SP for Method** callee_addr
+ __ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
+
+ // Restore frame, argument registers, and RA.
+ __ LoadFromOffset(kLoadWord, A0, SP, 0);
+ __ LoadFromOffset(kLoadWord, A1, SP, 4);
+ __ LoadFromOffset(kLoadWord, A2, SP, 8);
+ __ LoadFromOffset(kLoadWord, A3, SP, 12);
+ __ LoadFromOffset(kLoadWord, RA, SP, 28);
+ __ AddConstant(SP, SP, 32);
+
+ Label resolve_fail;
+ __ EmitBranch(V0, ZERO, &resolve_fail, true);
+ __ Jr(V0); // If V0 != 0 tail call method's code
+ __ Bind(&resolve_fail, false);
+ __ Jr(RA); // Return to caller to handle exception
+
+ assembler->EmitSlowPaths();
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+ assembler->FinalizeInstructions(code);
+
+ return resolution_trampoline.release();
+}
+} // namespace mips
+
+namespace x86 {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
+ UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+ __ pushl(EBP);
+ __ movl(EBP, ESP); // save ESP
+ __ subl(ESP, Immediate(8)); // Align stack
+ __ movl(EAX, Address(EBP, 8)); // Method* called
+ __ leal(EDX, Address(EBP, 8)); // Method** called_addr
+ __ fs()->pushl(Address::Absolute(Thread::SelfOffset())); // pass thread
+ __ pushl(EDX); // pass called_addr
+ __ pushl(ECX); // pass receiver
+ __ pushl(EAX); // pass called
+ // Call to resolve method.
+ __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
+ X86ManagedRegister::FromCpuRegister(ECX));
+ __ leave();
+
+ Label resolve_fail; // forward declaration
+ __ cmpl(EAX, Immediate(0));
+ __ j(kEqual, &resolve_fail);
+ __ jmp(EAX);
+ // Tail call to intended method.
+ __ Bind(&resolve_fail);
+ __ ret();
+
+ assembler->EmitSlowPaths();
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+ assembler->FinalizeInstructions(code);
+
+ return resolution_trampoline.release();
+}
+} // namespace x86
+
+} // namespace art
diff --git a/src/compiler/stubs/quick/stubs.cc b/src/compiler/stubs/quick/stubs.cc
new file mode 100644
index 0000000000..a8e691f35b
--- /dev/null
+++ b/src/compiler/stubs/quick/stubs.cc
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler/stubs/stubs.h"
+#include "jni_internal.h"
+#include "oat/utils/arm/assembler_arm.h"
+#include "oat/utils/mips/assembler_mips.h"
+#include "oat/utils/x86/assembler_x86.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "stack_indirect_reference_table.h"
+#include "sirt_ref.h"
+
+#define __ assembler->
+
+namespace art {
+
+namespace arm {
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
+ UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+ // | Out args |
+ // | Method* | <- SP on entry
+ // | LR | return address into caller
+ // | ... | callee saves
+ // | R3 | possible argument
+ // | R2 | possible argument
+ // | R1 | possible argument
+ // | R0 | junk on call to QuickResolutionTrampolineFromCode, holds result Method*
+ // | Method* | Callee save Method* set up by QuickResolutionTrampolineFromCode
+ // Save callee saves and ready frame for exception delivery
+ RegList save = (1 << R1) | (1 << R2) | (1 << R3) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) |
+ (1 << R10) | (1 << R11) | (1 << LR);
+ // TODO: enable when GetCalleeSaveMethod is available at stub generation time
+ // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
+ __ PushList(save);
+ __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
+ __ IncreaseFrameSize(8); // 2 words of space for alignment
+ __ mov(R2, ShifterOperand(SP)); // Pass SP
+ // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
+ __ blx(R12);
+ __ mov(R12, ShifterOperand(R0)); // Save code address returned into R12
+ // Restore registers which may have been modified by GC, "R0" will hold the Method*
+ __ DecreaseFrameSize(4);
+ __ PopList((1 << R0) | save);
+ __ bx(R12); // Leaf call to method's code
+ __ bkpt(0);
+
+ assembler->EmitSlowPaths();
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+ assembler->FinalizeInstructions(code);
+
+ return resolution_trampoline.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
+ UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+
+ __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ bkpt(0);
+
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
+ UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+
+ __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+ __ bkpt(0);
+
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+} // namespace arm
+
+namespace mips {
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
+ UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+ // | Out args |
+ // | Method* | <- SP on entry
+ // | RA | return address into caller
+ // | ... | callee saves
+ // | A3 | possible argument
+ // | A2 | possible argument
+ // | A1 | possible argument
+ // | A0/Method* | Callee save Method* set up by QuickResolutionTrampolineFromCode
+ // Save callee saves and ready frame for exception delivery
+ __ AddConstant(SP, SP, -64);
+ __ StoreToOffset(kStoreWord, RA, SP, 60);
+ __ StoreToOffset(kStoreWord, FP, SP, 56);
+ __ StoreToOffset(kStoreWord, GP, SP, 52);
+ __ StoreToOffset(kStoreWord, S7, SP, 48);
+ __ StoreToOffset(kStoreWord, S6, SP, 44);
+ __ StoreToOffset(kStoreWord, S5, SP, 40);
+ __ StoreToOffset(kStoreWord, S4, SP, 36);
+ __ StoreToOffset(kStoreWord, S3, SP, 32);
+ __ StoreToOffset(kStoreWord, S2, SP, 28);
+ __ StoreToOffset(kStoreWord, A3, SP, 12);
+ __ StoreToOffset(kStoreWord, A2, SP, 8);
+ __ StoreToOffset(kStoreWord, A1, SP, 4);
+
+ __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ Move(A3, S1); // Pass Thread::Current() in A3
+ __ Move(A2, SP); // Pass SP for Method** callee_addr
+ __ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
+
+ // Restore registers which may have been modified by GC
+ __ LoadFromOffset(kLoadWord, A0, SP, 0);
+ __ LoadFromOffset(kLoadWord, A1, SP, 4);
+ __ LoadFromOffset(kLoadWord, A2, SP, 8);
+ __ LoadFromOffset(kLoadWord, A3, SP, 12);
+ __ LoadFromOffset(kLoadWord, S2, SP, 28);
+ __ LoadFromOffset(kLoadWord, S3, SP, 32);
+ __ LoadFromOffset(kLoadWord, S4, SP, 36);
+ __ LoadFromOffset(kLoadWord, S5, SP, 40);
+ __ LoadFromOffset(kLoadWord, S6, SP, 44);
+ __ LoadFromOffset(kLoadWord, S7, SP, 48);
+ __ LoadFromOffset(kLoadWord, GP, SP, 52);
+ __ LoadFromOffset(kLoadWord, FP, SP, 56);
+ __ LoadFromOffset(kLoadWord, RA, SP, 60);
+ __ AddConstant(SP, SP, 64);
+
+ __ Move(T9, V0); // Put method's code in T9
+ __ Jr(T9); // Leaf call to method's code
+
+ __ Break();
+
+ assembler->EmitSlowPaths();
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+ assembler->FinalizeInstructions(code);
+
+ return resolution_trampoline.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
+ UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+
+ __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ Jr(T9);
+ __ Break();
+
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+
+  __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+  __ Jr(T9);
+  __ Break();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+} // namespace mips
+
+namespace x86 {
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
+ UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+ // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+ // return address
+ __ pushl(EDI);
+ __ pushl(ESI);
+ __ pushl(EBP);
+ __ pushl(EBX);
+ __ pushl(EDX);
+ __ pushl(ECX);
+ __ pushl(EAX); // <-- callee save Method* to go here
+ __ movl(EDX, ESP); // save ESP
+ __ fs()->pushl(Address::Absolute(Thread::SelfOffset())); // pass Thread*
+ __ pushl(EDX); // pass ESP for Method*
+ __ pushl(ECX); // pass receiver
+ __ pushl(EAX); // pass Method*
+
+ // Call to resolve method.
+ __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
+ X86ManagedRegister::FromCpuRegister(ECX));
+
+ __ movl(EDI, EAX); // save code pointer in EDI
+ __ addl(ESP, Immediate(16)); // Pop arguments
+ __ popl(EAX); // Restore args.
+ __ popl(ECX);
+ __ popl(EDX);
+ __ popl(EBX);
+ __ popl(EBP); // Restore callee saves.
+ __ popl(ESI);
+ // Swap EDI callee save with code pointer
+ __ xchgl(EDI, Address(ESP, 0));
+ // Tail call to intended method.
+ __ ret();
+
+ assembler->EmitSlowPaths();
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
+ assembler->FinalizeInstructions(code);
+
+ return resolution_trampoline.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
+ UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+ __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
+
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
+ UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+ __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
+
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+} // namespace x86
+
+} // namespace art
diff --git a/src/compiler/stubs/stubs.h b/src/compiler/stubs/stubs.h
new file mode 100644
index 0000000000..ebe761df35
--- /dev/null
+++ b/src/compiler/stubs/stubs.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_STUBS_STUBS_H_
+#define ART_SRC_COMPILER_STUBS_STUBS_H_
+
+#include "runtime.h"
+
+namespace art {
+
+namespace arm {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+}
+
+namespace mips {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+}
+
+namespace x86 {
+const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+}
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_STUBS_STUBS_H_
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index 6df381eebb..33c1ad445f 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -280,11 +280,11 @@ class Dex2Oat {
std::string image_file_location;
uint32_t image_file_location_oat_checksum = 0;
uint32_t image_file_location_oat_data_begin = 0;
- Heap* heap = Runtime::Current()->GetHeap();
- if (heap->GetSpaces().size() > 1) {
- ImageSpace* image_space = heap->GetImageSpace();
+ if (!driver->IsImage()) {
+ ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
- image_file_location_oat_data_begin = reinterpret_cast<uint32_t>(image_space->GetImageHeader().GetOatDataBegin());
+ image_file_location_oat_data_begin =
+ reinterpret_cast<uint32_t>(image_space->GetImageHeader().GetOatDataBegin());
image_file_location = image_space->GetImageFilename();
if (host_prefix != NULL && StartsWith(image_file_location, host_prefix->c_str())) {
image_file_location = image_file_location.substr(host_prefix->size());
@@ -292,6 +292,13 @@ class Dex2Oat {
}
std::vector<uint8_t> oat_contents;
+ // TODO: change ElfWriterQuick to not require the creation of oat_contents. The old pre-mclinker
// OatWriter streamed directly to disk. The new code can be adapted to do it as follows:
+ // 1.) use first pass of OatWriter to calculate size of oat structure,
+ // 2.) call ElfWriterQuick with pointer to OatWriter instead of contents,
+ // 3.) have ElfWriterQuick call back to OatWriter to stream generate the output directly in
+ // place in the elf file.
+ oat_contents.reserve(5 * MB);
VectorOutputStream vector_output_stream(oat_file->GetPath(), oat_contents);
if (!OatWriter::Create(vector_output_stream,
dex_files,
diff --git a/src/dex_file.cc b/src/dex_file.cc
index 0f0bed4db9..dad083cc3f 100644
--- a/src/dex_file.cc
+++ b/src/dex_file.cc
@@ -373,10 +373,10 @@ const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_kl
const uint16_t class_idx = GetIndexForTypeId(declaring_klass);
const uint32_t name_idx = GetIndexForStringId(name);
const uint16_t type_idx = GetIndexForTypeId(type);
- uint32_t lo = 0;
- uint32_t hi = NumFieldIds() - 1;
+ int32_t lo = 0;
+ int32_t hi = NumFieldIds() - 1;
while (hi >= lo) {
- uint32_t mid = (hi + lo) / 2;
+ int32_t mid = (hi + lo) / 2;
const DexFile::FieldId& field = GetFieldId(mid);
if (class_idx > field.class_idx_) {
lo = mid + 1;
@@ -408,10 +408,10 @@ const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_
const uint16_t class_idx = GetIndexForTypeId(declaring_klass);
const uint32_t name_idx = GetIndexForStringId(name);
const uint16_t proto_idx = GetIndexForProtoId(signature);
- uint32_t lo = 0;
- uint32_t hi = NumMethodIds() - 1;
+ int32_t lo = 0;
+ int32_t hi = NumMethodIds() - 1;
while (hi >= lo) {
- uint32_t mid = (hi + lo) / 2;
+ int32_t mid = (hi + lo) / 2;
const DexFile::MethodId& method = GetMethodId(mid);
if (class_idx > method.class_idx_) {
lo = mid + 1;
@@ -436,15 +436,35 @@ const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_
return NULL;
}
-const DexFile::StringId* DexFile::FindStringId(const std::string& string) const {
- uint32_t lo = 0;
- uint32_t hi = NumStringIds() - 1;
+const DexFile::StringId* DexFile::FindStringId(const char* string) const {
+ int32_t lo = 0;
+ int32_t hi = NumStringIds() - 1;
while (hi >= lo) {
- uint32_t mid = (hi + lo) / 2;
+ int32_t mid = (hi + lo) / 2;
uint32_t length;
const DexFile::StringId& str_id = GetStringId(mid);
const char* str = GetStringDataAndLength(str_id, &length);
- int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string.c_str(), str);
+ int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
+ if (compare > 0) {
+ lo = mid + 1;
+ } else if (compare < 0) {
+ hi = mid - 1;
+ } else {
+ return &str_id;
+ }
+ }
+ return NULL;
+}
+
+const DexFile::StringId* DexFile::FindStringId(const uint16_t* string) const {
+ int32_t lo = 0;
+ int32_t hi = NumStringIds() - 1;
+ while (hi >= lo) {
+ int32_t mid = (hi + lo) / 2;
+ uint32_t length;
+ const DexFile::StringId& str_id = GetStringId(mid);
+ const char* str = GetStringDataAndLength(str_id, &length);
+ int compare = CompareModifiedUtf8ToUtf16AsCodePointValues(str, string);
if (compare > 0) {
lo = mid + 1;
} else if (compare < 0) {
@@ -457,10 +477,10 @@ const DexFile::StringId* DexFile::FindStringId(const std::string& string) const
}
const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
- uint32_t lo = 0;
- uint32_t hi = NumTypeIds() - 1;
+ int32_t lo = 0;
+ int32_t hi = NumTypeIds() - 1;
while (hi >= lo) {
- uint32_t mid = (hi + lo) / 2;
+ int32_t mid = (hi + lo) / 2;
const TypeId& type_id = GetTypeId(mid);
if (string_idx > type_id.descriptor_idx_) {
lo = mid + 1;
@@ -475,10 +495,10 @@ const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
const std::vector<uint16_t>& signature_type_idxs) const {
- uint32_t lo = 0;
- uint32_t hi = NumProtoIds() - 1;
+ int32_t lo = 0;
+ int32_t hi = NumProtoIds() - 1;
while (hi >= lo) {
- uint32_t mid = (hi + lo) / 2;
+ int32_t mid = (hi + lo) / 2;
const DexFile::ProtoId& proto = GetProtoId(mid);
int compare = return_type_idx - proto.return_type_idx_;
if (compare == 0) {
@@ -544,7 +564,7 @@ bool DexFile::CreateTypeList(uint16_t* return_type_idx, std::vector<uint16_t>* p
descriptor += c;
} while (c != ';');
}
- const DexFile::StringId* string_id = FindStringId(descriptor);
+ const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
if (string_id == NULL) {
return false;
}
diff --git a/src/dex_file.h b/src/dex_file.h
index 6e34b5737f..ecc985fb3a 100644
--- a/src/dex_file.h
+++ b/src/dex_file.h
@@ -436,8 +436,11 @@ class DexFile {
return StringDataAndLengthByIdx(idx, &unicode_length);
}
- // Looks up a string id for a given string
- const StringId* FindStringId(const std::string& string) const;
+ // Looks up a string id for a given modified utf8 string.
+ const StringId* FindStringId(const char* string) const;
+
+ // Looks up a string id for a given utf16 string.
+ const StringId* FindStringId(const uint16_t* string) const;
// Returns the number of type identifiers in the .dex file.
size_t NumTypeIds() const {
@@ -974,7 +977,7 @@ class ClassDataItemIterator {
bool HasNext() const {
return pos_ < EndOfVirtualMethodsPos();
}
- void Next() {
+ inline void Next() {
pos_++;
if (pos_ < EndOfStaticFieldsPos()) {
last_idx_ = GetMemberIndex();
diff --git a/src/dex_file_verifier.cc b/src/dex_file_verifier.cc
index b1efcaadbd..6df4411565 100644
--- a/src/dex_file_verifier.cc
+++ b/src/dex_file_verifier.cc
@@ -369,10 +369,12 @@ bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx, uint32_t access_fla
}
if (expect_code && code_offset == 0) {
- LOG(ERROR) << StringPrintf("Unexpected zero value for class_data_item method code_off with access flags %x", access_flags);
+ LOG(ERROR)<< StringPrintf("Unexpected zero value for class_data_item method code_off"
+ " with access flags %x", access_flags);
return false;
} else if (!expect_code && code_offset != 0) {
- LOG(ERROR) << StringPrintf("Unexpected non-zero value %x for class_data_item method code_off with access flags %x", code_offset, access_flags);
+ LOG(ERROR) << StringPrintf("Unexpected non-zero value %x for class_data_item method code_off"
+ " with access flags %x", code_offset, access_flags);
return false;
}
@@ -544,7 +546,8 @@ bool DexFileVerifier::CheckEncodedAnnotation() {
}
if (last_idx >= idx && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order annotation_element name_idx: %x then %x", last_idx, idx);
+ LOG(ERROR) << StringPrintf("Out-of-order annotation_element name_idx: %x then %x",
+ last_idx, idx);
return false;
}
@@ -651,7 +654,8 @@ bool DexFileVerifier::CheckIntraCodeItem() {
uint32_t last_addr = 0;
while (try_items_size--) {
if (try_items->start_addr_ < last_addr) {
- LOG(ERROR) << StringPrintf("Out-of_order try_item with start_addr: %x", try_items->start_addr_);
+ LOG(ERROR) << StringPrintf("Out-of_order try_item with start_addr: %x",
+ try_items->start_addr_);
return false;
}
@@ -933,7 +937,8 @@ bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
last_idx = 0;
for (uint32_t i = 0; i < method_count; i++) {
if (last_idx >= method_item->method_idx_ && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x", last_idx, method_item->method_idx_);
+ LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x",
+ last_idx, method_item->method_idx_);
return false;
}
last_idx = method_item->method_idx_;
@@ -944,14 +949,16 @@ bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
const DexFile::ParameterAnnotationsItem* parameter_item =
reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
uint32_t parameter_count = item->parameters_size_;
- if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem), "parameter_annotations list")) {
+ if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem),
+ "parameter_annotations list")) {
return false;
}
last_idx = 0;
for (uint32_t i = 0; i < parameter_count; i++) {
if (last_idx >= parameter_item->method_idx_ && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x", last_idx, parameter_item->method_idx_);
+ LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x",
+ last_idx, parameter_item->method_idx_);
return false;
}
last_idx = parameter_item->method_idx_;
@@ -1051,7 +1058,8 @@ bool DexFileVerifier::CheckIntraSectionIterate(uint32_t offset, uint32_t count,
uint32_t count = list->size_;
if (!CheckPointerRange(list, list + 1, "annotation_set_ref_list") ||
- !CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem), "annotation_set_ref_list size")) {
+ !CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem),
+ "annotation_set_ref_list size")) {
return false;
}
ptr_ = reinterpret_cast<const byte*>(item + count);
@@ -1257,7 +1265,8 @@ bool DexFileVerifier::CheckIntraSection() {
return false;
}
if (section_offset != header_->map_off_) {
- LOG(ERROR) << StringPrintf("Map not at header-defined offset: %x, expected %x", section_offset, header_->map_off_);
+ LOG(ERROR) << StringPrintf("Map not at header-defined offset: %x, expected %x",
+ section_offset, header_->map_off_);
return false;
}
ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
@@ -1297,7 +1306,8 @@ bool DexFileVerifier::CheckOffsetToTypeMap(uint32_t offset, uint16_t type) {
return false;
}
if (it->second != type) {
- LOG(ERROR) << StringPrintf("Unexpected data map entry @ %x; expected %x, found %x", offset, type, it->second);
+ LOG(ERROR) << StringPrintf("Unexpected data map entry @ %x; expected %x, found %x",
+ offset, type, it->second);
return false;
}
return true;
@@ -1380,7 +1390,8 @@ bool DexFileVerifier::CheckInterTypeIdItem() {
if (previous_item_ != NULL) {
const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
if (prev_item->descriptor_idx_ >= item->descriptor_idx_) {
- LOG(ERROR) << StringPrintf("Out-of-order type_ids: %x then %x", prev_item->descriptor_idx_, item->descriptor_idx_);
+ LOG(ERROR) << StringPrintf("Out-of-order type_ids: %x then %x",
+ prev_item->descriptor_idx_, item->descriptor_idx_);
return false;
}
}
@@ -1757,7 +1768,8 @@ bool DexFileVerifier::CheckInterAnnotationsDirectoryItem() {
LOG(ERROR) << "Mismatched defining class for parameter_annotation";
return false;
}
- if (!CheckOffsetToTypeMap(parameter_item->annotations_off_, DexFile::kDexTypeAnnotationSetRefList)) {
+ if (!CheckOffsetToTypeMap(parameter_item->annotations_off_,
+ DexFile::kDexTypeAnnotationSetRefList)) {
return false;
}
parameter_item++;
diff --git a/src/dex_instruction-inl.h b/src/dex_instruction-inl.h
index 99dab121cf..b426e66a1c 100644
--- a/src/dex_instruction-inl.h
+++ b/src/dex_instruction-inl.h
@@ -21,13 +21,6 @@
namespace art {
-inline const Instruction* Instruction::Next_51l() const {
- DCHECK_EQ(FormatOf(Opcode()), k51l);
- size_t current_size_in_bytes = 5 * sizeof(uint16_t);
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- return reinterpret_cast<const Instruction*>(ptr + current_size_in_bytes);
-}
-
//------------------------------------------------------------------------------
// VRegA
//------------------------------------------------------------------------------
@@ -36,6 +29,11 @@ inline int8_t Instruction::VRegA_10t() const {
return static_cast<int8_t>(InstAA());
}
+inline uint8_t Instruction::VRegA_10x() const {
+ DCHECK_EQ(FormatOf(Opcode()), k10x);
+ return InstAA();
+}
+
inline uint4_t Instruction::VRegA_11n() const {
DCHECK_EQ(FormatOf(Opcode()), k11n);
return InstA();
diff --git a/src/dex_instruction.cc b/src/dex_instruction.cc
index 1b7d3bb3c4..b18b4d0226 100644
--- a/src/dex_instruction.cc
+++ b/src/dex_instruction.cc
@@ -299,7 +299,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case NEW_INSTANCE:
if (file != NULL) {
uint32_t type_idx = VRegB_21c();
- os << opcode << " v" << VRegA_21c() << ", " << PrettyType(type_idx, *file)
+ os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyType(type_idx, *file)
<< " // type@" << type_idx;
break;
} // else fall-through
@@ -312,7 +312,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case SGET_SHORT:
if (file != NULL) {
uint32_t field_idx = VRegB_21c();
- os << opcode << " v" << VRegA_21c() << ", " << PrettyField(field_idx, *file, true)
+ os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyField(field_idx, *file, true)
<< " // field@" << field_idx;
break;
} // else fall-through
@@ -325,7 +325,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case SPUT_SHORT:
if (file != NULL) {
uint32_t field_idx = VRegB_21c();
- os << opcode << " v" << VRegA_21c() << ", " << PrettyField(field_idx, *file, true)
+ os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyField(field_idx, *file, true)
<< " // field@" << field_idx;
break;
} // else fall-through
@@ -350,7 +350,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case IGET_SHORT:
if (file != NULL) {
uint32_t field_idx = VRegC_22c();
- os << opcode << " v" << VRegA_22c() << ", v" << VRegB_22c() << ", "
+ os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyField(field_idx, *file, true) << " // field@" << field_idx;
break;
} // else fall-through
@@ -363,21 +363,21 @@ std::string Instruction::DumpString(const DexFile* file) const {
case IPUT_SHORT:
if (file != NULL) {
uint32_t field_idx = VRegC_22c();
- os << opcode << " v" << VRegA_22c() << ", v" << VRegB_22c() << ", "
+ os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyField(field_idx, *file, true) << " // field@" << field_idx;
break;
} // else fall-through
case INSTANCE_OF:
if (file != NULL) {
uint32_t type_idx = VRegC_22c();
- os << opcode << " v" << VRegA_22c() << ", v" << VRegB_22c() << ", "
+ os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyType(type_idx, *file) << " // type@" << type_idx;
break;
}
case NEW_ARRAY:
if (file != NULL) {
uint32_t type_idx = VRegC_22c();
- os << opcode << " v" << VRegA_22c() << ", v" << VRegB_22c() << ", "
+ os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyType(type_idx, *file) << " // type@" << type_idx;
break;
} // else fall-through
diff --git a/src/dex_instruction.h b/src/dex_instruction.h
index 218acb6fe1..adaada7f87 100644
--- a/src/dex_instruction.h
+++ b/src/dex_instruction.h
@@ -162,39 +162,45 @@ class Instruction {
}
}
+ // Reads an instruction out of the stream at the specified address.
+ static const Instruction* At(const uint16_t* code) {
+ DCHECK(code != NULL);
+ return reinterpret_cast<const Instruction*>(code);
+ }
+
+ // Reads an instruction out of the stream from the current address plus an offset.
+ const Instruction* RelativeAt(int32_t offset) const {
+ return At(reinterpret_cast<const uint16_t*>(this) + offset);
+ }
+
// Returns a pointer to the next instruction in the stream.
const Instruction* Next() const {
- size_t current_size_in_bytes = SizeInCodeUnits() * sizeof(uint16_t);
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- return reinterpret_cast<const Instruction*>(ptr + current_size_in_bytes);
+ return RelativeAt(SizeInCodeUnits());
}
// Returns a pointer to the instruction after this 1xx instruction in the stream.
const Instruction* Next_1xx() const {
DCHECK(FormatOf(Opcode()) >= k10x && FormatOf(Opcode()) <= k10t);
- size_t current_size_in_bytes = 1 * sizeof(uint16_t);
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- return reinterpret_cast<const Instruction*>(ptr + current_size_in_bytes);
+ return RelativeAt(1);
}
// Returns a pointer to the instruction after this 2xx instruction in the stream.
const Instruction* Next_2xx() const {
DCHECK(FormatOf(Opcode()) >= k20t && FormatOf(Opcode()) <= k22c);
- size_t current_size_in_bytes = 2 * sizeof(uint16_t);
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- return reinterpret_cast<const Instruction*>(ptr + current_size_in_bytes);
+ return RelativeAt(2);
}
// Returns a pointer to the instruction after this 3xx instruction in the stream.
const Instruction* Next_3xx() const {
DCHECK(FormatOf(Opcode()) >= k32x && FormatOf(Opcode()) <= k3rc);
- size_t current_size_in_bytes = 3 * sizeof(uint16_t);
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- return reinterpret_cast<const Instruction*>(ptr + current_size_in_bytes);
+ return RelativeAt(3);
}
// Returns a pointer to the instruction after this 51l instruction in the stream.
- const Instruction* Next_51l() const;
+ const Instruction* Next_51l() const {
+ DCHECK(FormatOf(Opcode()) == k51l);
+ return RelativeAt(5);
+ }
// Returns the name of this instruction's opcode.
const char* Name() const {
@@ -208,6 +214,7 @@ class Instruction {
// VRegA
int8_t VRegA_10t() const;
+ uint8_t VRegA_10x() const;
uint4_t VRegA_11n() const;
uint8_t VRegA_11x() const;
uint4_t VRegA_12x() const;
@@ -271,12 +278,6 @@ class Instruction {
return static_cast<Code>(opcode);
}
- // Reads an instruction out of the stream at the specified address.
- static const Instruction* At(const uint16_t* code) {
- CHECK(code != NULL);
- return reinterpret_cast<const Instruction*>(code);
- }
-
// Returns the format of the given opcode.
static Format FormatOf(Code opcode) {
return kInstructionFormats[opcode];
diff --git a/src/image_test.cc b/src/image_test.cc
index 8066a90957..cd1a34fa56 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -48,7 +48,8 @@ TEST_F(ImageTest, WriteRead) {
dex_files.push_back(java_lang_dex_file_);
dex_files.push_back(conscrypt_file_);
VectorOutputStream output_stream(tmp_elf.GetFilename(), oat_contents);
- bool success_oat = OatWriter::Create(output_stream, dex_files, 0, 0, "", *compiler_driver_.get());
+ bool success_oat = OatWriter::Create(output_stream, dex_files, 0, 0, "",
+ *compiler_driver_.get());
ASSERT_TRUE(success_oat);
// Force all system classes into memory
diff --git a/src/image_writer.cc b/src/image_writer.cc
index a989a4e0a3..4ba99fe3d0 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -36,7 +36,7 @@
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/field-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object-inl.h"
@@ -81,6 +81,10 @@ bool ImageWriter::Write(const std::string& image_filename,
}
oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location);
class_linker->RegisterOatFile(*oat_file_);
+ interpreter_to_interpreter_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToInterpreterEntryOffset();
+ interpreter_to_quick_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToQuickEntryOffset();
+ portable_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
+ quick_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
{
Thread::Current()->TransitionFromSuspendedToRunnable();
@@ -168,13 +172,13 @@ void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
return;
}
String* string = obj->AsString();
- std::string utf8_string(string->ToModifiedUtf8());
+ const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
typedef Set::const_iterator CacheIt; // TODO: C++0x auto
for (CacheIt it = writer->dex_caches_.begin(), end = writer->dex_caches_.end(); it != end; ++it) {
DexCache* dex_cache = *it;
const DexFile& dex_file = *dex_cache->GetDexFile();
- const DexFile::StringId* string_id = dex_file.FindStringId(utf8_string);
+ const DexFile::StringId* string_id = dex_file.FindStringId(utf16_string);
if (string_id != NULL) {
// This string occurs in this dex file, assign the dex cache entry.
uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
@@ -512,17 +516,34 @@ void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy)
if (orig->IsAbstract()) {
// Code for abstract methods is set to the abstract method error stub when we load the image.
copy->SetEntryPointFromCompiledCode(NULL);
+ copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+ (GetOatAddress(interpreter_to_interpreter_entry_offset_)));
return;
+ } else {
+ copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+ (GetOatAddress(interpreter_to_quick_entry_offset_)));
}
if (orig == Runtime::Current()->GetResolutionMethod()) {
- // The resolution method's code is set to the resolution trampoline when we load the image.
- copy->SetEntryPointFromCompiledCode(NULL);
+#if defined(ART_USE_PORTABLE_COMPILER)
+ copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+#else
+ copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+#endif
return;
}
- // Non-abstract methods have code
- copy->SetEntryPointFromCompiledCode(GetOatAddress(orig->GetOatCodeOffset()));
+ // Use original code if it exists. Otherwise, set the code pointer to the resolution trampoline.
+ const byte* code = GetOatAddress(orig->GetOatCodeOffset());
+ if (code != NULL) {
+ copy->SetEntryPointFromCompiledCode(code);
+ } else {
+#if defined(ART_USE_PORTABLE_COMPILER)
+ copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+#else
+ copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+#endif
+ }
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym when we load the image.
diff --git a/src/image_writer.h b/src/image_writer.h
index 30a7f7f5da..4628e5a05f 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -40,7 +40,9 @@ class ImageWriter {
typedef std::set<std::string> DescriptorSet;
explicit ImageWriter(DescriptorSet* image_classes)
: oat_file_(NULL), image_end_(0), image_begin_(NULL), image_classes_(image_classes),
- oat_data_begin_(NULL) {}
+ oat_data_begin_(NULL), interpreter_to_interpreter_entry_offset_(0),
+ interpreter_to_quick_entry_offset_(0), portable_resolution_trampoline_offset_(0),
+ quick_resolution_trampoline_offset_(0) {}
~ImageWriter() {}
@@ -197,7 +199,13 @@ class ImageWriter {
// Beginning target oat address for the pointers from the output image to its oat file.
const byte* oat_data_begin_;
- // DexCaches seen while scanning for fixing up CodeAndDirectMethods.
+ // Offset from oat_data_begin_ to the stubs.
+ uint32_t interpreter_to_interpreter_entry_offset_;
+ uint32_t interpreter_to_quick_entry_offset_;
+ uint32_t portable_resolution_trampoline_offset_;
+ uint32_t quick_resolution_trampoline_offset_;
+
+ // DexCaches seen while scanning for fixing up CodeAndDirectMethods
typedef std::set<mirror::DexCache*> Set;
Set dex_caches_;
};
diff --git a/src/instrumentation.cc b/src/instrumentation.cc
index 39fd37766a..8af0885ef4 100644
--- a/src/instrumentation.cc
+++ b/src/instrumentation.cc
@@ -62,7 +62,7 @@ bool Instrumentation::InstallStubsForClass(mirror::Class* klass) {
if (is_initialized || !method->IsStatic() || method->IsConstructor()) {
new_code = class_linker->GetOatCodeFor(method);
} else {
- new_code = GetResolutionTrampoline();
+ new_code = GetResolutionTrampoline(class_linker);
}
} else { // !uninstall
if (!interpreter_stubs_installed_ || method->IsNative()) {
@@ -380,7 +380,7 @@ const void* Instrumentation::GetQuickCodeFor(const mirror::AbstractMethod* metho
if (LIKELY(!instrumentation_stubs_installed_)) {
const void* code = method->GetEntryPointFromCompiledCode();
DCHECK(code != NULL);
- if (LIKELY(code != GetResolutionTrampoline())) {
+ if (LIKELY(code != GetResolutionTrampoline(runtime->GetClassLinker()))) {
return code;
}
}
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index e2cc3d63aa..c7b9c58c74 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -50,12 +50,13 @@ static const int32_t kMinInt = std::numeric_limits<int32_t>::min();
static const int64_t kMaxLong = std::numeric_limits<int64_t>::max();
static const int64_t kMinLong = std::numeric_limits<int64_t>::min();
-static void UnstartedRuntimeInvoke(Thread* self, AbstractMethod* target_method,
- ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// In a runtime that's not started we intercept certain methods to avoid complicated dependency
// problems in core libraries.
- std::string name(PrettyMethod(target_method));
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
ClassLoader* class_loader = NULL; // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
@@ -132,7 +133,7 @@ static void UnstartedRuntimeInvoke(Thread* self, AbstractMethod* target_method,
}
} else {
// Not special, continue with regular interpreter execution.
- EnterInterpreterFromInterpreter(self, shadow_frame, result);
+ artInterpreterToInterpreterEntry(self, mh, code_item, shadow_frame, result);
}
}
@@ -383,51 +384,49 @@ static void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
ref->MonitorExit(self);
}
-static void DoInvoke(Thread* self, MethodHelper& mh, ShadowFrame& shadow_frame,
- const Instruction* inst, InvokeType type, bool is_range,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- Object* receiver;
- if (type == kStatic) {
- receiver = NULL;
- } else {
- receiver = shadow_frame.GetVRegReference(vregC);
- }
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<InvokeType type, bool is_range>
+static void DoInvoke(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
+
+template<InvokeType type, bool is_range>
+static void DoInvoke(Thread* self, ShadowFrame& shadow_frame,
+ const Instruction* inst, JValue* result) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- AbstractMethod* target_method = FindMethodFromCode(method_idx, receiver,
- shadow_frame.GetMethod(),
- self, true, type);
- if (UNLIKELY(target_method == NULL)) {
+ uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ Object* receiver = (type == kStatic) ? NULL : shadow_frame.GetVRegReference(vregC);
+ AbstractMethod* method = FindMethodFromCode(method_idx, receiver, shadow_frame.GetMethod(), self,
+ true, type);
+ if (UNLIKELY(method == NULL)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
return;
}
- MethodHelper target_mh(target_method);
- const DexFile::CodeItem* code_item = target_mh.GetCodeItem();
+ MethodHelper mh(method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
uint16_t num_regs;
uint16_t num_ins;
- if (code_item != NULL) {
+ if (LIKELY(code_item != NULL)) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
- } else if (target_method->IsAbstract()) {
+ } else if (method->IsAbstract()) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;",
- "abstract method \"%s\"", PrettyMethod(target_method).c_str());
+ "abstract method \"%s\"", PrettyMethod(method).c_str());
return;
} else {
- DCHECK(target_method->IsNative() || target_method->IsProxyMethod());
- num_regs = num_ins = AbstractMethod::NumArgRegisters(target_mh.GetShorty());
- if (!target_method->IsStatic()) {
+ DCHECK(method->IsNative() || method->IsProxyMethod());
+ num_regs = num_ins = AbstractMethod::NumArgRegisters(mh.GetShorty());
+ if (!method->IsStatic()) {
num_regs++;
num_ins++;
}
}
void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
- ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame,
- target_method, 0, memory));
+ ShadowFrame* new_shadow_frame(ShadowFrame::Create(num_regs, &shadow_frame, method, 0, memory));
size_t cur_reg = num_regs - num_ins;
if (receiver != NULL) {
new_shadow_frame->SetVRegReference(cur_reg, receiver);
@@ -435,13 +434,13 @@ static void DoInvoke(Thread* self, MethodHelper& mh, ShadowFrame& shadow_frame,
}
size_t arg_offset = (receiver == NULL) ? 0 : 1;
- const char* shorty = target_mh.GetShorty();
+ const char* shorty = mh.GetShorty();
uint32_t arg[5];
if (!is_range) {
inst->GetArgs(arg);
}
for (size_t shorty_pos = 0; cur_reg < num_regs; ++shorty_pos, cur_reg++, arg_offset++) {
- DCHECK_LT(shorty_pos + 1, target_mh.GetShortyLength());
+ DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
size_t arg_pos = is_range ? vregC + arg_offset : arg[arg_offset];
switch (shorty[shorty_pos + 1]) {
case 'L': {
@@ -464,20 +463,28 @@ static void DoInvoke(Thread* self, MethodHelper& mh, ShadowFrame& shadow_frame,
}
if (LIKELY(Runtime::Current()->IsStarted())) {
- (target_method->GetEntryPointFromInterpreter())(self, new_shadow_frame, result);
+ (method->GetEntryPointFromInterpreter())(self, mh, code_item, new_shadow_frame, result);
} else {
- UnstartedRuntimeInvoke(self, target_method, new_shadow_frame, result, num_regs - num_ins);
+ UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
}
}
+// We use template functions to optimize compiler inlining process. Otherwise,
+// some parts of the code (like a switch statement) which depend on a constant
+// parameter would not be inlined while it should be. These constant parameters
+// are now part of the template arguments.
+// Note these template functions are static and inlined so they should not be
+// part of the final object file.
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<FindFieldType find_type, Primitive::Type field_type>
static void DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, FindFieldType find_type,
- Primitive::Type field_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
+ const Instruction* inst)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
+template<FindFieldType find_type, Primitive::Type field_type>
static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, FindFieldType find_type,
- Primitive::Type field_type) {
+ const Instruction* inst) {
bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
Field* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
@@ -524,14 +531,16 @@ static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
}
}
-static void DoFieldPut(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, FindFieldType find_type,
- Primitive::Type field_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE;
+// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
+// specialization.
+template<FindFieldType find_type, Primitive::Type field_type>
+static void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+ const Instruction* inst)
+ NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
-static inline void DoFieldPut(Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, FindFieldType find_type,
- Primitive::Type field_type) {
+template<FindFieldType find_type, Primitive::Type field_type>
+static inline void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+ const Instruction* inst) {
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
Field* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
@@ -716,7 +725,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
// As the 'this' object won't change during the execution of current code, we
// want to cache it in local variables. Nevertheless, in order to let the
// garbage collector access it, we store it into sirt references.
- SirtRef<Object> this_object_ref(self, shadow_frame.GetThisObject());
+ SirtRef<Object> this_object_ref(self, shadow_frame.GetThisObject(code_item->ins_size_));
const Instruction* inst = Instruction::At(insns + shadow_frame.GetDexPC());
if (inst->GetDexPc(insns) == 0) { // We are entering the method as opposed to deoptimizing..
@@ -1193,34 +1202,34 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
}
case Instruction::GOTO: {
PREAMBLE();
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegA_10t());
+ inst = inst->RelativeAt(inst->VRegA_10t());
break;
}
case Instruction::GOTO_16: {
PREAMBLE();
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegA_20t());
+ inst = inst->RelativeAt(inst->VRegA_20t());
break;
}
case Instruction::GOTO_32: {
PREAMBLE();
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegA_30t());
+ inst = inst->RelativeAt(inst->VRegA_30t());
break;
}
case Instruction::PACKED_SWITCH: {
PREAMBLE();
- const uint16_t* switch_data = insns + inst->GetDexPc(insns) + inst->VRegB_31t();
+ const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
uint16_t size = switch_data[1];
DCHECK_GT(size, 0);
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- CHECK(IsAligned<4>(keys));
+ DCHECK(IsAligned<4>(keys));
int32_t first_key = keys[0];
const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]);
DCHECK(IsAligned<4>(targets));
int32_t index = test_val - first_key;
if (index >= 0 && index < size) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + targets[index]);
+ inst = inst->RelativeAt(targets[index]);
} else {
inst = inst->Next_3xx();
}
@@ -1228,18 +1237,18 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
}
case Instruction::SPARSE_SWITCH: {
PREAMBLE();
- uint32_t dex_pc = inst->GetDexPc(insns);
- const uint16_t* switch_data = insns + dex_pc + inst->VRegB_31t();
+ const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t());
- CHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+ DCHECK_EQ(switch_data[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
uint16_t size = switch_data[1];
- CHECK_GT(size, 0);
+ DCHECK_GT(size, 0);
const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]);
- CHECK(IsAligned<4>(keys));
+ DCHECK(IsAligned<4>(keys));
const int32_t* entries = keys + size;
- CHECK(IsAligned<4>(entries));
+ DCHECK(IsAligned<4>(entries));
int lo = 0;
int hi = size - 1;
+ const Instruction* current_inst = inst;
inst = inst->Next_3xx();
while (lo <= hi) {
int mid = (lo + hi) / 2;
@@ -1249,7 +1258,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
} else if (test_val > foundVal) {
lo = mid + 1;
} else {
- inst = Instruction::At(insns + dex_pc + entries[mid]);
+ inst = current_inst->RelativeAt(entries[mid]);
break;
}
}
@@ -1339,7 +1348,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_EQ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_22t()) == shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegC_22t());
+ inst = inst->RelativeAt(inst->VRegC_22t());
} else {
inst = inst->Next_2xx();
}
@@ -1348,7 +1357,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_NE: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_22t()) != shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegC_22t());
+ inst = inst->RelativeAt(inst->VRegC_22t());
} else {
inst = inst->Next_2xx();
}
@@ -1357,7 +1366,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_LT: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_22t()) < shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegC_22t());
+ inst = inst->RelativeAt(inst->VRegC_22t());
} else {
inst = inst->Next_2xx();
}
@@ -1366,7 +1375,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_GE: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_22t()) >= shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegC_22t());
+ inst = inst->RelativeAt(inst->VRegC_22t());
} else {
inst = inst->Next_2xx();
}
@@ -1375,7 +1384,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_GT: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_22t()) > shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegC_22t());
+ inst = inst->RelativeAt(inst->VRegC_22t());
} else {
inst = inst->Next_2xx();
}
@@ -1384,7 +1393,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_LE: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_22t()) <= shadow_frame.GetVReg(inst->VRegB_22t())) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegC_22t());
+ inst = inst->RelativeAt(inst->VRegC_22t());
} else {
inst = inst->Next_2xx();
}
@@ -1393,7 +1402,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_EQZ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_21t()) == 0) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegB_21t());
+ inst = inst->RelativeAt(inst->VRegB_21t());
} else {
inst = inst->Next_2xx();
}
@@ -1402,7 +1411,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_NEZ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_21t()) != 0) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegB_21t());
+ inst = inst->RelativeAt(inst->VRegB_21t());
} else {
inst = inst->Next_2xx();
}
@@ -1411,7 +1420,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_LTZ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_21t()) < 0) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegB_21t());
+ inst = inst->RelativeAt(inst->VRegB_21t());
} else {
inst = inst->Next_2xx();
}
@@ -1420,7 +1429,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_GEZ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_21t()) >= 0) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegB_21t());
+ inst = inst->RelativeAt(inst->VRegB_21t());
} else {
inst = inst->Next_2xx();
}
@@ -1429,7 +1438,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_GTZ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_21t()) > 0) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegB_21t());
+ inst = inst->RelativeAt(inst->VRegB_21t());
} else {
inst = inst->Next_2xx();
}
@@ -1438,7 +1447,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
case Instruction::IF_LEZ: {
PREAMBLE();
if (shadow_frame.GetVReg(inst->VRegA_21t()) <= 0) {
- inst = Instruction::At(insns + inst->GetDexPc(insns) + inst->VRegB_21t());
+ inst = inst->RelativeAt(inst->VRegB_21t());
} else {
inst = inst->Next_2xx();
}
@@ -1705,192 +1714,192 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
}
case Instruction::IGET_BOOLEAN:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstancePrimitiveRead, Primitive::kPrimBoolean);
+ DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IGET_BYTE:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstancePrimitiveRead, Primitive::kPrimByte);
+ DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IGET_CHAR:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstancePrimitiveRead, Primitive::kPrimChar);
+ DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IGET_SHORT:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstancePrimitiveRead, Primitive::kPrimShort);
+ DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IGET:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstancePrimitiveRead, Primitive::kPrimInt);
+ DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IGET_WIDE:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstancePrimitiveRead, Primitive::kPrimLong);
+ DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IGET_OBJECT:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, InstanceObjectRead, Primitive::kPrimNot);
+ DoFieldGet<InstanceObjectRead, Primitive::kPrimNot>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET_BOOLEAN:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticPrimitiveRead, Primitive::kPrimBoolean);
+ DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET_BYTE:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticPrimitiveRead, Primitive::kPrimByte);
+ DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET_CHAR:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticPrimitiveRead, Primitive::kPrimChar);
+ DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET_SHORT:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticPrimitiveRead, Primitive::kPrimShort);
+ DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticPrimitiveRead, Primitive::kPrimInt);
+ DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET_WIDE:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticPrimitiveRead, Primitive::kPrimLong);
+ DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SGET_OBJECT:
PREAMBLE();
- DoFieldGet(self, shadow_frame, inst, StaticObjectRead, Primitive::kPrimNot);
+ DoFieldGet<StaticObjectRead, Primitive::kPrimNot>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT_BOOLEAN:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstancePrimitiveWrite, Primitive::kPrimBoolean);
+ DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT_BYTE:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstancePrimitiveWrite, Primitive::kPrimByte);
+ DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT_CHAR:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstancePrimitiveWrite, Primitive::kPrimChar);
+ DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT_SHORT:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstancePrimitiveWrite, Primitive::kPrimShort);
+ DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstancePrimitiveWrite, Primitive::kPrimInt);
+ DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT_WIDE:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstancePrimitiveWrite, Primitive::kPrimLong);
+ DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::IPUT_OBJECT:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, InstanceObjectWrite, Primitive::kPrimNot);
+ DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT_BOOLEAN:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticPrimitiveWrite, Primitive::kPrimBoolean);
+ DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT_BYTE:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticPrimitiveWrite, Primitive::kPrimByte);
+ DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT_CHAR:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticPrimitiveWrite, Primitive::kPrimChar);
+ DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT_SHORT:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticPrimitiveWrite, Primitive::kPrimShort);
+ DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticPrimitiveWrite, Primitive::kPrimInt);
+ DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT_WIDE:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticPrimitiveWrite, Primitive::kPrimLong);
+ DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::SPUT_OBJECT:
PREAMBLE();
- DoFieldPut(self, shadow_frame, inst, StaticObjectWrite, Primitive::kPrimNot);
+ DoFieldPut<StaticObjectWrite, Primitive::kPrimNot>(self, shadow_frame, inst);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
break;
case Instruction::INVOKE_VIRTUAL:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kVirtual, false, &result_register);
+ DoInvoke<kVirtual, false>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kVirtual, true, &result_register);
+ DoInvoke<kVirtual, true>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_SUPER:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kSuper, false, &result_register);
+ DoInvoke<kSuper, false>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_SUPER_RANGE:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kSuper, true, &result_register);
+ DoInvoke<kSuper, true>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_DIRECT:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kDirect, false, &result_register);
+ DoInvoke<kDirect, false>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_DIRECT_RANGE:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kDirect, true, &result_register);
+ DoInvoke<kDirect, true>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_INTERFACE:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kInterface, false, &result_register);
+ DoInvoke<kInterface, false>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_INTERFACE_RANGE:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kInterface, true, &result_register);
+ DoInvoke<kInterface, true>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_STATIC:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kStatic, false, &result_register);
+ DoInvoke<kStatic, false>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::INVOKE_STATIC_RANGE:
PREAMBLE();
- DoInvoke(self, mh, shadow_frame, inst, kStatic, true, &result_register);
+ DoInvoke<kStatic, true>(self, shadow_frame, inst, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
break;
case Instruction::NEG_INT:
@@ -2296,6 +2305,14 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
inst = inst->Next_1xx();
break;
}
+ case Instruction::DIV_INT_2ADDR: {
+ PREAMBLE();
+ uint32_t vregA = inst->VRegA_12x();
+ DoIntDivide(self, shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x()));
+ inst = inst->Next_1xx();
+ break;
+ }
case Instruction::REM_INT_2ADDR: {
PREAMBLE();
uint32_t vregA = inst->VRegA_12x();
@@ -2358,14 +2375,6 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
inst = inst->Next_1xx();
break;
}
- case Instruction::DIV_INT_2ADDR: {
- PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
- DoIntDivide(self, shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
- break;
- }
case Instruction::ADD_LONG_2ADDR: {
PREAMBLE();
uint32_t vregA = inst->VRegA_12x();
@@ -2695,7 +2704,7 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c
void EnterInterpreterFromInvoke(Thread* self, AbstractMethod* method, Object* receiver,
uint32_t* args, JValue* result) {
DCHECK_EQ(self, Thread::Current());
- if (__builtin_frame_address(0) < self->GetStackEnd()) {
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
ThrowStackOverflowError(self);
return;
}
@@ -2799,7 +2808,7 @@ JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::C
ShadowFrame& shadow_frame)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(self, Thread::Current());
- if (__builtin_frame_address(0) < self->GetStackEnd()) {
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
ThrowStackOverflowError(self);
return JValue();
}
@@ -2807,9 +2816,11 @@ JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::C
return Execute(self, mh, code_item, shadow_frame, JValue());
}
-void EnterInterpreterFromInterpreter(Thread* self, ShadowFrame* shadow_frame, JValue* result)
+void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (__builtin_frame_address(0) < self->GetStackEnd()) {
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
ThrowStackOverflowError(self);
return;
}
@@ -2826,8 +2837,6 @@ void EnterInterpreterFromInterpreter(Thread* self, ShadowFrame* shadow_frame, JV
self->PushShadowFrame(shadow_frame);
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
if (LIKELY(!method->IsNative())) {
result->SetJ(Execute(self, mh, code_item, *shadow_frame, JValue()).GetJ());
} else {
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index 96fa05034e..20166ac545 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -47,7 +47,9 @@ extern JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh,
ShadowFrame& shadow_frame)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-extern void EnterInterpreterFromInterpreter(Thread* self, ShadowFrame* shadow_frame, JValue* result)
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
} // namespace interpreter
diff --git a/src/locks.cc b/src/locks.cc
index eb0620c0c3..51a40c383a 100644
--- a/src/locks.cc
+++ b/src/locks.cc
@@ -22,7 +22,7 @@ namespace art {
Mutex* Locks::abort_lock_ = NULL;
Mutex* Locks::breakpoint_lock_ = NULL;
-Mutex* Locks::classlinker_classes_lock_ = NULL;
+ReaderWriterMutex* Locks::classlinker_classes_lock_ = NULL;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = NULL;
Mutex* Locks::logging_lock_ = NULL;
ReaderWriterMutex* Locks::mutator_lock_ = NULL;
@@ -52,7 +52,8 @@ void Locks::Init() {
DCHECK(breakpoint_lock_ == NULL);
breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
DCHECK(classlinker_classes_lock_ == NULL);
- classlinker_classes_lock_ = new Mutex("ClassLinker classes lock", kClassLinkerClassesLock);
+ classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
+ kClassLinkerClassesLock);
DCHECK(heap_bitmap_lock_ == NULL);
heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
DCHECK(mutator_lock_ == NULL);
diff --git a/src/locks.h b/src/locks.h
index 431a14816a..ceb04b937a 100644
--- a/src/locks.h
+++ b/src/locks.h
@@ -143,7 +143,7 @@ class Locks {
static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
// Guards lists of classes within the class linker.
- static Mutex* classlinker_classes_lock_ ACQUIRED_AFTER(trace_lock_);
+ static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(trace_lock_);
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
diff --git a/src/mirror/abstract_method-inl.h b/src/mirror/abstract_method-inl.h
index d4f0f2c6bc..a8238867aa 100644
--- a/src/mirror/abstract_method-inl.h
+++ b/src/mirror/abstract_method-inl.h
@@ -117,7 +117,8 @@ inline void AbstractMethod::AssertPcIsWithinCode(uintptr_t pc) const {
if (GetEntryPointFromCompiledCode() == GetInterpreterEntryPoint()) {
return;
}
- if (GetEntryPointFromCompiledCode() == GetResolutionTrampoline()) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (GetEntryPointFromCompiledCode() == GetResolutionTrampoline(class_linker)) {
return;
}
DCHECK(IsWithinCode(pc))
diff --git a/src/mirror/abstract_method.cc b/src/mirror/abstract_method.cc
index 5258795ace..c2ab29e031 100644
--- a/src/mirror/abstract_method.cc
+++ b/src/mirror/abstract_method.cc
@@ -268,45 +268,28 @@ void AbstractMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JV
result->SetJ(0);
}
} else {
- bool interpret = runtime->GetInstrumentation()->InterpretOnly() && !IsNative() &&
- !IsProxyMethod();
const bool kLogInvocationStartAndReturn = false;
if (GetEntryPointFromCompiledCode() != NULL) {
- if (!interpret) {
- if (kLogInvocationStartAndReturn) {
- LOG(INFO) << StringPrintf("Invoking '%s' code=%p", PrettyMethod(this).c_str(), GetEntryPointFromCompiledCode());
- }
+ if (kLogInvocationStartAndReturn) {
+ LOG(INFO) << StringPrintf("Invoking '%s' code=%p", PrettyMethod(this).c_str(), GetEntryPointFromCompiledCode());
+ }
#ifdef ART_USE_PORTABLE_COMPILER
- (*art_portable_invoke_stub)(this, args, args_size, self, result, result_type);
+ (*art_portable_invoke_stub)(this, args, args_size, self, result, result_type);
#else
- (*art_quick_invoke_stub)(this, args, args_size, self, result, result_type);
+ (*art_quick_invoke_stub)(this, args, args_size, self, result, result_type);
#endif
- if (UNLIKELY(reinterpret_cast<int32_t>(self->GetException(NULL)) == -1)) {
- // Unusual case where we were running LLVM generated code and an
- // exception was thrown to force the activations to be removed from the
- // stack. Continue execution in the interpreter.
- self->ClearException();
- ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(result);
- self->SetTopOfStack(NULL, 0);
- self->SetTopOfShadowStack(shadow_frame);
- interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
- }
- if (kLogInvocationStartAndReturn) {
- LOG(INFO) << StringPrintf("Returned '%s' code=%p", PrettyMethod(this).c_str(), GetEntryPointFromCompiledCode());
- }
- } else {
- if (kLogInvocationStartAndReturn) {
- LOG(INFO) << "Interpreting " << PrettyMethod(this) << "'";
- }
- if (this->IsStatic()) {
- art::interpreter::EnterInterpreterFromInvoke(self, this, NULL, args, result);
- } else {
- Object* receiver = reinterpret_cast<Object*>(args[0]);
- art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args + 1, result);
- }
- if (kLogInvocationStartAndReturn) {
- LOG(INFO) << "Returned '" << PrettyMethod(this) << "'";
- }
+ if (UNLIKELY(reinterpret_cast<int32_t>(self->GetException(NULL)) == -1)) {
+ // Unusual case where we were running LLVM generated code and an
+ // exception was thrown to force the activations to be removed from the
+ // stack. Continue execution in the interpreter.
+ self->ClearException();
+ ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(result);
+ self->SetTopOfStack(NULL, 0);
+ self->SetTopOfShadowStack(shadow_frame);
+ interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
+ }
+ if (kLogInvocationStartAndReturn) {
+ LOG(INFO) << StringPrintf("Returned '%s' code=%p", PrettyMethod(this).c_str(), GetEntryPointFromCompiledCode());
}
} else {
LOG(INFO) << "Not invoking '" << PrettyMethod(this)
diff --git a/src/mirror/abstract_method.h b/src/mirror/abstract_method.h
index c8aa11e5df..59dfdd5760 100644
--- a/src/mirror/abstract_method.h
+++ b/src/mirror/abstract_method.h
@@ -18,6 +18,7 @@
#define ART_SRC_MIRROR_METHOD_H_
#include "class.h"
+#include "dex_file.h"
#include "invoke_type.h"
#include "locks.h"
#include "modifiers.h"
@@ -29,6 +30,7 @@ struct AbstractMethodOffsets;
struct ConstructorMethodOffsets;
union JValue;
struct MethodClassOffsets;
+class MethodHelper;
struct MethodOffsets;
class StringPiece;
class ShadowFrame;
@@ -37,7 +39,8 @@ namespace mirror {
class StaticStorageBase;
-typedef void (EntryPointFromInterpreter)(Thread* self, ShadowFrame* shadow_frame, JValue* result);
+typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result);
// C++ mirror of java.lang.reflect.Method and java.lang.reflect.Constructor
class MANAGED AbstractMethod : public Object {
diff --git a/src/mirror/dex_cache-inl.h b/src/mirror/dex_cache-inl.h
new file mode 100644
index 0000000000..3b17c428a5
--- /dev/null
+++ b/src/mirror/dex_cache-inl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_MIRROR_DEX_CACHE_INL_H_
+#define ART_SRC_MIRROR_DEX_CACHE_INL_H_
+
+#include "dex_cache.h"
+
+namespace art {
+namespace mirror {
+
+inline AbstractMethod* DexCache::GetResolvedMethod(uint32_t method_idx) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ AbstractMethod* method = GetResolvedMethods()->Get(method_idx);
+ // Hide resolution trampoline methods from the caller
+ if (method != NULL && method->IsRuntimeMethod()) {
+ DCHECK(method == Runtime::Current()->GetResolutionMethod());
+ return NULL;
+ } else {
+ return method;
+ }
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_SRC_MIRROR_DEX_CACHE_INL_H_
diff --git a/src/mirror/dex_cache.cc b/src/mirror/dex_cache.cc
index 3009786baa..d9c05fb7fc 100644
--- a/src/mirror/dex_cache.cc
+++ b/src/mirror/dex_cache.cc
@@ -78,17 +78,5 @@ void DexCache::Fixup(AbstractMethod* trampoline) {
}
}
-AbstractMethod* DexCache::GetResolvedMethod(uint32_t method_idx) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* method = GetResolvedMethods()->Get(method_idx);
- // Hide resolution trampoline methods from the caller
- if (method != NULL && method->IsRuntimeMethod()) {
- DCHECK(method == Runtime::Current()->GetResolutionMethod());
- return NULL;
- } else {
- return method;
- }
-}
-
} // namespace mirror
} // namespace art
diff --git a/src/oat.cc b/src/oat.cc
index 4eb97f5e41..e606953ed5 100644
--- a/src/oat.cc
+++ b/src/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '0', '5', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '0', '6', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
@@ -57,6 +57,10 @@ OatHeader::OatHeader(InstructionSet instruction_set,
UpdateChecksum(image_file_location.data(), image_file_location_size_);
executable_offset_ = 0;
+ interpreter_to_interpreter_entry_offset_ = 0;
+ interpreter_to_quick_entry_offset_ = 0;
+ portable_resolution_trampoline_offset_ = 0;
+ quick_resolution_trampoline_offset_ = 0;
}
bool OatHeader::IsValid() const {
@@ -97,6 +101,92 @@ uint32_t OatHeader::GetExecutableOffset() const {
return executable_offset_;
}
+void OatHeader::SetExecutableOffset(uint32_t executable_offset) {
+ DCHECK_ALIGNED(executable_offset, kPageSize);
+ CHECK_GT(executable_offset, sizeof(OatHeader));
+ DCHECK(IsValid());
+ DCHECK_EQ(executable_offset_, 0U);
+
+ executable_offset_ = executable_offset;
+ UpdateChecksum(&executable_offset_, sizeof(executable_offset));
+}
+
+const void* OatHeader::GetInterpreterToInterpreterEntry() const {
+ return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToInterpreterEntryOffset();
+}
+
+uint32_t OatHeader::GetInterpreterToInterpreterEntryOffset() const {
+ DCHECK(IsValid());
+ CHECK_GE(interpreter_to_interpreter_entry_offset_, executable_offset_);
+ return interpreter_to_interpreter_entry_offset_;
+}
+
+void OatHeader::SetInterpreterToInterpreterEntryOffset(uint32_t offset) {
+ CHECK(offset == 0 || offset >= executable_offset_);
+ DCHECK(IsValid());
+ DCHECK_EQ(interpreter_to_interpreter_entry_offset_, 0U) << offset;
+
+ interpreter_to_interpreter_entry_offset_ = offset;
+ UpdateChecksum(&interpreter_to_interpreter_entry_offset_, sizeof(offset));
+}
+
+const void* OatHeader::GetInterpreterToQuickEntry() const {
+ return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToQuickEntryOffset();
+}
+
+uint32_t OatHeader::GetInterpreterToQuickEntryOffset() const {
+ DCHECK(IsValid());
+ CHECK_GE(interpreter_to_quick_entry_offset_, interpreter_to_interpreter_entry_offset_);
+ return interpreter_to_quick_entry_offset_;
+}
+
+void OatHeader::SetInterpreterToQuickEntryOffset(uint32_t offset) {
+ CHECK(offset == 0 || offset >= interpreter_to_interpreter_entry_offset_);
+ DCHECK(IsValid());
+ DCHECK_EQ(interpreter_to_quick_entry_offset_, 0U) << offset;
+
+ interpreter_to_quick_entry_offset_ = offset;
+ UpdateChecksum(&interpreter_to_quick_entry_offset_, sizeof(offset));
+}
+
+const void* OatHeader::GetPortableResolutionTrampoline() const {
+ return reinterpret_cast<const uint8_t*>(this) + GetPortableResolutionTrampolineOffset();
+}
+
+uint32_t OatHeader::GetPortableResolutionTrampolineOffset() const {
+ DCHECK(IsValid());
+ CHECK_GE(portable_resolution_trampoline_offset_, interpreter_to_quick_entry_offset_);
+ return portable_resolution_trampoline_offset_;
+}
+
+void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) {
+ CHECK(offset == 0 || offset >= interpreter_to_quick_entry_offset_);
+ DCHECK(IsValid());
+ DCHECK_EQ(portable_resolution_trampoline_offset_, 0U) << offset;
+
+ portable_resolution_trampoline_offset_ = offset;
+ UpdateChecksum(&portable_resolution_trampoline_offset_, sizeof(offset));
+}
+
+const void* OatHeader::GetQuickResolutionTrampoline() const {
+ return reinterpret_cast<const uint8_t*>(this) + GetQuickResolutionTrampolineOffset();
+}
+
+uint32_t OatHeader::GetQuickResolutionTrampolineOffset() const {
+ DCHECK(IsValid());
+ CHECK_GE(quick_resolution_trampoline_offset_, portable_resolution_trampoline_offset_);
+ return quick_resolution_trampoline_offset_;
+}
+
+void OatHeader::SetQuickResolutionTrampolineOffset(uint32_t offset) {
+ CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_);
+ DCHECK(IsValid());
+ DCHECK_EQ(quick_resolution_trampoline_offset_, 0U) << offset;
+
+ quick_resolution_trampoline_offset_ = offset;
+ UpdateChecksum(&quick_resolution_trampoline_offset_, sizeof(offset));
+}
+
uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
CHECK(IsValid());
return image_file_location_oat_checksum_;
@@ -123,16 +213,6 @@ std::string OatHeader::GetImageFileLocation() const {
GetImageFileLocationSize());
}
-void OatHeader::SetExecutableOffset(uint32_t executable_offset) {
- DCHECK_ALIGNED(executable_offset, kPageSize);
- CHECK_GT(executable_offset, sizeof(OatHeader));
- DCHECK(IsValid());
- DCHECK_EQ(executable_offset_, 0U);
-
- executable_offset_ = executable_offset;
- UpdateChecksum(&executable_offset_, sizeof(executable_offset));
-}
-
OatMethodOffsets::OatMethodOffsets()
: code_offset_(0),
frame_size_in_bytes_(0),
diff --git a/src/oat.h b/src/oat.h
index cf988918f0..c67a1a6630 100644
--- a/src/oat.h
+++ b/src/oat.h
@@ -43,8 +43,20 @@ class PACKED(4) OatHeader {
return dex_file_count_;
}
uint32_t GetExecutableOffset() const;
- InstructionSet GetInstructionSet() const;
void SetExecutableOffset(uint32_t executable_offset);
+ const void* GetInterpreterToInterpreterEntry() const;
+ uint32_t GetInterpreterToInterpreterEntryOffset() const;
+ void SetInterpreterToInterpreterEntryOffset(uint32_t offset);
+ const void* GetInterpreterToQuickEntry() const;
+ uint32_t GetInterpreterToQuickEntryOffset() const;
+ void SetInterpreterToQuickEntryOffset(uint32_t offset);
+ const void* GetPortableResolutionTrampoline() const;
+ uint32_t GetPortableResolutionTrampolineOffset() const;
+ void SetPortableResolutionTrampolineOffset(uint32_t offset);
+ const void* GetQuickResolutionTrampoline() const;
+ uint32_t GetQuickResolutionTrampolineOffset() const;
+ void SetQuickResolutionTrampolineOffset(uint32_t offset);
+ InstructionSet GetInstructionSet() const;
uint32_t GetImageFileLocationOatChecksum() const;
uint32_t GetImageFileLocationOatDataBegin() const;
uint32_t GetImageFileLocationSize() const;
@@ -62,6 +74,10 @@ class PACKED(4) OatHeader {
InstructionSet instruction_set_;
uint32_t dex_file_count_;
uint32_t executable_offset_;
+ uint32_t interpreter_to_interpreter_entry_offset_;
+ uint32_t interpreter_to_quick_entry_offset_;
+ uint32_t portable_resolution_trampoline_offset_;
+ uint32_t quick_resolution_trampoline_offset_;
uint32_t image_file_location_oat_checksum_;
uint32_t image_file_location_oat_data_begin_;
diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
index 1a5fe47e58..2e9453ce9c 100644
--- a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
+++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
@@ -91,12 +91,26 @@ extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
// Intrinsic entrypoints.
extern "C" int32_t __memcmp16(void*, void*, int32_t);
extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
extern "C" int32_t art_quick_string_compareto(void*, void*);
// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -187,6 +201,10 @@ void InitEntryPoints(EntryPoints* points) {
points->pShrLong = art_quick_shr_long;
points->pUshrLong = art_quick_ushr_long;
+ // Interpreter
+ points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
// Intrinsics
points->pIndexOf = art_quick_indexof;
points->pMemcmp16 = __memcmp16;
@@ -194,6 +212,8 @@ void InitEntryPoints(EntryPoints* points) {
points->pMemcpy = memcpy;
// Invocation
+ points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+ points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
diff --git a/src/oat/runtime/arm/runtime_support_arm.S b/src/oat/runtime/arm/runtime_support_arm.S
index 3578ba0d16..f19e8bada0 100644
--- a/src/oat/runtime/arm/runtime_support_arm.S
+++ b/src/oat/runtime/arm/runtime_support_arm.S
@@ -246,48 +246,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvok
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
- /*
- * Portable resolution trampoline.
- */
- .extern artPortableResolutionTrampoline
-ENTRY art_portable_resolution_trampoline
- push {r0, r1, r2, r3, lr} @ spill regs
- .save {r0, r1, r2, r3, lr}
- .pad #20
- .cfi_adjust_cfa_offset 20
- sub sp, #12 @ pad stack pointer to align frame
- .pad #12
- .cfi_adjust_cfa_offset 12
- mov r2, r9 @ pass Thread::Current
- mov r1, sp @ pass stack pointer
- blx artPortableResolutionTrampoline @ (method_idx, sp, Thread*)
- mov r12, r0 @ save method code pointer result
- add sp, #12 @ remove padding from stack pointer
- .cfi_adjust_cfa_offset -12
- pop {r0, r1, r2, r3, lr} @ restore regs
- .cfi_adjust_cfa_offset -20
- cmp r12, #0 @ is method code null?
- bxne r12 @ if non-null, tail call to method's code
- bx lr @ otherwise, return to caller to handle exception
-END art_portable_resolution_trampoline
-
- /*
- * Quick resolution trampoline.
- */
- .extern artQuickResolutionTrampoline
-ENTRY art_quick_resolution_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME @ save callee saves in case allocation triggers GC
- mov r2, r9 @ pass Thread::Current
- mov r1, sp @ pass stack pointer
- blx artQuickResolutionTrampoline @ (method_idx, sp, Thread*)
- mov r12, r0 @ save method code pointer result
- add sp, #4 @ set up stack pointer
- .cfi_adjust_cfa_offset -4
- pop {r0-r3, r5-r8, r10-r11, lr} @ 11 words, r0 will hold method*
- .cfi_adjust_cfa_offset -44
- bx r12 @ leaf call to method code
-END art_quick_resolution_trampoline
-
/*
* Portable invocation stub.
* On entry:
diff --git a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc
index eb82c42894..8e066118cd 100644
--- a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc
+++ b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc
@@ -93,12 +93,26 @@ extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
// Intrinsic entrypoints.
extern "C" int32_t __memcmp16(void*, void*, int32_t);
extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
extern "C" int32_t art_quick_string_compareto(void*, void*);
// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -188,6 +202,10 @@ void InitEntryPoints(EntryPoints* points) {
points->pShrLong = art_quick_shr_long;
points->pUshrLong = art_quick_ushr_long;
+ // Interpreter
+ points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
// Intrinsics
points->pIndexOf = art_quick_indexof;
points->pMemcmp16 = __memcmp16;
@@ -195,6 +213,8 @@ void InitEntryPoints(EntryPoints* points) {
points->pMemcpy = memcpy;
// Invocation
+ points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+ points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
diff --git a/src/oat/runtime/mips/runtime_support_mips.S b/src/oat/runtime/mips/runtime_support_mips.S
index 2144e349ba..45d583e097 100644
--- a/src/oat/runtime/mips/runtime_support_mips.S
+++ b/src/oat/runtime/mips/runtime_support_mips.S
@@ -413,71 +413,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvoke
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Portable resolution trampoline.
- */
- .extern artPortableResolutionTrampoline
-ENTRY art_portable_resolution_trampoline
- GENERATE_GLOBAL_POINTER
- addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
- .cfi_adjust_cfa_offset 32
- sw $ra, 16($sp)
- .cfi_rel_offset 31, 16
- sw $a3, 12($sp)
- .cfi_rel_offset 7, 12
- sw $a2, 8($sp)
- .cfi_rel_offset 6, 8
- sw $a1, 4($sp)
- .cfi_rel_offset 5, 4
- sw $a0, 0($sp)
- .cfi_rel_offset 4, 0
- move $a2, $s1 # pass Thread::Current()
- jal artPortableResolutionTrampoline # (method_idx, sp, Thread*)
- move $a1, $sp # pass stack pointer
- lw $a0, 0($sp) # restore registers from stack
- lw $a1, 4($sp)
- lw $a2, 8($sp)
- lw $a3, 12($sp)
- lw $ra, 16($sp)
- beq $v0, $zero, resolve_fail
- addiu $sp, $sp, 32 # restore the stack
- .cfi_adjust_cfa_offset -32
- jr $t9 # leaf call to method's code
- move $t9, $v0 # put method code result in $t9
-resolve_fail:
- jr $ra
- nop
-END art_portable_resolution_trampoline
-
- /*
- * Quick resolution trampoline.
- */
- .extern artQuickResolutionTrampoline
-ENTRY art_quick_resolution_trampoline
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- move $a2, $s1 # pass Thread::Current()
- jal artQuickResolutionTrampoline # (method_idx, sp, Thread*)
- move $a1, $sp # pass stack pointer
- move $t9, $v0 # put method code result in $t9
- lw $a0, 0($sp) # restore registers from stack
- lw $a1, 4($sp)
- lw $a2, 8($sp)
- lw $a3, 12($sp)
- lw $s2, 28($sp)
- lw $s3, 32($sp)
- lw $s4, 36($sp)
- lw $s5, 40($sp)
- lw $s6, 44($sp)
- lw $s7, 48($sp)
- lw $gp, 52($sp)
- lw $fp, 56($sp)
- lw $ra, 60($sp)
- jr $t9 # leaf call to method's code
- addiu $sp, $sp, 64 # restore the stack
- .cfi_adjust_cfa_offset -64
-END art_quick_resolution_trampoline
-
- /*
* Common invocation stub for portable and quick.
* On entry:
* a0 = method pointer
diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h
index 72d5348556..c1a2587c45 100644
--- a/src/oat/runtime/oat_support_entrypoints.h
+++ b/src/oat/runtime/oat_support_entrypoints.h
@@ -17,6 +17,7 @@
#ifndef ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
#define ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+#include "dex_file-inl.h"
#include "runtime.h"
#define ENTRYPOINT_OFFSET(x) \
@@ -30,6 +31,8 @@ class Class;
class Object;
} // namespace mirror
class DvmDex;
+class MethodHelper;
+class ShadowFrame;
class Thread;
struct PACKED(4) EntryPoints {
@@ -104,6 +107,14 @@ struct PACKED(4) EntryPoints {
uint64_t (*pShrLong)(uint64_t, uint32_t);
uint64_t (*pUshrLong)(uint64_t, uint32_t);
+ // Interpreter
+ void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+ void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
// Intrinsics
int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
int32_t (*pMemcmp16)(void*, void*, int32_t);
@@ -111,6 +122,10 @@ struct PACKED(4) EntryPoints {
void* (*pMemcpy)(void*, const void*, size_t);
// Invocation
+ const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
+ mirror::AbstractMethod**, Thread*);
+ const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
+ mirror::AbstractMethod**, Thread*);
void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
@@ -131,24 +146,25 @@ struct PACKED(4) EntryPoints {
void (*pThrowStackOverflowFromCode)(void*);
};
+
// JNI entrypoints.
extern uint32_t JniMethodStart(Thread* self)
- UNLOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot));
+ UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self)
- UNLOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot));
+ UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot));
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked,
Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot));
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot));
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
uint32_t saved_local_ref_cookie,
jobject locked, Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) __attribute__ ((hot));
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
// Initialize an entry point data structure.
void InitEntryPoints(EntryPoints* points);
diff --git a/src/oat/runtime/support_interpreter.cc b/src/oat/runtime/support_interpreter.cc
index a5d6fa3c8b..55be54f2c2 100644
--- a/src/oat/runtime/support_interpreter.cc
+++ b/src/oat/runtime/support_interpreter.cc
@@ -110,12 +110,11 @@ extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread*
return result.GetJ();
}
-void artInterpreterToQuickEntry(Thread* self, ShadowFrame* shadow_frame, JValue* result)
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::AbstractMethod* method = shadow_frame->GetMethod();
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
-
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
arg_array.BuildArgArray(shadow_frame, arg_offset);
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc
index a96555d8ea..6a95f3c8ff 100644
--- a/src/oat/runtime/support_invoke.cc
+++ b/src/oat/runtime/support_invoke.cc
@@ -17,6 +17,7 @@
#include "callee_save_frame.h"
#include "dex_instruction-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc
index df2dda2174..71b67d06bb 100644
--- a/src/oat/runtime/support_stubs.cc
+++ b/src/oat/runtime/support_stubs.cc
@@ -32,6 +32,7 @@ namespace art {
// Lazily resolve a method for portable. Called by stub code.
extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
mirror::AbstractMethod** called_addr,
Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -79,6 +80,14 @@ extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* c
invoke_type = kVirtual;
is_range = true;
break;
+ case Instruction::INVOKE_INTERFACE:
+ invoke_type = kInterface;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ is_range = true;
+ break;
default:
LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
// Avoid used uninitialized warnings.
@@ -87,6 +96,12 @@ extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* c
}
uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+ // Refine called method based on receiver.
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else if (invoke_type == kInterface) {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
} else {
CHECK(called->IsStatic()) << PrettyMethod(called);
invoke_type = kStatic;
@@ -129,7 +144,7 @@ extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* c
// Expect class to at least be initializing.
DCHECK(called->GetDeclaringClass()->IsInitializing());
// Don't want infinite recursion.
- DCHECK(code != GetResolutionTrampoline());
+ DCHECK(code != GetResolutionTrampoline(linker));
// Set up entry into main method
*called_addr = called;
}
@@ -138,6 +153,7 @@ extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* c
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
mirror::AbstractMethod** sp, Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__arm__)
@@ -261,6 +277,14 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* call
invoke_type = kVirtual;
is_range = true;
break;
+ case Instruction::INVOKE_INTERFACE:
+ invoke_type = kInterface;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ is_range = true;
+ break;
default:
LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
// Avoid used uninitialized warnings.
@@ -334,6 +358,12 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* call
if (LIKELY(!thread->IsExceptionPending())) {
// Incompatible class change should have been handled in resolve method.
CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+ // Refine called method based on receiver.
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else if (invoke_type == kInterface) {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
// Ensure that the called method's class is initialized.
mirror::Class* called_class = called->GetDeclaringClass();
linker->EnsureInitialized(called_class, true, true);
@@ -363,7 +393,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* call
// Expect class to at least be initializing.
DCHECK(called->GetDeclaringClass()->IsInitializing());
// Don't want infinite recursion.
- DCHECK(code != GetResolutionTrampoline());
+ DCHECK(code != GetResolutionTrampoline(linker));
// Set up entry into main method
regs[0] = reinterpret_cast<uintptr_t>(called);
}
diff --git a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
index 357bbe0819..a90a583e9f 100644
--- a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
+++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
@@ -75,6 +75,14 @@ extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t);
extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t);
extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t);
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
// Intrinsic entrypoints.
extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
@@ -82,6 +90,12 @@ extern "C" int32_t art_quick_string_compareto(void*, void*);
extern "C" void* art_quick_memcpy(void*, const void*, size_t);
// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -171,6 +185,10 @@ void InitEntryPoints(EntryPoints* points) {
points->pShrLong = art_quick_lshr_from_code;
points->pUshrLong = art_quick_lushr_from_code;
+ // Interpreter
+ points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
// Intrinsics
points->pIndexOf = art_quick_indexof;
points->pMemcmp16 = art_quick_memcmp16;
@@ -178,6 +196,8 @@ void InitEntryPoints(EntryPoints* points) {
points->pMemcpy = art_quick_memcpy;
// Invocation
+ points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+ points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
diff --git a/src/oat/runtime/x86/runtime_support_x86.S b/src/oat/runtime/x86/runtime_support_x86.S
index d3a1fb73ff..ee6db0c3f8 100644
--- a/src/oat/runtime/x86/runtime_support_x86.S
+++ b/src/oat/runtime/x86/runtime_support_x86.S
@@ -301,55 +301,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvok
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
- /*
- * Portable resolution trampoline.
- */
-DEFINE_FUNCTION art_portable_resolution_trampoline
- PUSH ebp // stash %ebp
- movl %esp, %ebp // save %esp
- .cfi_def_cfa_register ebp
- subl LITERAL(8), %esp // align stack
- movl 8(%ebp), %eax // load the called method* into %eax
- leal 8(%ebp), %edx // put the called method* address in %edx
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- PUSH edx // pass called method* address
- PUSH eax // pass method*
- call SYMBOL(artPortableResolutionTrampoline) // (method_idx, sp, Thread*)
- leave // restore the stack and %ebp
- .cfi_def_cfa esp, 4
- .cfi_restore ebp
- cmpl LITERAL(0), %eax // check if returned method code is null
- je resolve_fail // if null, jump to return to handle
- jmp *%eax // otherwise, tail call to intended method
-resolve_fail:
- ret
-END_FUNCTION art_portable_resolution_trampoline
-
- /*
- * Quick resolution trampoline.
- */
-DEFINE_FUNCTION art_quick_resolution_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- movl %esp, %ecx // save stack pointer
- PUSH eax // align stack
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- PUSH ecx // pass stack pointer
- PUSH eax // pass method*
- call SYMBOL(artQuickResolutionTrampoline) // (method_idx, sp, Thread*)
- movl %eax, %edi // save returned code pointer in %edi
- addl LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
- POP eax // restore registers
- POP ecx
- POP edx
- POP ebx
- POP ebp
- POP esi
- xchgl %edi, (%esp) // swap %edi and code pointer
- ret // tail call to intended method
-END_FUNCTION art_quick_resolution_trampoline
-
/*
* Portable invocation stub.
* On entry:
diff --git a/src/oat_test.cc b/src/oat_test.cc
index dd336d9a9b..c7c063a9d1 100644
--- a/src/oat_test.cc
+++ b/src/oat_test.cc
@@ -68,16 +68,16 @@ TEST_F(OatTest, WriteRead) {
const bool compile = false; // DISABLED_ due to the time to compile libcore
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- jobject class_loader = NULL;
- if (compile) {
- // TODO: make selectable
+ // TODO: make selectable
#if defined(ART_USE_PORTABLE_COMPILER)
- CompilerBackend compiler_backend = kPortable;
+ CompilerBackend compiler_backend = kPortable;
#else
- CompilerBackend compiler_backend = kQuick;
+ CompilerBackend compiler_backend = kQuick;
#endif
- compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, 2, false,
- NULL, true, true));
+ compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, 2, false,
+ NULL, true, true));
+ jobject class_loader = NULL;
+ if (compile) {
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath());
}
@@ -143,7 +143,7 @@ TEST_F(OatTest, WriteRead) {
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(36U, sizeof(OatHeader));
+ EXPECT_EQ(52U, sizeof(OatHeader));
EXPECT_EQ(28U, sizeof(OatMethodOffsets));
}
diff --git a/src/oat_writer.cc b/src/oat_writer.cc
index 8acbfe9ca5..a4bd87d4c0 100644
--- a/src/oat_writer.cc
+++ b/src/oat_writer.cc
@@ -54,13 +54,35 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
uint32_t image_file_location_oat_begin,
const std::string& image_file_location,
const CompilerDriver* compiler)
- : compiler_driver_(compiler) {
- image_file_location_oat_checksum_ = image_file_location_oat_checksum;
- image_file_location_oat_begin_ = image_file_location_oat_begin;
- image_file_location_ = image_file_location;
- dex_files_ = &dex_files;
- oat_header_ = NULL;
- executable_offset_padding_length_ = 0;
+ : compiler_driver_(compiler),
+ dex_files_(&dex_files),
+ image_file_location_oat_checksum_(image_file_location_oat_checksum),
+ image_file_location_oat_begin_(image_file_location_oat_begin),
+ image_file_location_(image_file_location),
+ oat_header_(NULL),
+ size_dex_file_alignment_(0),
+ size_executable_offset_alignment_(0),
+ size_oat_header_(0),
+ size_oat_header_image_file_location_(0),
+ size_dex_file_(0),
+ size_interpreter_to_interpreter_entry_(0),
+ size_interpreter_to_quick_entry_(0),
+ size_portable_resolution_trampoline_(0),
+ size_quick_resolution_trampoline_(0),
+ size_stubs_alignment_(0),
+ size_code_size_(0),
+ size_code_(0),
+ size_code_alignment_(0),
+ size_mapping_table_(0),
+ size_vmap_table_(0),
+ size_gc_map_(0),
+ size_oat_dex_file_location_size_(0),
+ size_oat_dex_file_location_data_(0),
+ size_oat_dex_file_location_checksum_(0),
+ size_oat_dex_file_offset_(0),
+ size_oat_dex_file_methods_offsets_(0),
+ size_oat_class_status_(0),
+ size_oat_class_method_offsets_(0) {
size_t offset = InitOatHeader();
offset = InitOatDexFiles(offset);
@@ -70,6 +92,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
offset = InitOatCodeDexFiles(offset);
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
+ CHECK(image_file_location.empty() == compiler->IsImage());
}
OatWriter::~OatWriter() {
@@ -106,7 +129,9 @@ size_t OatWriter::InitDexFiles(size_t offset) {
// calculate the offsets within OatDexFiles to the DexFiles
for (size_t i = 0; i != dex_files_->size(); ++i) {
// dex files are required to be 4 byte aligned
+ size_t original_offset = offset;
offset = RoundUp(offset, 4);
+ size_dex_file_alignment_ += offset - original_offset;
// set offset in OatDexFile to DexFile
oat_dex_files_[i]->dex_file_offset_ = offset;
@@ -162,7 +187,33 @@ size_t OatWriter::InitOatCode(size_t offset) {
// required to be on a new page boundary
offset = RoundUp(offset, kPageSize);
oat_header_->SetExecutableOffset(offset);
- executable_offset_padding_length_ = offset - old_offset;
+ size_executable_offset_alignment_ = offset - old_offset;
+ if (compiler_driver_->IsImage()) {
+ InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
+ oat_header_->SetInterpreterToInterpreterEntryOffset(offset);
+ interpreter_to_interpreter_entry_.reset(compiler_driver_->CreateInterpreterToInterpreterEntry());
+ offset += interpreter_to_interpreter_entry_->size();
+
+ offset = CompiledCode::AlignCode(offset, instruction_set);
+ oat_header_->SetInterpreterToQuickEntryOffset(offset);
+ interpreter_to_quick_entry_.reset(compiler_driver_->CreateInterpreterToQuickEntry());
+ offset += interpreter_to_quick_entry_->size();
+
+ offset = CompiledCode::AlignCode(offset, instruction_set);
+ oat_header_->SetPortableResolutionTrampolineOffset(offset);
+ portable_resolution_trampoline_.reset(compiler_driver_->CreatePortableResolutionTrampoline());
+ offset += portable_resolution_trampoline_->size();
+
+ offset = CompiledCode::AlignCode(offset, instruction_set);
+ oat_header_->SetQuickResolutionTrampolineOffset(offset);
+ quick_resolution_trampoline_.reset(compiler_driver_->CreateQuickResolutionTrampoline());
+ offset += quick_resolution_trampoline_->size();
+ } else {
+ oat_header_->SetInterpreterToInterpreterEntryOffset(0);
+ oat_header_->SetInterpreterToQuickEntryOffset(0);
+ oat_header_->SetPortableResolutionTrampolineOffset(0);
+ oat_header_->SetQuickResolutionTrampolineOffset(0);
+ }
return offset;
}
@@ -389,11 +440,13 @@ bool OatWriter::Write(OutputStream& out) {
PLOG(ERROR) << "Failed to write oat header to " << out.GetLocation();
return false;
}
+ size_oat_header_ += sizeof(*oat_header_);
if (!out.WriteFully(image_file_location_.data(), image_file_location_.size())) {
PLOG(ERROR) << "Failed to write oat header image file location to " << out.GetLocation();
return false;
}
+ size_oat_header_image_file_location_ += image_file_location_.size();
if (!WriteTables(out)) {
LOG(ERROR) << "Failed to write oat tables to " << out.GetLocation();
@@ -412,12 +465,64 @@ bool OatWriter::Write(OutputStream& out) {
return false;
}
+ LOG(INFO) << "size_dex_file_alignment_=" << size_dex_file_alignment_;
+ LOG(INFO) << "size_executable_offset_alignment_=" << size_executable_offset_alignment_;
+ LOG(INFO) << "size_oat_header_=" << size_oat_header_;
+ LOG(INFO) << "size_oat_header_image_file_location_=" << size_oat_header_image_file_location_;
+ LOG(INFO) << "size_dex_file_=" << size_dex_file_;
+ LOG(INFO) << "size_interpreter_to_interpreter_entry_=" << size_interpreter_to_interpreter_entry_;
+ LOG(INFO) << "size_interpreter_to_quick_entry_=" << size_interpreter_to_quick_entry_;
+ LOG(INFO) << "size_portable_resolution_trampoline_=" << size_portable_resolution_trampoline_;
+ LOG(INFO) << "size_quick_resolution_trampoline_=" << size_quick_resolution_trampoline_;
+ LOG(INFO) << "size_stubs_alignment_=" << size_stubs_alignment_;
+ LOG(INFO) << "size_code_size_=" << size_code_size_;
+ LOG(INFO) << "size_code_=" << size_code_;
+ LOG(INFO) << "size_code_alignment_=" << size_code_alignment_;
+ LOG(INFO) << "size_mapping_table_=" << size_mapping_table_;
+ LOG(INFO) << "size_vmap_table_=" << size_vmap_table_;
+ LOG(INFO) << "size_gc_map_=" << size_gc_map_;
+ LOG(INFO) << "size_oat_dex_file_location_size_=" << size_oat_dex_file_location_size_;
+ LOG(INFO) << "size_oat_dex_file_location_data=" << size_oat_dex_file_location_data_;
+ LOG(INFO) << "size_oat_dex_file_location_checksum_=" << size_oat_dex_file_location_checksum_;
+ LOG(INFO) << "size_oat_dex_file_offset_=" << size_oat_dex_file_offset_;
+ LOG(INFO) << "size_oat_dex_file_methods_offsets_=" << size_oat_dex_file_methods_offsets_;
+ LOG(INFO) << "size_oat_class_status_=" << size_oat_class_status_;
+ LOG(INFO) << "size_oat_class_method_offsets=" << size_oat_class_method_offsets_;
+
+ uint32_t size_total =
+ size_dex_file_alignment_ +
+ size_executable_offset_alignment_ +
+ size_oat_header_ +
+ size_oat_header_image_file_location_ +
+ size_dex_file_ +
+ size_interpreter_to_interpreter_entry_ +
+ size_interpreter_to_quick_entry_ +
+ size_portable_resolution_trampoline_ +
+ size_quick_resolution_trampoline_ +
+ size_stubs_alignment_ +
+ size_code_size_ +
+ size_code_ +
+ size_code_alignment_ +
+ size_mapping_table_ +
+ size_vmap_table_ +
+ size_gc_map_ +
+ size_oat_dex_file_location_size_ +
+ size_oat_dex_file_location_data_ +
+ size_oat_dex_file_location_checksum_ +
+ size_oat_dex_file_offset_ +
+ size_oat_dex_file_methods_offsets_ +
+ size_oat_class_status_ +
+ size_oat_class_method_offsets_;
+
+ LOG(INFO) << "size_total=" << size_total;
+ DCHECK_EQ(size_total, static_cast<uint32_t>(out.Seek(0, kSeekCurrent)));
+
return true;
}
bool OatWriter::WriteTables(OutputStream& out) {
for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
- if (!oat_dex_files_[i]->Write(out)) {
+ if (!oat_dex_files_[i]->Write(this, out)) {
PLOG(ERROR) << "Failed to write oat dex information to " << out.GetLocation();
return false;
}
@@ -436,9 +541,10 @@ bool OatWriter::WriteTables(OutputStream& out) {
PLOG(ERROR) << "Failed to write dex file " << dex_file->GetLocation() << " to " << out.GetLocation();
return false;
}
+ size_dex_file_ += dex_file->GetHeader().file_size_;
}
for (size_t i = 0; i != oat_classes_.size(); ++i) {
- if (!oat_classes_[i]->Write(out)) {
+ if (!oat_classes_[i]->Write(this, out)) {
PLOG(ERROR) << "Failed to write oat methods information to " << out.GetLocation();
return false;
}
@@ -448,13 +554,59 @@ bool OatWriter::WriteTables(OutputStream& out) {
size_t OatWriter::WriteCode(OutputStream& out) {
uint32_t offset = oat_header_->GetExecutableOffset();
- off_t new_offset = out.Seek(executable_offset_padding_length_, kSeekCurrent);
+ off_t new_offset = out.Seek(size_executable_offset_alignment_, kSeekCurrent);
if (static_cast<uint32_t>(new_offset) != offset) {
PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
<< " Expected: " << offset << " File: " << out.GetLocation();
return 0;
}
DCHECK_OFFSET();
+ if (compiler_driver_->IsImage()) {
+ InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
+ if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0], interpreter_to_interpreter_entry_->size())) {
+ PLOG(ERROR) << "Failed to write interpreter to interpreter entry to " << out.GetLocation();
+ return false;
+ }
+ size_interpreter_to_interpreter_entry_ += interpreter_to_interpreter_entry_->size();
+ offset += interpreter_to_interpreter_entry_->size();
+ DCHECK_OFFSET();
+
+ uint32_t aligned_offset = CompiledCode::AlignCode(offset, instruction_set);
+ uint32_t alignment_padding = aligned_offset - offset;
+ out.Seek(alignment_padding, kSeekCurrent);
+ size_stubs_alignment_ += alignment_padding;
+ if (!out.WriteFully(&(*interpreter_to_quick_entry_)[0], interpreter_to_quick_entry_->size())) {
+ PLOG(ERROR) << "Failed to write interpreter to quick entry to " << out.GetLocation();
+ return false;
+ }
+ size_interpreter_to_quick_entry_ += interpreter_to_quick_entry_->size();
+ offset += alignment_padding + interpreter_to_quick_entry_->size();
+ DCHECK_OFFSET();
+
+ aligned_offset = CompiledCode::AlignCode(offset, instruction_set);
+ alignment_padding = aligned_offset - offset;
+ out.Seek(alignment_padding, kSeekCurrent);
+ size_stubs_alignment_ += alignment_padding;
+ if (!out.WriteFully(&(*portable_resolution_trampoline_)[0], portable_resolution_trampoline_->size())) {
+ PLOG(ERROR) << "Failed to write portable resolution trampoline to " << out.GetLocation();
+ return false;
+ }
+ size_portable_resolution_trampoline_ += portable_resolution_trampoline_->size();
+ offset += alignment_padding + portable_resolution_trampoline_->size();
+ DCHECK_OFFSET();
+
+ aligned_offset = CompiledCode::AlignCode(offset, instruction_set);
+ alignment_padding = aligned_offset - offset;
+ out.Seek(alignment_padding, kSeekCurrent);
+ size_stubs_alignment_ += alignment_padding;
+ if (!out.WriteFully(&(*quick_resolution_trampoline_)[0], quick_resolution_trampoline_->size())) {
+ PLOG(ERROR) << "Failed to write quick resolution trampoline to " << out.GetLocation();
+ return false;
+ }
+ size_quick_resolution_trampoline_ += quick_resolution_trampoline_->size();
+ offset += alignment_padding + quick_resolution_trampoline_->size();
+ DCHECK_OFFSET();
+ }
return offset;
}
@@ -547,6 +699,7 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c
uint32_t aligned_code_delta = aligned_offset - offset;
if (aligned_code_delta != 0) {
off_t new_offset = out.Seek(aligned_code_delta, kSeekCurrent);
+ size_code_alignment_ += aligned_code_delta;
if (static_cast<uint32_t>(new_offset) != aligned_offset) {
PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
<< " Expected: " << aligned_offset << " File: " << out.GetLocation();
@@ -572,12 +725,14 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c
ReportWriteFailure("method code size", method_idx, dex_file, out);
return 0;
}
+ size_code_size_ += sizeof(code_size);
offset += sizeof(code_size);
DCHECK_OFFSET();
if (!out.WriteFully(&code[0], code_size)) {
ReportWriteFailure("method code", method_idx, dex_file, out);
return 0;
}
+ size_code_ += code_size;
offset += code_size;
}
DCHECK_OFFSET();
@@ -602,6 +757,7 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c
ReportWriteFailure("mapping table", method_idx, dex_file, out);
return 0;
}
+ size_mapping_table_ += mapping_table_size;
offset += mapping_table_size;
}
DCHECK_OFFSET();
@@ -625,6 +781,7 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c
ReportWriteFailure("vmap table", method_idx, dex_file, out);
return 0;
}
+ size_vmap_table_ += vmap_table_size;
offset += vmap_table_size;
}
DCHECK_OFFSET();
@@ -648,6 +805,7 @@ size_t OatWriter::WriteCodeMethod(OutputStream& out, size_t offset, size_t oat_c
ReportWriteFailure("GC map", method_idx, dex_file, out);
return 0;
}
+ size_gc_map_ += gc_map_size;
offset += gc_map_size;
}
DCHECK_OFFSET();
@@ -683,29 +841,35 @@ void OatWriter::OatDexFile::UpdateChecksum(OatHeader& oat_header) const {
sizeof(methods_offsets_[0]) * methods_offsets_.size());
}
-bool OatWriter::OatDexFile::Write(OutputStream& out) const {
+bool OatWriter::OatDexFile::Write(OatWriter* oat_writer, OutputStream& out) const {
DCHECK_OFFSET_();
if (!out.WriteFully(&dex_file_location_size_, sizeof(dex_file_location_size_))) {
PLOG(ERROR) << "Failed to write dex file location length to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_dex_file_location_size_ += sizeof(dex_file_location_size_);
if (!out.WriteFully(dex_file_location_data_, dex_file_location_size_)) {
PLOG(ERROR) << "Failed to write dex file location data to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_dex_file_location_data_ += dex_file_location_size_;
if (!out.WriteFully(&dex_file_location_checksum_, sizeof(dex_file_location_checksum_))) {
PLOG(ERROR) << "Failed to write dex file location checksum to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_dex_file_location_checksum_ += sizeof(dex_file_location_checksum_);
if (!out.WriteFully(&dex_file_offset_, sizeof(dex_file_offset_))) {
PLOG(ERROR) << "Failed to write dex file offset to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_dex_file_offset_ += sizeof(dex_file_offset_);
if (!out.WriteFully(&methods_offsets_[0],
sizeof(methods_offsets_[0]) * methods_offsets_.size())) {
PLOG(ERROR) << "Failed to write methods offsets to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_dex_file_methods_offsets_ +=
+ sizeof(methods_offsets_[0]) * methods_offsets_.size();
return true;
}
@@ -736,12 +900,13 @@ void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const {
sizeof(method_offsets_[0]) * method_offsets_.size());
}
-bool OatWriter::OatClass::Write(OutputStream& out) const {
+bool OatWriter::OatClass::Write(OatWriter* oat_writer, OutputStream& out) const {
DCHECK_OFFSET_();
if (!out.WriteFully(&status_, sizeof(status_))) {
PLOG(ERROR) << "Failed to write class status to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_class_status_ += sizeof(status_);
DCHECK_EQ(static_cast<off_t>(GetOatMethodOffsetsOffsetFromOatHeader(0)),
out.Seek(0, kSeekCurrent));
if (!out.WriteFully(&method_offsets_[0],
@@ -749,6 +914,7 @@ bool OatWriter::OatClass::Write(OutputStream& out) const {
PLOG(ERROR) << "Failed to write method offsets to " << out.GetLocation();
return false;
}
+ oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size();
DCHECK_EQ(static_cast<off_t>(GetOatMethodOffsetsOffsetFromOatHeader(method_offsets_.size())),
out.Seek(0, kSeekCurrent));
return true;
diff --git a/src/oat_writer.h b/src/oat_writer.h
index e1d76f459f..b201d6b4ee 100644
--- a/src/oat_writer.h
+++ b/src/oat_writer.h
@@ -83,7 +83,8 @@ class OatWriter {
size_t InitOatDexFiles(size_t offset);
size_t InitDexFiles(size_t offset);
size_t InitOatClasses(size_t offset);
- size_t InitOatCode(size_t offset);
+ size_t InitOatCode(size_t offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t InitOatCodeDexFiles(size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t InitOatCodeDexFile(size_t offset,
@@ -120,7 +121,7 @@ class OatWriter {
explicit OatDexFile(size_t offset, const DexFile& dex_file);
size_t SizeOf() const;
void UpdateChecksum(OatHeader& oat_header) const;
- bool Write(OutputStream& out) const;
+ bool Write(OatWriter* oat_writer, OutputStream& out) const;
// Offset of start of OatDexFile from beginning of OatHeader. It is
// used to validate file position when writing.
@@ -144,7 +145,7 @@ class OatWriter {
size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
size_t SizeOf() const;
void UpdateChecksum(OatHeader& oat_header) const;
- bool Write(OutputStream& out) const;
+ bool Write(OatWriter* oat_writer, OutputStream& out) const;
// Offset of start of OatClass from beginning of OatHeader. It is
// used to validate file position when writing. For Portable, it
@@ -175,7 +176,35 @@ class OatWriter {
OatHeader* oat_header_;
std::vector<OatDexFile*> oat_dex_files_;
std::vector<OatClass*> oat_classes_;
- uint32_t executable_offset_padding_length_;
+ UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_entry_;
+ UniquePtr<const std::vector<uint8_t> > interpreter_to_quick_entry_;
+ UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
+ UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
+
+ // output stats
+ uint32_t size_dex_file_alignment_;
+ uint32_t size_executable_offset_alignment_;
+ uint32_t size_oat_header_;
+ uint32_t size_oat_header_image_file_location_;
+ uint32_t size_dex_file_;
+ uint32_t size_interpreter_to_interpreter_entry_;
+ uint32_t size_interpreter_to_quick_entry_;
+ uint32_t size_portable_resolution_trampoline_;
+ uint32_t size_quick_resolution_trampoline_;
+ uint32_t size_stubs_alignment_;
+ uint32_t size_code_size_;
+ uint32_t size_code_;
+ uint32_t size_code_alignment_;
+ uint32_t size_mapping_table_;
+ uint32_t size_vmap_table_;
+ uint32_t size_gc_map_;
+ uint32_t size_oat_dex_file_location_size_;
+ uint32_t size_oat_dex_file_location_data_;
+ uint32_t size_oat_dex_file_location_checksum_;
+ uint32_t size_oat_dex_file_offset_;
+ uint32_t size_oat_dex_file_methods_offsets_;
+ uint32_t size_oat_class_status_;
+ uint32_t size_oat_class_method_offsets_;
template <class T> struct MapCompare {
public:
diff --git a/src/oatdump.cc b/src/oatdump.cc
index 7a99f8dc0e..538e1bb99a 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -880,7 +880,7 @@ class ImageDumper {
const void* GetOatCodeBegin(mirror::AbstractMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const void* code = m->GetEntryPointFromCompiledCode();
- if (code == GetResolutionTrampoline()) {
+ if (code == GetResolutionTrampoline(Runtime::Current()->GetClassLinker())) {
code = oat_dumper_->GetOatCode(m);
}
if (oat_dumper_->GetInstructionSet() == kThumb2) {
diff --git a/src/output_stream_test.cc b/src/output_stream_test.cc
index 0e02825ff8..c9e0edefcd 100644
--- a/src/output_stream_test.cc
+++ b/src/output_stream_test.cc
@@ -25,7 +25,7 @@ class OutputStreamTest : public CommonTest {
protected:
void CheckOffset(off_t expected) {
off_t actual = output_stream_->Seek(0, kSeekCurrent);
- CHECK_EQ(expected, actual);
+ EXPECT_EQ(expected, actual);
}
void SetOutputStream(OutputStream& output_stream) {
@@ -33,16 +33,16 @@ class OutputStreamTest : public CommonTest {
}
void GenerateTestOutput() {
- CHECK_EQ(3, output_stream_->Seek(3, kSeekCurrent));
+ EXPECT_EQ(3, output_stream_->Seek(3, kSeekCurrent));
CheckOffset(3);
- CHECK_EQ(2, output_stream_->Seek(2, kSeekSet));
+ EXPECT_EQ(2, output_stream_->Seek(2, kSeekSet));
CheckOffset(2);
uint8_t buf[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
- CHECK(output_stream_->WriteFully(buf, 2));
+ EXPECT_TRUE(output_stream_->WriteFully(buf, 2));
CheckOffset(4);
- CHECK_EQ(6, output_stream_->Seek(2, kSeekEnd));
+ EXPECT_EQ(6, output_stream_->Seek(2, kSeekEnd));
CheckOffset(6);
- CHECK(output_stream_->WriteFully(buf, 4));
+ EXPECT_TRUE(output_stream_->WriteFully(buf, 4));
CheckOffset(10);
}
@@ -50,8 +50,8 @@ class OutputStreamTest : public CommonTest {
uint8_t expected[] = {
0, 0, 1, 2, 0, 0, 1, 2, 3, 4
};
- CHECK_EQ(sizeof(expected), actual.size());
- CHECK_EQ(0, memcmp(expected, &actual[0], actual.size()));
+ EXPECT_EQ(sizeof(expected), actual.size());
+ EXPECT_EQ(0, memcmp(expected, &actual[0], actual.size()));
}
OutputStream* output_stream_;
@@ -63,10 +63,10 @@ TEST_F(OutputStreamTest, File) {
SetOutputStream(output_stream);
GenerateTestOutput();
UniquePtr<File> in(OS::OpenFile(tmp.GetFilename().c_str(), false));
- CHECK(in.get() != NULL);
+ EXPECT_TRUE(in.get() != NULL);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
- CHECK(readSuccess);
+ EXPECT_TRUE(readSuccess);
CheckTestOutput(actual);
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 45d2988e97..c21a1c4924 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1109,7 +1109,9 @@ mirror::AbstractMethod* Runtime::CreateResolutionMethod() {
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex16);
// When compiling, the code pointer will get set later when the image is loaded.
- method->SetEntryPointFromCompiledCode(Runtime::Current()->IsCompiler() ? NULL : GetResolutionTrampoline());
+ Runtime* r = Runtime::Current();
+ ClassLinker* cl = r->GetClassLinker();
+ method->SetEntryPointFromCompiledCode(r->IsCompiler() ? NULL : GetResolutionTrampoline(cl));
return method.get();
}
diff --git a/src/runtime_support.h b/src/runtime_support.h
index 5fc8da53b6..094e23a0fd 100644
--- a/src/runtime_support.h
+++ b/src/runtime_support.h
@@ -34,14 +34,12 @@ extern "C" void art_interpreter_invoke_handler();
extern "C" void art_jni_dlsym_lookup_stub();
extern "C" void art_portable_abstract_method_error_stub();
extern "C" void art_portable_proxy_invoke_handler();
-extern "C" void art_portable_resolution_trampoline();
extern "C" void art_quick_abstract_method_error_stub();
extern "C" void art_quick_deoptimize();
extern "C" void art_quick_instrumentation_entry_from_code(void*);
extern "C" void art_quick_instrumentation_exit_from_code();
extern "C" void art_quick_interpreter_entry(void*);
extern "C" void art_quick_proxy_invoke_handler();
-extern "C" void art_quick_resolution_trampoline();
extern "C" void art_work_around_app_jni_bugs();
extern "C" double art_l2d(int64_t l);
@@ -373,22 +371,20 @@ static inline void* GetInterpreterEntryPoint() {
return reinterpret_cast<void*>(art_quick_interpreter_entry);
}
-// Return address of portable resolution trampoline stub.
-static inline void* GetPortableResolutionTrampoline() {
- return reinterpret_cast<void*>(art_portable_resolution_trampoline);
+static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) {
+ return class_linker->GetPortableResolutionTrampoline();
}
-// Return address of quick resolution trampoline stub.
-static inline void* GetQuickResolutionTrampoline() {
- return reinterpret_cast<void*>(art_quick_resolution_trampoline);
+static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) {
+ return class_linker->GetQuickResolutionTrampoline();
}
// Return address of resolution trampoline stub for defined compiler.
-static inline void* GetResolutionTrampoline() {
+static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) {
#if defined(ART_USE_PORTABLE_COMPILER)
- return GetPortableResolutionTrampoline();
+ return GetPortableResolutionTrampoline(class_linker);
#else
- return GetQuickResolutionTrampoline();
+ return GetQuickResolutionTrampoline(class_linker);
#endif
}
diff --git a/src/stack.cc b/src/stack.cc
index 8690a36387..8672975453 100644
--- a/src/stack.cc
+++ b/src/stack.cc
@@ -42,6 +42,15 @@ mirror::Object* ShadowFrame::GetThisObject() const {
}
}
+mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
+ mirror::AbstractMethod* m = GetMethod();
+ if (m->IsStatic()) {
+ return NULL;
+ } else {
+ return GetVRegReference(NumberOfVRegs() - num_ins);
+ }
+}
+
ThrowLocation ShadowFrame::GetCurrentLocationForThrow() const {
return ThrowLocation(GetThisObject(), GetMethod(), GetDexPC());
}
diff --git a/src/stack.h b/src/stack.h
index 1b4d285216..fbfacb1733 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -202,6 +202,8 @@ class ShadowFrame {
mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetMethod(mirror::AbstractMethod* method) {
@@ -254,13 +256,9 @@ class ShadowFrame {
CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
number_of_vregs_ |= kHasReferenceArray;
#endif
- for (size_t i = 0; i < num_vregs; ++i) {
- SetVRegReference(i, NULL);
- }
+ memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(mirror::Object*)));
} else {
- for (size_t i = 0; i < num_vregs; ++i) {
- SetVReg(i, 0);
- }
+ memset(vregs_, 0, num_vregs * sizeof(uint32_t));
}
}
diff --git a/src/thread.cc b/src/thread.cc
index c5bfb20ddb..9e865329f5 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -865,9 +865,10 @@ void Thread::DumpStack(std::ostream& os) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
- if (this == Thread::Current() || IsSuspended()) {
+ bool dump_for_abort = (gAborting > 0);
+ if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
// If we're currently in native code, dump that stack before dumping the managed stack.
- if (ShouldShowNativeStack(this)) {
+ if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
DumpNativeStack(os, GetTid(), " native: ", false);
}
@@ -1647,6 +1648,8 @@ static const EntryPointInfo gThreadEntryPointInfo[] = {
ENTRY_POINT_INFO(pMemcmp16),
ENTRY_POINT_INFO(pStringCompareTo),
ENTRY_POINT_INFO(pMemcpy),
+ ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
+ ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
diff --git a/src/utf.cc b/src/utf.cc
index 8d3547e70c..1add7d9a68 100644
--- a/src/utf.cc
+++ b/src/utf.cc
@@ -119,6 +119,23 @@ int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1,
}
}
+int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8_1, const uint16_t* utf8_2) {
+ for (;;) {
+ if (*utf8_1 == '\0') {
+ return (*utf8_2 == '\0') ? 0 : -1;
+ } else if (*utf8_2 == '\0') {
+ return 1;
+ }
+
+ int c1 = GetUtf16FromUtf8(&utf8_1);
+ int c2 = *utf8_2;
+
+ if (c1 != c2) {
+ return c1 > c2 ? 1 : -1;
+ }
+ }
+}
+
size_t CountUtf8Bytes(const uint16_t* chars, size_t char_count) {
size_t result = 0;
while (char_count--) {
diff --git a/src/utf.h b/src/utf.h
index 44899bfdd2..57c811f21d 100644
--- a/src/utf.h
+++ b/src/utf.h
@@ -56,6 +56,12 @@ void ConvertModifiedUtf8ToUtf16(uint16_t* utf16_out, const char* utf8_in);
int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1, const char* utf8_2);
/*
+ * Compare a modified UTF-8 string with a UTF-16 string as code point values in a non-locale
+ * sensitive manner.
+ */
+int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8_1, const uint16_t* utf8_2);
+
+/*
* Convert from UTF-16 to Modified UTF-8. Note that the output is _not_
* NUL-terminated. You probably need to call CountUtf8Bytes before calling
* this anyway, so if you want a NUL-terminated string, you know where to
diff --git a/src/vector_output_stream.cc b/src/vector_output_stream.cc
index 96154ee92b..e5ff729036 100644
--- a/src/vector_output_stream.cc
+++ b/src/vector_output_stream.cc
@@ -16,8 +16,6 @@
#include "vector_output_stream.h"
-#include <string.h>
-
#include "base/logging.h"
namespace art {
@@ -25,14 +23,6 @@ namespace art {
VectorOutputStream::VectorOutputStream(const std::string& location, std::vector<uint8_t>& vector)
: OutputStream(location), offset_(vector.size()), vector_(vector) {}
-bool VectorOutputStream::WriteFully(const void* buffer, int64_t byte_count) {
- off_t new_offset = offset_ + byte_count;
- EnsureCapacity(new_offset);
- memcpy(&vector_[offset_], buffer, byte_count);
- offset_ = new_offset;
- return true;
-}
-
off_t VectorOutputStream::Seek(off_t offset, Whence whence) {
CHECK(whence == kSeekSet || whence == kSeekCurrent || whence == kSeekEnd) << whence;
off_t new_offset = 0;
@@ -55,10 +45,4 @@ off_t VectorOutputStream::Seek(off_t offset, Whence whence) {
return offset_;
}
-void VectorOutputStream::EnsureCapacity(off_t new_offset) {
- if (new_offset > static_cast<off_t>(vector_.size())) {
- vector_.resize(new_offset);
- }
-}
-
} // namespace art
diff --git a/src/vector_output_stream.h b/src/vector_output_stream.h
index a99128e6f3..3546c8d577 100644
--- a/src/vector_output_stream.h
+++ b/src/vector_output_stream.h
@@ -20,6 +20,7 @@
#include "output_stream.h"
#include <string>
+#include <string.h>
#include <vector>
namespace art {
@@ -30,12 +31,28 @@ class VectorOutputStream : public OutputStream {
virtual ~VectorOutputStream() {}
- virtual bool WriteFully(const void* buffer, int64_t byte_count);
+ bool WriteFully(const void* buffer, int64_t byte_count) {
+ if (static_cast<size_t>(offset_) == vector_.size()) {
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
+ vector_.insert(vector_.end(), &start[0], &start[byte_count]);
+ offset_ += byte_count;
+ } else {
+ off_t new_offset = offset_ + byte_count;
+ EnsureCapacity(new_offset);
+ memcpy(&vector_[offset_], buffer, byte_count);
+ offset_ = new_offset;
+ }
+ return true;
+ }
- virtual off_t Seek(off_t offset, Whence whence);
+ off_t Seek(off_t offset, Whence whence);
private:
- void EnsureCapacity(off_t new_offset);
+ void EnsureCapacity(off_t new_offset) {
+ if (new_offset > static_cast<off_t>(vector_.size())) {
+ vector_.resize(new_offset);
+ }
+ }
off_t offset_;
std::vector<uint8_t>& vector_;
diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index 2eb0c203ea..8ca5b82018 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -19,11 +19,12 @@
#include <iostream>
#include "base/logging.h"
+#include "base/mutex-inl.h"
#include "base/stringpiece.h"
#include "class_linker.h"
#include "compiler/driver/compiler_driver.h"
#include "dex_file-inl.h"
-#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
#include "dex_instruction_visitor.h"
#include "gc/card_table-inl.h"
#include "indenter.h"
@@ -32,11 +33,12 @@
#include "mirror/abstract_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
+#include "register_line-inl.h"
#include "runtime.h"
#include "verifier/dex_gc_map.h"
@@ -266,13 +268,14 @@ MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_ca
: reg_types_(can_load_classes),
work_insn_idx_(-1),
dex_method_idx_(dex_method_idx),
- foo_method_(method),
+ mirror_method_(method),
method_access_flags_(method_access_flags),
dex_file_(dex_file),
dex_cache_(dex_cache),
class_loader_(class_loader),
class_def_idx_(class_def_idx),
code_item_(code_item),
+ declaring_class_(NULL),
interesting_dex_pc_(-1),
monitor_enter_dex_pcs_(NULL),
have_pending_hard_failure_(false),
@@ -895,6 +898,7 @@ bool MethodVerifier::CheckVarArgRangeRegs(uint32_t vA, uint32_t vC) {
static const std::vector<uint8_t>* CreateLengthPrefixedDexGcMap(const std::vector<uint8_t>& gc_map) {
std::vector<uint8_t>* length_prefixed_gc_map = new std::vector<uint8_t>;
+ length_prefixed_gc_map->reserve(gc_map.size() + 4);
length_prefixed_gc_map->push_back((gc_map.size() & 0xff000000) >> 24);
length_prefixed_gc_map->push_back((gc_map.size() & 0x00ff0000) >> 16);
length_prefixed_gc_map->push_back((gc_map.size() & 0x0000ff00) >> 8);
@@ -949,15 +953,20 @@ bool MethodVerifier::VerifyCodeFlow() {
DCHECK_NE(failures_.size(), 0U);
return false; // Not a real failure, but a failure to encode
}
-#ifndef NDEBUG
- VerifyGcMap(*map);
-#endif
+ if (kIsDebugBuild) {
+ VerifyGcMap(*map);
+ }
const std::vector<uint8_t>* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get()));
verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
- MethodVerifier::PcToConreteMethod* pc_to_conrete_method = GenerateDevirtMap();
- if(pc_to_conrete_method != NULL ) {
- SetDevirtMap(ref, pc_to_conrete_method);
+ MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet();
+ if(method_to_safe_casts != NULL ) {
+ SetSafeCastMap(ref, method_to_safe_casts);
+ }
+
+ MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap();
+ if(pc_to_concrete_method != NULL ) {
+ SetDevirtMap(ref, pc_to_concrete_method);
}
return true;
}
@@ -1244,6 +1253,11 @@ bool MethodVerifier::CodeFlowVerifyMethod() {
if (dead_start >= 0) {
LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start) << "-" << reinterpret_cast<void*>(insn_idx - 1);
}
+ // To dump the state of the verifier after a method, do something like:
+ // if (PrettyMethod(dex_method_idx_, *dex_file_) ==
+ // "boolean java.lang.String.equals(java.lang.Object)") {
+ // LOG(INFO) << info_messages_.str();
+ // }
}
return true;
}
@@ -1280,7 +1294,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
*/
const uint16_t* insns = code_item_->insns_ + work_insn_idx_;
const Instruction* inst = Instruction::At(insns);
- DecodedInstruction dec_insn(inst);
int opcode_flags = Instruction::FlagsOf(inst->Opcode());
int32_t branch_target = 0;
@@ -1306,32 +1319,51 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
#endif
}
- switch (dec_insn.opcode) {
+
+ // We need to ensure the work line is consistent while performing validation. When we spot a
+ // peephole pattern we compute a new line for either the fallthrough instruction or the
+ // branch target.
+ UniquePtr<RegisterLine> branch_line;
+ UniquePtr<RegisterLine> fallthrough_line;
+
+ switch (inst->Opcode()) {
case Instruction::NOP:
/*
* A "pure" NOP has no effect on anything. Data tables start with
* a signature that looks like a NOP; if we see one of these in
* the course of executing code then we have a problem.
*/
- if (dec_insn.vA != 0) {
+ if (inst->VRegA_10x() != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "encountered data table in instruction stream";
}
break;
case Instruction::MOVE:
+ work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
+ break;
case Instruction::MOVE_FROM16:
+ work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
+ break;
case Instruction::MOVE_16:
- work_line_->CopyRegister1(dec_insn.vA, dec_insn.vB, kTypeCategory1nr);
+ work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
break;
case Instruction::MOVE_WIDE:
+ work_line_->CopyRegister2(inst->VRegA_12x(), inst->VRegB_12x());
+ break;
case Instruction::MOVE_WIDE_FROM16:
+ work_line_->CopyRegister2(inst->VRegA_22x(), inst->VRegB_22x());
+ break;
case Instruction::MOVE_WIDE_16:
- work_line_->CopyRegister2(dec_insn.vA, dec_insn.vB);
+ work_line_->CopyRegister2(inst->VRegA_32x(), inst->VRegB_32x());
break;
case Instruction::MOVE_OBJECT:
+ work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
+ break;
case Instruction::MOVE_OBJECT_FROM16:
+ work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
+ break;
case Instruction::MOVE_OBJECT_16:
- work_line_->CopyRegister1(dec_insn.vA, dec_insn.vB, kTypeCategoryRef);
+ work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
break;
/*
@@ -1346,13 +1378,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* easier to read in some cases.)
*/
case Instruction::MOVE_RESULT:
- work_line_->CopyResultRegister1(dec_insn.vA, false);
+ work_line_->CopyResultRegister1(inst->VRegA_11x(), false);
break;
case Instruction::MOVE_RESULT_WIDE:
- work_line_->CopyResultRegister2(dec_insn.vA);
+ work_line_->CopyResultRegister2(inst->VRegA_11x());
break;
case Instruction::MOVE_RESULT_OBJECT:
- work_line_->CopyResultRegister1(dec_insn.vA, true);
+ work_line_->CopyResultRegister1(inst->VRegA_11x(), true);
break;
case Instruction::MOVE_EXCEPTION: {
@@ -1361,7 +1393,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* that as part of extracting the exception type from the catch block list.
*/
const RegType& res_type = GetCaughtExceptionType();
- work_line_->SetRegisterType(dec_insn.vA, res_type);
+ work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
break;
}
case Instruction::RETURN_VOID:
@@ -1380,16 +1412,17 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
// Compilers may generate synthetic functions that write byte values into boolean fields.
// Also, it may use integer values for boolean, byte, short, and character return types.
- const RegType& src_type = work_line_->GetRegisterType(dec_insn.vA);
+ const uint32_t vregA = inst->VRegA_11x();
+ const RegType& src_type = work_line_->GetRegisterType(vregA);
bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
((return_type.IsBoolean() || return_type.IsByte() ||
return_type.IsShort() || return_type.IsChar()) &&
src_type.IsInteger()));
/* check the register contents */
bool success =
- work_line_->VerifyRegisterType(dec_insn.vA, use_src ? src_type : return_type);
+ work_line_->VerifyRegisterType(vregA, use_src ? src_type : return_type);
if (!success) {
- AppendToLastFailMessage(StringPrintf(" return-1nr on invalid register v%d", dec_insn.vA));
+ AppendToLastFailMessage(StringPrintf(" return-1nr on invalid register v%d", vregA));
}
}
}
@@ -1402,9 +1435,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
} else {
/* check the register contents */
- bool success = work_line_->VerifyRegisterType(dec_insn.vA, return_type);
+ const uint32_t vregA = inst->VRegA_11x();
+ bool success = work_line_->VerifyRegisterType(vregA, return_type);
if (!success) {
- AppendToLastFailMessage(StringPrintf(" return-wide on invalid register v%d", dec_insn.vA));
+ AppendToLastFailMessage(StringPrintf(" return-wide on invalid register v%d", vregA));
}
}
}
@@ -1418,7 +1452,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* return_type is the *expected* return type, not register value */
DCHECK(!return_type.IsZero());
DCHECK(!return_type.IsUninitializedReference());
- const RegType& reg_type = work_line_->GetRegisterType(dec_insn.vA);
+ const uint32_t vregA = inst->VRegA_11x();
+ const RegType& reg_type = work_line_->GetRegisterType(vregA);
// Disallow returning uninitialized values and verify that the reference in vAA is an
// instance of the "return_type"
if (reg_type.IsUninitializedTypes()) {
@@ -1432,66 +1467,71 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
/* could be boolean, int, float, or a null reference */
- case Instruction::CONST_4:
- work_line_->SetRegisterType(dec_insn.vA,
- reg_types_.FromCat1Const(static_cast<int32_t>(dec_insn.vB << 28) >> 28, true));
+ case Instruction::CONST_4: {
+ int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
+ work_line_->SetRegisterType(inst->VRegA_11n(), reg_types_.FromCat1Const(val, true));
break;
- case Instruction::CONST_16:
- work_line_->SetRegisterType(dec_insn.vA,
- reg_types_.FromCat1Const(static_cast<int16_t>(dec_insn.vB), true));
+ }
+ case Instruction::CONST_16: {
+ int16_t val = static_cast<int16_t>(inst->VRegB_21s());
+ work_line_->SetRegisterType(inst->VRegA_21s(), reg_types_.FromCat1Const(val, true));
break;
+ }
case Instruction::CONST:
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.FromCat1Const(dec_insn.vB, true));
+ work_line_->SetRegisterType(inst->VRegA_31i(),
+ reg_types_.FromCat1Const(inst->VRegB_31i(), true));
break;
case Instruction::CONST_HIGH16:
- work_line_->SetRegisterType(dec_insn.vA,
- reg_types_.FromCat1Const(dec_insn.vB << 16, true));
+ work_line_->SetRegisterType(inst->VRegA_21h(),
+ reg_types_.FromCat1Const(inst->VRegB_21h() << 16, true));
break;
/* could be long or double; resolved upon use */
case Instruction::CONST_WIDE_16: {
- int64_t val = static_cast<int16_t>(dec_insn.vB);
+ int64_t val = static_cast<int16_t>(inst->VRegB_21s());
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- work_line_->SetRegisterTypeWide(dec_insn.vA, lo, hi);
+ work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
break;
}
case Instruction::CONST_WIDE_32: {
- int64_t val = static_cast<int32_t>(dec_insn.vB);
+ int64_t val = static_cast<int32_t>(inst->VRegB_31i());
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- work_line_->SetRegisterTypeWide(dec_insn.vA, lo, hi);
+ work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
break;
}
case Instruction::CONST_WIDE: {
- int64_t val = dec_insn.vB_wide;
+ int64_t val = inst->VRegB_51l();
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- work_line_->SetRegisterTypeWide(dec_insn.vA, lo, hi);
+ work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
- int64_t val = static_cast<uint64_t>(dec_insn.vB) << 48;
+ int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- work_line_->SetRegisterTypeWide(dec_insn.vA, lo, hi);
+ work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
break;
}
case Instruction::CONST_STRING:
+ work_line_->SetRegisterType(inst->VRegA_21c(), reg_types_.JavaLangString());
+ break;
case Instruction::CONST_STRING_JUMBO:
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.JavaLangString());
+ work_line_->SetRegisterType(inst->VRegA_31c(), reg_types_.JavaLangString());
break;
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
- const RegType& res_type = ResolveClassAndCheckAccess(dec_insn.vB);
+ const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
// Register holds class, ie its type is class, on error it will hold Conflict.
- work_line_->SetRegisterType(dec_insn.vA,
+ work_line_->SetRegisterType(inst->VRegA_21c(),
res_type.IsConflict() ? res_type
: reg_types_.JavaLangClass(true));
break;
}
case Instruction::MONITOR_ENTER:
- work_line_->PushMonitor(dec_insn.vA, work_insn_idx_);
+ work_line_->PushMonitor(inst->VRegA_11x(), work_insn_idx_);
break;
case Instruction::MONITOR_EXIT:
/*
@@ -1515,7 +1555,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* "live" so we still need to check it.
*/
opcode_flags &= ~Instruction::kThrow;
- work_line_->PopMonitor(dec_insn.vA);
+ work_line_->PopMonitor(inst->VRegA_11x());
break;
case Instruction::CHECK_CAST:
@@ -1527,45 +1567,53 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* If it fails, an exception is thrown, which we deal with later by ignoring the update to
* dec_insn.vA when branching to a handler.
*/
- bool is_checkcast = dec_insn.opcode == Instruction::CHECK_CAST;
- const RegType& res_type =
- ResolveClassAndCheckAccess(is_checkcast ? dec_insn.vB : dec_insn.vC);
+ const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
+ const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
+ const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
if (!is_checkcast) {
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Boolean());
+ work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
}
break; // bad class
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
- const RegType& orig_type =
- work_line_->GetRegisterType(is_checkcast ? dec_insn.vA : dec_insn.vB);
+ uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
+ const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
if (!res_type.IsNonZeroReferenceTypes()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
+ if (is_checkcast) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on unexpected class " << res_type;
+ }
} else if (!orig_type.IsReferenceTypes()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on non-reference in v" << dec_insn.vA;
+ if (is_checkcast) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on non-reference in v" << orig_type_reg;
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on non-reference in v" << orig_type_reg;
+ }
} else {
if (is_checkcast) {
- work_line_->SetRegisterType(dec_insn.vA, res_type);
+ work_line_->SetRegisterType(inst->VRegA_21c(), res_type);
} else {
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Boolean());
+ work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
}
}
break;
}
case Instruction::ARRAY_LENGTH: {
- const RegType& res_type = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
} else {
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Integer());
+ work_line_->SetRegisterType(inst->VRegA_12x(), reg_types_.Integer());
}
}
break;
}
case Instruction::NEW_INSTANCE: {
- const RegType& res_type = ResolveClassAndCheckAccess(dec_insn.vB);
+ const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
break; // bad class
@@ -1582,55 +1630,55 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// initialized must be marked invalid.
work_line_->MarkUninitRefsAsInvalid(uninit_type);
// add the new uninitialized reference to the register state
- work_line_->SetRegisterType(dec_insn.vA, uninit_type);
+ work_line_->SetRegisterType(inst->VRegA_21c(), uninit_type);
break;
}
case Instruction::NEW_ARRAY:
- VerifyNewArray(dec_insn, false, false);
+ VerifyNewArray(inst, false, false);
break;
case Instruction::FILLED_NEW_ARRAY:
- VerifyNewArray(dec_insn, true, false);
+ VerifyNewArray(inst, true, false);
just_set_result = true; // Filled new array sets result register
break;
case Instruction::FILLED_NEW_ARRAY_RANGE:
- VerifyNewArray(dec_insn, true, true);
+ VerifyNewArray(inst, true, true);
just_set_result = true; // Filled new array range sets result register
break;
case Instruction::CMPL_FLOAT:
case Instruction::CMPG_FLOAT:
- if (!work_line_->VerifyRegisterType(dec_insn.vB, reg_types_.Float())) {
+ if (!work_line_->VerifyRegisterType(inst->VRegB_23x(), reg_types_.Float())) {
break;
}
- if (!work_line_->VerifyRegisterType(dec_insn.vC, reg_types_.Float())) {
+ if (!work_line_->VerifyRegisterType(inst->VRegC_23x(), reg_types_.Float())) {
break;
}
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Integer());
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::CMPL_DOUBLE:
case Instruction::CMPG_DOUBLE:
- if (!work_line_->VerifyRegisterTypeWide(dec_insn.vB, reg_types_.DoubleLo(),
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.DoubleLo(),
reg_types_.DoubleHi())) {
break;
}
- if (!work_line_->VerifyRegisterTypeWide(dec_insn.vC, reg_types_.DoubleLo(),
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.DoubleLo(),
reg_types_.DoubleHi())) {
break;
}
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Integer());
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::CMP_LONG:
- if (!work_line_->VerifyRegisterTypeWide(dec_insn.vB, reg_types_.LongLo(),
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.LongLo(),
reg_types_.LongHi())) {
break;
}
- if (!work_line_->VerifyRegisterTypeWide(dec_insn.vC, reg_types_.LongLo(),
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.LongLo(),
reg_types_.LongHi())) {
break;
}
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Integer());
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
- const RegType& res_type = work_line_->GetRegisterType(dec_insn.vA);
+ const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "thrown class " << res_type << " not instanceof Throwable";
}
@@ -1645,12 +1693,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::PACKED_SWITCH:
case Instruction::SPARSE_SWITCH:
/* verify that vAA is an integer, or can be converted to one */
- work_line_->VerifyRegisterType(dec_insn.vA, reg_types_.Integer());
+ work_line_->VerifyRegisterType(inst->VRegA_31t(), reg_types_.Integer());
break;
case Instruction::FILL_ARRAY_DATA: {
/* Similar to the verification done for APUT */
- const RegType& array_type = work_line_->GetRegisterType(dec_insn.vA);
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZero()) {
if (!array_type.IsArrayTypes()) {
@@ -1683,8 +1731,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::IF_EQ:
case Instruction::IF_NE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(dec_insn.vA);
- const RegType& reg_type2 = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
bool mismatch = false;
if (reg_type1.IsZero()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1703,8 +1751,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(dec_insn.vA);
- const RegType& reg_type2 = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
<< reg_type2 << ") must be integral";
@@ -1713,17 +1761,94 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::IF_EQZ:
case Instruction::IF_NEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(dec_insn.vA);
+ const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type << " unexpected as arg to if-eqz/if-nez";
}
+
+ // Find previous instruction - its existence is a precondition to peephole optimization.
+ uint32_t instance_of_idx = 0;
+ if (0 != work_insn_idx_) {
+ instance_of_idx = work_insn_idx_ - 1;
+ while(0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+ instance_of_idx--;
+ }
+ CHECK(insn_flags_[instance_of_idx].IsOpcode());
+ } else {
+ break;
+ }
+
+ const Instruction* instance_of_inst = Instruction::At(code_item_->insns_ + instance_of_idx);
+
+ /* Check for peep-hole pattern of:
+ * ...;
+ * instance-of vX, vY, T;
+ * ifXXX vX, label ;
+ * ...;
+ * label:
+ * ...;
+ * and sharpen the type of vY to be type T.
+ * Note, this pattern can't be applied if:
+ * - there are other branches to this branch target,
+ * - vX == vY.
+ */
+ if (!CurrentInsnFlags()->IsBranchTarget() &&
+ (Instruction::INSTANCE_OF == instance_of_inst->Opcode()) &&
+ (inst->VRegA_21t() == instance_of_inst->VRegA_22c()) &&
+ (instance_of_inst->VRegA_22c() != instance_of_inst->VRegB_22c())) {
+ // Check that we are not attempting conversion to interface types,
+ // which is not done because of the multiple inheritance implications.
+ const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+
+ if(!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
+ RegisterLine* update_line = new RegisterLine(code_item_->registers_size_, this);
+ if (inst->Opcode() == Instruction::IF_EQZ) {
+ fallthrough_line.reset(update_line);
+ } else {
+ branch_line.reset(update_line);
+ }
+ update_line->CopyFromLine(work_line_.get());
+ update_line->SetRegisterType(instance_of_inst->VRegB_22c(), cast_type);
+ if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+ // See if instance-of was preceded by a move-object operation, common due to the small
+ // register encoding space of instance-of, and propagate type information to the source
+ // of the move-object.
+ uint32_t move_idx = instance_of_idx - 1;
+ while(0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+ move_idx--;
+ }
+ CHECK(insn_flags_[move_idx].IsOpcode());
+ const Instruction* move_inst = Instruction::At(code_item_->insns_ + move_idx);
+ switch (move_inst->Opcode()) {
+ case Instruction::MOVE_OBJECT:
+ if (move_inst->VRegA_12x() == instance_of_inst->VRegB_22c()) {
+ update_line->SetRegisterType(move_inst->VRegB_12x(), cast_type);
+ }
+ break;
+ case Instruction::MOVE_OBJECT_FROM16:
+ if (move_inst->VRegA_22x() == instance_of_inst->VRegB_22c()) {
+ update_line->SetRegisterType(move_inst->VRegB_22x(), cast_type);
+ }
+ break;
+ case Instruction::MOVE_OBJECT_16:
+ if (move_inst->VRegA_32x() == instance_of_inst->VRegB_22c()) {
+ update_line->SetRegisterType(move_inst->VRegB_32x(), cast_type);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
break;
}
case Instruction::IF_LTZ:
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(dec_insn.vA);
+ const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -1731,150 +1856,150 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
}
case Instruction::AGET_BOOLEAN:
- VerifyAGet(dec_insn, reg_types_.Boolean(), true);
+ VerifyAGet(inst, reg_types_.Boolean(), true);
break;
case Instruction::AGET_BYTE:
- VerifyAGet(dec_insn, reg_types_.Byte(), true);
+ VerifyAGet(inst, reg_types_.Byte(), true);
break;
case Instruction::AGET_CHAR:
- VerifyAGet(dec_insn, reg_types_.Char(), true);
+ VerifyAGet(inst, reg_types_.Char(), true);
break;
case Instruction::AGET_SHORT:
- VerifyAGet(dec_insn, reg_types_.Short(), true);
+ VerifyAGet(inst, reg_types_.Short(), true);
break;
case Instruction::AGET:
- VerifyAGet(dec_insn, reg_types_.Integer(), true);
+ VerifyAGet(inst, reg_types_.Integer(), true);
break;
case Instruction::AGET_WIDE:
- VerifyAGet(dec_insn, reg_types_.LongLo(), true);
+ VerifyAGet(inst, reg_types_.LongLo(), true);
break;
case Instruction::AGET_OBJECT:
- VerifyAGet(dec_insn, reg_types_.JavaLangObject(false), false);
+ VerifyAGet(inst, reg_types_.JavaLangObject(false), false);
break;
case Instruction::APUT_BOOLEAN:
- VerifyAPut(dec_insn, reg_types_.Boolean(), true);
+ VerifyAPut(inst, reg_types_.Boolean(), true);
break;
case Instruction::APUT_BYTE:
- VerifyAPut(dec_insn, reg_types_.Byte(), true);
+ VerifyAPut(inst, reg_types_.Byte(), true);
break;
case Instruction::APUT_CHAR:
- VerifyAPut(dec_insn, reg_types_.Char(), true);
+ VerifyAPut(inst, reg_types_.Char(), true);
break;
case Instruction::APUT_SHORT:
- VerifyAPut(dec_insn, reg_types_.Short(), true);
+ VerifyAPut(inst, reg_types_.Short(), true);
break;
case Instruction::APUT:
- VerifyAPut(dec_insn, reg_types_.Integer(), true);
+ VerifyAPut(inst, reg_types_.Integer(), true);
break;
case Instruction::APUT_WIDE:
- VerifyAPut(dec_insn, reg_types_.LongLo(), true);
+ VerifyAPut(inst, reg_types_.LongLo(), true);
break;
case Instruction::APUT_OBJECT:
- VerifyAPut(dec_insn, reg_types_.JavaLangObject(false), false);
+ VerifyAPut(inst, reg_types_.JavaLangObject(false), false);
break;
case Instruction::IGET_BOOLEAN:
- VerifyISGet(dec_insn, reg_types_.Boolean(), true, false);
+ VerifyISGet(inst, reg_types_.Boolean(), true, false);
break;
case Instruction::IGET_BYTE:
- VerifyISGet(dec_insn, reg_types_.Byte(), true, false);
+ VerifyISGet(inst, reg_types_.Byte(), true, false);
break;
case Instruction::IGET_CHAR:
- VerifyISGet(dec_insn, reg_types_.Char(), true, false);
+ VerifyISGet(inst, reg_types_.Char(), true, false);
break;
case Instruction::IGET_SHORT:
- VerifyISGet(dec_insn, reg_types_.Short(), true, false);
+ VerifyISGet(inst, reg_types_.Short(), true, false);
break;
case Instruction::IGET:
- VerifyISGet(dec_insn, reg_types_.Integer(), true, false);
+ VerifyISGet(inst, reg_types_.Integer(), true, false);
break;
case Instruction::IGET_WIDE:
- VerifyISGet(dec_insn, reg_types_.LongLo(), true, false);
+ VerifyISGet(inst, reg_types_.LongLo(), true, false);
break;
case Instruction::IGET_OBJECT:
- VerifyISGet(dec_insn, reg_types_.JavaLangObject(false), false, false);
+ VerifyISGet(inst, reg_types_.JavaLangObject(false), false, false);
break;
case Instruction::IPUT_BOOLEAN:
- VerifyISPut(dec_insn, reg_types_.Boolean(), true, false);
+ VerifyISPut(inst, reg_types_.Boolean(), true, false);
break;
case Instruction::IPUT_BYTE:
- VerifyISPut(dec_insn, reg_types_.Byte(), true, false);
+ VerifyISPut(inst, reg_types_.Byte(), true, false);
break;
case Instruction::IPUT_CHAR:
- VerifyISPut(dec_insn, reg_types_.Char(), true, false);
+ VerifyISPut(inst, reg_types_.Char(), true, false);
break;
case Instruction::IPUT_SHORT:
- VerifyISPut(dec_insn, reg_types_.Short(), true, false);
+ VerifyISPut(inst, reg_types_.Short(), true, false);
break;
case Instruction::IPUT:
- VerifyISPut(dec_insn, reg_types_.Integer(), true, false);
+ VerifyISPut(inst, reg_types_.Integer(), true, false);
break;
case Instruction::IPUT_WIDE:
- VerifyISPut(dec_insn, reg_types_.LongLo(), true, false);
+ VerifyISPut(inst, reg_types_.LongLo(), true, false);
break;
case Instruction::IPUT_OBJECT:
- VerifyISPut(dec_insn, reg_types_.JavaLangObject(false), false, false);
+ VerifyISPut(inst, reg_types_.JavaLangObject(false), false, false);
break;
case Instruction::SGET_BOOLEAN:
- VerifyISGet(dec_insn, reg_types_.Boolean(), true, true);
+ VerifyISGet(inst, reg_types_.Boolean(), true, true);
break;
case Instruction::SGET_BYTE:
- VerifyISGet(dec_insn, reg_types_.Byte(), true, true);
+ VerifyISGet(inst, reg_types_.Byte(), true, true);
break;
case Instruction::SGET_CHAR:
- VerifyISGet(dec_insn, reg_types_.Char(), true, true);
+ VerifyISGet(inst, reg_types_.Char(), true, true);
break;
case Instruction::SGET_SHORT:
- VerifyISGet(dec_insn, reg_types_.Short(), true, true);
+ VerifyISGet(inst, reg_types_.Short(), true, true);
break;
case Instruction::SGET:
- VerifyISGet(dec_insn, reg_types_.Integer(), true, true);
+ VerifyISGet(inst, reg_types_.Integer(), true, true);
break;
case Instruction::SGET_WIDE:
- VerifyISGet(dec_insn, reg_types_.LongLo(), true, true);
+ VerifyISGet(inst, reg_types_.LongLo(), true, true);
break;
case Instruction::SGET_OBJECT:
- VerifyISGet(dec_insn, reg_types_.JavaLangObject(false), false, true);
+ VerifyISGet(inst, reg_types_.JavaLangObject(false), false, true);
break;
case Instruction::SPUT_BOOLEAN:
- VerifyISPut(dec_insn, reg_types_.Boolean(), true, true);
+ VerifyISPut(inst, reg_types_.Boolean(), true, true);
break;
case Instruction::SPUT_BYTE:
- VerifyISPut(dec_insn, reg_types_.Byte(), true, true);
+ VerifyISPut(inst, reg_types_.Byte(), true, true);
break;
case Instruction::SPUT_CHAR:
- VerifyISPut(dec_insn, reg_types_.Char(), true, true);
+ VerifyISPut(inst, reg_types_.Char(), true, true);
break;
case Instruction::SPUT_SHORT:
- VerifyISPut(dec_insn, reg_types_.Short(), true, true);
+ VerifyISPut(inst, reg_types_.Short(), true, true);
break;
case Instruction::SPUT:
- VerifyISPut(dec_insn, reg_types_.Integer(), true, true);
+ VerifyISPut(inst, reg_types_.Integer(), true, true);
break;
case Instruction::SPUT_WIDE:
- VerifyISPut(dec_insn, reg_types_.LongLo(), true, true);
+ VerifyISPut(inst, reg_types_.LongLo(), true, true);
break;
case Instruction::SPUT_OBJECT:
- VerifyISPut(dec_insn, reg_types_.JavaLangObject(false), false, true);
+ VerifyISPut(inst, reg_types_.JavaLangObject(false), false, true);
break;
case Instruction::INVOKE_VIRTUAL:
case Instruction::INVOKE_VIRTUAL_RANGE:
case Instruction::INVOKE_SUPER:
case Instruction::INVOKE_SUPER_RANGE: {
- bool is_range = (dec_insn.opcode == Instruction::INVOKE_VIRTUAL_RANGE ||
- dec_insn.opcode == Instruction::INVOKE_SUPER_RANGE);
- bool is_super = (dec_insn.opcode == Instruction::INVOKE_SUPER ||
- dec_insn.opcode == Instruction::INVOKE_SUPER_RANGE);
- mirror::AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_VIRTUAL,
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE ||
+ inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
+ bool is_super = (inst->Opcode() == Instruction::INVOKE_SUPER ||
+ inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL,
is_range, is_super);
const char* descriptor;
if (called_method == NULL) {
- uint32_t method_idx = dec_insn.vB;
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -1892,13 +2017,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::INVOKE_DIRECT:
case Instruction::INVOKE_DIRECT_RANGE: {
- bool is_range = (dec_insn.opcode == Instruction::INVOKE_DIRECT_RANGE);
- mirror::AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_DIRECT,
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_DIRECT,
is_range, false);
const char* return_type_descriptor;
bool is_constructor;
if (called_method == NULL) {
- uint32_t method_idx = dec_insn.vB;
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
is_constructor = StringPiece(dex_file_->GetMethodName(method_id)) == "<init>";
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -1915,7 +2040,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* allowing the latter only if the "this" argument is the same as the "this" argument to
* this method (which implies that we're in a constructor ourselves).
*/
- const RegType& this_type = work_line_->GetInvocationThis(dec_insn);
+ const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsConflict()) // failure.
break;
@@ -1959,11 +2084,11 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE: {
- bool is_range = (dec_insn.opcode == Instruction::INVOKE_STATIC_RANGE);
- mirror::AbstractMethod* called_method = VerifyInvocationArgs(dec_insn, METHOD_STATIC, is_range, false);
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_STATIC, is_range, false);
const char* descriptor;
if (called_method == NULL) {
- uint32_t method_idx = dec_insn.vB;
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -1981,8 +2106,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE: {
- bool is_range = (dec_insn.opcode == Instruction::INVOKE_INTERFACE_RANGE);
- mirror::AbstractMethod* abs_method = VerifyInvocationArgs(dec_insn, METHOD_INTERFACE, is_range, false);
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+ mirror::AbstractMethod* abs_method = VerifyInvocationArgs(inst, METHOD_INTERFACE, is_range, false);
if (abs_method != NULL) {
mirror::Class* called_interface = abs_method->GetDeclaringClass();
if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
@@ -1994,7 +2119,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* Get the type of the "this" arg, which should either be a sub-interface of called
* interface or Object (see comments in RegType::JoinClass).
*/
- const RegType& this_type = work_line_->GetInvocationThis(dec_insn);
+ const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsZero()) {
/* null pointer always passes (and always fails at runtime) */
} else {
@@ -2017,7 +2142,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
*/
const char* descriptor;
if (abs_method == NULL) {
- uint32_t method_idx = dec_insn.vB;
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
descriptor = dex_file_->StringByTypeIdx(return_type_idx);
@@ -2035,74 +2160,74 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::NEG_INT:
case Instruction::NOT_INT:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Integer(), reg_types_.Integer());
+ work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Integer());
break;
case Instruction::NEG_LONG:
case Instruction::NOT_LONG:
- work_line_->CheckUnaryOpWide(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.LongLo(), reg_types_.LongHi());
break;
case Instruction::NEG_FLOAT:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Float(), reg_types_.Float());
+ work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Float());
break;
case Instruction::NEG_DOUBLE:
- work_line_->CheckUnaryOpWide(dec_insn, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
case Instruction::INT_TO_LONG:
- work_line_->CheckUnaryOpToWide(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.Integer());
break;
case Instruction::INT_TO_FLOAT:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Float(), reg_types_.Integer());
+ work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Integer());
break;
case Instruction::INT_TO_DOUBLE:
- work_line_->CheckUnaryOpToWide(dec_insn, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.Integer());
break;
case Instruction::LONG_TO_INT:
- work_line_->CheckUnaryOpFromWide(dec_insn, reg_types_.Integer(),
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
reg_types_.LongLo(), reg_types_.LongHi());
break;
case Instruction::LONG_TO_FLOAT:
- work_line_->CheckUnaryOpFromWide(dec_insn, reg_types_.Float(),
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
reg_types_.LongLo(), reg_types_.LongHi());
break;
case Instruction::LONG_TO_DOUBLE:
- work_line_->CheckUnaryOpWide(dec_insn, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.LongLo(), reg_types_.LongHi());
break;
case Instruction::FLOAT_TO_INT:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Integer(), reg_types_.Float());
+ work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Float());
break;
case Instruction::FLOAT_TO_LONG:
- work_line_->CheckUnaryOpToWide(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.Float());
break;
case Instruction::FLOAT_TO_DOUBLE:
- work_line_->CheckUnaryOpToWide(dec_insn, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.Float());
break;
case Instruction::DOUBLE_TO_INT:
- work_line_->CheckUnaryOpFromWide(dec_insn, reg_types_.Integer(),
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
case Instruction::DOUBLE_TO_LONG:
- work_line_->CheckUnaryOpWide(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
case Instruction::DOUBLE_TO_FLOAT:
- work_line_->CheckUnaryOpFromWide(dec_insn, reg_types_.Float(),
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
case Instruction::INT_TO_BYTE:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Byte(), reg_types_.Integer());
+ work_line_->CheckUnaryOp(inst, reg_types_.Byte(), reg_types_.Integer());
break;
case Instruction::INT_TO_CHAR:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Char(), reg_types_.Integer());
+ work_line_->CheckUnaryOp(inst, reg_types_.Char(), reg_types_.Integer());
break;
case Instruction::INT_TO_SHORT:
- work_line_->CheckUnaryOp(dec_insn, reg_types_.Short(), reg_types_.Integer());
+ work_line_->CheckUnaryOp(inst, reg_types_.Short(), reg_types_.Integer());
break;
case Instruction::ADD_INT:
@@ -2113,13 +2238,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::SHL_INT:
case Instruction::SHR_INT:
case Instruction::USHR_INT:
- work_line_->CheckBinaryOp(dec_insn, reg_types_.Integer(), reg_types_.Integer(),
+ work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
reg_types_.Integer(), false);
break;
case Instruction::AND_INT:
case Instruction::OR_INT:
case Instruction::XOR_INT:
- work_line_->CheckBinaryOp(dec_insn, reg_types_.Integer(), reg_types_.Integer(),
+ work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
reg_types_.Integer(), true);
break;
case Instruction::ADD_LONG:
@@ -2130,7 +2255,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::AND_LONG:
case Instruction::OR_LONG:
case Instruction::XOR_LONG:
- work_line_->CheckBinaryOpWide(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckBinaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.LongLo(), reg_types_.LongHi());
break;
@@ -2138,7 +2263,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::SHR_LONG:
case Instruction::USHR_LONG:
/* shift distance is Int, making these different from other binary operations */
- work_line_->CheckBinaryOpWideShift(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckBinaryOpWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.Integer());
break;
case Instruction::ADD_FLOAT:
@@ -2146,14 +2271,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::MUL_FLOAT:
case Instruction::DIV_FLOAT:
case Instruction::REM_FLOAT:
- work_line_->CheckBinaryOp(dec_insn, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
+ work_line_->CheckBinaryOp(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
break;
case Instruction::ADD_DOUBLE:
case Instruction::SUB_DOUBLE:
case Instruction::MUL_DOUBLE:
case Instruction::DIV_DOUBLE:
case Instruction::REM_DOUBLE:
- work_line_->CheckBinaryOpWide(dec_insn, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ work_line_->CheckBinaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
@@ -2164,15 +2289,15 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::SHL_INT_2ADDR:
case Instruction::SHR_INT_2ADDR:
case Instruction::USHR_INT_2ADDR:
- work_line_->CheckBinaryOp2addr(dec_insn, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
break;
case Instruction::AND_INT_2ADDR:
case Instruction::OR_INT_2ADDR:
case Instruction::XOR_INT_2ADDR:
- work_line_->CheckBinaryOp2addr(dec_insn, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), true);
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), true);
break;
case Instruction::DIV_INT_2ADDR:
- work_line_->CheckBinaryOp2addr(dec_insn, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
break;
case Instruction::ADD_LONG_2ADDR:
case Instruction::SUB_LONG_2ADDR:
@@ -2182,14 +2307,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::AND_LONG_2ADDR:
case Instruction::OR_LONG_2ADDR:
case Instruction::XOR_LONG_2ADDR:
- work_line_->CheckBinaryOp2addrWide(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckBinaryOp2addrWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.LongLo(), reg_types_.LongHi());
break;
case Instruction::SHL_LONG_2ADDR:
case Instruction::SHR_LONG_2ADDR:
case Instruction::USHR_LONG_2ADDR:
- work_line_->CheckBinaryOp2addrWideShift(dec_insn, reg_types_.LongLo(), reg_types_.LongHi(),
+ work_line_->CheckBinaryOp2addrWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
reg_types_.Integer());
break;
case Instruction::ADD_FLOAT_2ADDR:
@@ -2197,14 +2322,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::MUL_FLOAT_2ADDR:
case Instruction::DIV_FLOAT_2ADDR:
case Instruction::REM_FLOAT_2ADDR:
- work_line_->CheckBinaryOp2addr(dec_insn, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
break;
case Instruction::ADD_DOUBLE_2ADDR:
case Instruction::SUB_DOUBLE_2ADDR:
case Instruction::MUL_DOUBLE_2ADDR:
case Instruction::DIV_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE_2ADDR:
- work_line_->CheckBinaryOp2addrWide(dec_insn, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ work_line_->CheckBinaryOp2addrWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.DoubleLo(), reg_types_.DoubleHi(),
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
@@ -2213,12 +2338,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::MUL_INT_LIT16:
case Instruction::DIV_INT_LIT16:
case Instruction::REM_INT_LIT16:
- work_line_->CheckLiteralOp(dec_insn, reg_types_.Integer(), reg_types_.Integer(), false);
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, true);
break;
case Instruction::AND_INT_LIT16:
case Instruction::OR_INT_LIT16:
case Instruction::XOR_INT_LIT16:
- work_line_->CheckLiteralOp(dec_insn, reg_types_.Integer(), reg_types_.Integer(), true);
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, true);
break;
case Instruction::ADD_INT_LIT8:
case Instruction::RSUB_INT_LIT8:
@@ -2228,12 +2353,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::SHL_INT_LIT8:
case Instruction::SHR_INT_LIT8:
case Instruction::USHR_INT_LIT8:
- work_line_->CheckLiteralOp(dec_insn, reg_types_.Integer(), reg_types_.Integer(), false);
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, false);
break;
case Instruction::AND_INT_LIT8:
case Instruction::OR_INT_LIT8:
case Instruction::XOR_INT_LIT8:
- work_line_->CheckLiteralOp(dec_insn, reg_types_.Integer(), reg_types_.Integer(), true);
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, false);
break;
/* These should never appear during verification. */
@@ -2305,33 +2430,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
work_line_->SetResultTypeToUnknown();
}
- /* Handle "continue". Tag the next consecutive instruction. */
- if ((opcode_flags & Instruction::kContinue) != 0) {
- uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
- if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
- return false;
- }
- // The only way to get to a move-exception instruction is to get thrown there. Make sure the
- // next instruction isn't one.
- if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
- return false;
- }
- RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
- if (next_line != NULL) {
- // Merge registers into what we have for the next instruction, and set the "changed" flag if
- // needed.
- if (!UpdateRegisters(next_insn_idx, work_line_.get())) {
- return false;
- }
- } else {
- /*
- * We're not recording register data for the next instruction, so we don't know what the prior
- * state was. We have to assume that something has changed and re-evaluate it.
- */
- insn_flags_[next_insn_idx].SetChanged();
- }
- }
+
/*
* Handle "branch". Tag the branch target.
@@ -2357,8 +2456,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
return false;
}
/* update branch target, set "changed" if appropriate */
- if (!UpdateRegisters(work_insn_idx_ + branch_target, work_line_.get())) {
- return false;
+ if (NULL != branch_line.get()) {
+ if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get())) {
+ return false;
+ }
+ } else {
+ if (!UpdateRegisters(work_insn_idx_ + branch_target, work_line_.get())) {
+ return false;
+ }
}
}
@@ -2433,7 +2538,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* monitor-enter and the monitor stack was empty, we don't need a catch-all (if it throws,
* it will do so before grabbing the lock).
*/
- if (dec_insn.opcode != Instruction::MONITOR_ENTER || work_line_->MonitorStackDepth() != 1) {
+ if (inst->Opcode() != Instruction::MONITOR_ENTER || work_line_->MonitorStackDepth() != 1) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "expected to be within a catch-all for an instruction where a monitor is held";
return false;
@@ -2441,6 +2546,42 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
}
+ /* Handle "continue". Tag the next consecutive instruction.
+ * Note: Keep the code handling "continue" case below the "branch" and "switch" cases,
+ * because it changes work_line_ when performing peephole optimization
+ * and this change should not be used in those cases.
+ */
+ if ((opcode_flags & Instruction::kContinue) != 0) {
+ uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
+ if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
+ return false;
+ }
+ // The only way to get to a move-exception instruction is to get thrown there. Make sure the
+ // next instruction isn't one.
+ if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
+ return false;
+ }
+ if (NULL != fallthrough_line.get()) {
+ // Make workline consistent with fallthrough computed from peephole optimization.
+ work_line_->CopyFromLine(fallthrough_line.get());
+ }
+ RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
+ if (next_line != NULL) {
+ // Merge registers into what we have for the next instruction,
+ // and set the "changed" flag if needed.
+ if (!UpdateRegisters(next_insn_idx, work_line_.get())) {
+ return false;
+ }
+ } else {
+ /*
+ * We're not recording register data for the next instruction, so we don't know what the
+ * prior state was. We have to assume that something has changed and re-evaluate it.
+ */
+ insn_flags_[next_insn_idx].SetChanged();
+ }
+ }
+
/* If we're returning from the method, make sure monitor stack is empty. */
if ((opcode_flags & Instruction::kReturn) != 0) {
if (!work_line_->VerifyMonitorStackEmpty()) {
@@ -2472,7 +2613,7 @@ const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const RegType& referrer = GetDeclaringClass();
mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
const RegType& result =
- klass != NULL ? reg_types_.FromClass(klass, klass->IsFinal())
+ klass != NULL ? reg_types_.FromClass(descriptor, klass, klass->IsFinal())
: reg_types_.FromDescriptor(class_loader_, descriptor, false);
if (result.IsConflict()) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
@@ -2625,12 +2766,14 @@ mirror::AbstractMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex
return res_method;
}
-mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstruction& dec_insn,
- MethodType method_type, bool is_range,
+mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
+ MethodType method_type,
+ bool is_range,
bool is_super) {
// Resolve the method. This could be an abstract or concrete method depending on what sort of call
// we're making.
- mirror::AbstractMethod* res_method = ResolveMethodAndCheckAccess(dec_insn.vB, method_type);
+ const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ mirror::AbstractMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
if (res_method == NULL) { // error or class is unresolved
return NULL;
}
@@ -2660,7 +2803,7 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstru
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
  // match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- size_t expected_args = dec_insn.vA;
+ const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
/* caught by static verifier */
DCHECK(is_range || expected_args <= 5);
if (expected_args > code_item_->outs_size_) {
@@ -2676,7 +2819,7 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstru
*/
size_t actual_args = 0;
if (!res_method->IsStatic()) {
- const RegType& actual_arg_type = work_line_->GetInvocationThis(dec_insn);
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
return NULL;
}
@@ -2686,7 +2829,8 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstru
}
if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
mirror::Class* klass = res_method->GetDeclaringClass();
- const RegType& res_method_class = reg_types_.FromClass(klass, klass->IsFinal());
+ const RegType& res_method_class = reg_types_.FromClass(ClassHelper(klass).GetDescriptor(),
+ klass, klass->IsFinal());
if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type
<< "' not instance of '" << res_method_class << "'";
@@ -2702,6 +2846,10 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstru
MethodHelper mh(res_method);
const DexFile::TypeList* params = mh.GetParameterTypeList();
size_t params_size = params == NULL ? 0 : params->Size();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
for (size_t param_index = 0; param_index < params_size; param_index++) {
if (actual_args >= expected_args) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '" << PrettyMethod(res_method)
@@ -2717,7 +2865,7 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstru
return NULL;
}
const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
- uint32_t get_reg = is_range ? dec_insn.vC + actual_args : dec_insn.arg[actual_args];
+ uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
}
@@ -2732,9 +2880,20 @@ mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const DecodedInstru
}
}
-void MethodVerifier::VerifyNewArray(const DecodedInstruction& dec_insn, bool is_filled,
- bool is_range) {
- const RegType& res_type = ResolveClassAndCheckAccess(is_filled ? dec_insn.vB : dec_insn.vC);
+void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled,
+ bool is_range) {
+ uint32_t type_idx;
+ if (!is_filled) {
+ DCHECK_EQ(inst->Opcode(), Instruction::NEW_ARRAY);
+ type_idx = inst->VRegC_22c();
+ } else if (!is_range) {
+ DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
+ type_idx = inst->VRegB_35c();
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
+ type_idx = inst->VRegB_3rc();
+ }
+ const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) { // bad class
DCHECK_NE(failures_.size(), 0U);
} else {
@@ -2743,16 +2902,20 @@ void MethodVerifier::VerifyNewArray(const DecodedInstruction& dec_insn, bool is_
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "new-array on non-array class " << res_type;
} else if (!is_filled) {
/* make sure "size" register is valid type */
- work_line_->VerifyRegisterType(dec_insn.vB, reg_types_.Integer());
+ work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
/* set register type to array class */
- work_line_->SetRegisterType(dec_insn.vA, res_type);
+ work_line_->SetRegisterType(inst->VRegA_22c(), res_type);
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_);
- uint32_t arg_count = dec_insn.vA;
+ uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
for (size_t ui = 0; ui < arg_count; ui++) {
- uint32_t get_reg = is_range ? dec_insn.vC + ui : dec_insn.arg[ui];
+ uint32_t get_reg = is_range ? inst->VRegC_3rc() + ui : arg[ui];
if (!work_line_->VerifyRegisterType(get_reg, expected_type)) {
work_line_->SetResultRegisterType(reg_types_.Conflict());
return;
@@ -2764,22 +2927,22 @@ void MethodVerifier::VerifyNewArray(const DecodedInstruction& dec_insn, bool is_
}
}
-void MethodVerifier::VerifyAGet(const DecodedInstruction& dec_insn,
+void MethodVerifier::VerifyAGet(const Instruction* inst,
const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(dec_insn.vC);
+ const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array class; this code path will fail at runtime. Infer a merge-able type from the
// instruction type. TODO: have a proper notion of bottom here.
if (!is_primitive || insn_type.IsCategory1Types()) {
// Reference or category 1
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Zero());
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Zero());
} else {
// Category 2
- work_line_->SetRegisterTypeWide(dec_insn.vA, reg_types_.FromCat2ConstLo(0, false),
+ work_line_->SetRegisterTypeWide(inst->VRegA_23x(), reg_types_.FromCat2ConstLo(0, false),
reg_types_.FromCat2ConstHi(0, false));
}
} else if (!array_type.IsArrayTypes()) {
@@ -2803,9 +2966,9 @@ void MethodVerifier::VerifyAGet(const DecodedInstruction& dec_insn,
// instruction, which can't differentiate object types and ints from floats, longs from
// doubles.
if (!component_type.IsLowHalf()) {
- work_line_->SetRegisterType(dec_insn.vA, component_type);
+ work_line_->SetRegisterType(inst->VRegA_23x(), component_type);
} else {
- work_line_->SetRegisterTypeWide(dec_insn.vA, component_type,
+ work_line_->SetRegisterTypeWide(inst->VRegA_23x(), component_type,
component_type.HighHalf(&reg_types_));
}
}
@@ -2813,13 +2976,13 @@ void MethodVerifier::VerifyAGet(const DecodedInstruction& dec_insn,
}
}
-void MethodVerifier::VerifyAPut(const DecodedInstruction& dec_insn,
+void MethodVerifier::VerifyAPut(const Instruction* inst,
const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(dec_insn.vC);
+ const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime. Infer a merge-able type from the
// instruction type.
@@ -2843,7 +3006,7 @@ void MethodVerifier::VerifyAPut(const DecodedInstruction& dec_insn,
// The instruction agrees with the type of array, confirm the value to be stored does too
// Note: we use the instruction type (rather than the component type) for aput-object as
// incompatible classes will be caught at runtime as an array store exception
- work_line_->VerifyRegisterType(dec_insn.vA, is_primitive ? component_type : insn_type);
+ work_line_->VerifyRegisterType(inst->VRegA_23x(), is_primitive ? component_type : insn_type);
}
}
}
@@ -2865,7 +3028,7 @@ mirror::Field* MethodVerifier::GetStaticField(int field_idx) {
mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx,
dex_cache_, class_loader_);
if (field == NULL) {
- LOG(INFO) << "unable to resolve static field " << field_idx << " ("
+ LOG(INFO) << "Unable to resolve static field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
<< dex_file_->GetFieldDeclaringClassDescriptor(field_id);
DCHECK(Thread::Current()->IsExceptionPending());
@@ -2900,7 +3063,7 @@ mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int fie
mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx,
dex_cache_, class_loader_);
if (field == NULL) {
- LOG(INFO) << "unable to resolve instance field " << field_idx << " ("
+ LOG(INFO) << "Unable to resolve instance field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
<< dex_file_->GetFieldDeclaringClassDescriptor(field_id);
DCHECK(Thread::Current()->IsExceptionPending());
@@ -2920,7 +3083,9 @@ mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int fie
return field;
} else {
mirror::Class* klass = field->GetDeclaringClass();
- const RegType& field_klass = reg_types_.FromClass(klass, klass->IsFinal());
+ const RegType& field_klass =
+ reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
+ klass, klass->IsFinal());
if (obj_type.IsUninitializedTypes() &&
(!IsConstructor() || GetDeclaringClass().Equals(obj_type) ||
!field_klass.Equals(GetDeclaringClass()))) {
@@ -2943,14 +3108,14 @@ mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int fie
}
}
-void MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn,
- const RegType& insn_type, bool is_primitive, bool is_static) {
- uint32_t field_idx = is_static ? dec_insn.vB : dec_insn.vC;
+void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static) {
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::Field* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
const char* descriptor;
@@ -2964,6 +3129,7 @@ void MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn,
loader = class_loader_;
}
const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
if (field_type.Equals(insn_type) ||
(field_type.IsFloat() && insn_type.IsIntegralTypes()) ||
@@ -2985,25 +3151,25 @@ void MethodVerifier::VerifyISGet(const DecodedInstruction& dec_insn,
<< " to be compatible with type '" << insn_type
<< "' but found type '" << field_type
<< "' in get-object";
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.Conflict());
+ work_line_->SetRegisterType(vregA, reg_types_.Conflict());
return;
}
}
if (!field_type.IsLowHalf()) {
- work_line_->SetRegisterType(dec_insn.vA, field_type);
+ work_line_->SetRegisterType(vregA, field_type);
} else {
- work_line_->SetRegisterTypeWide(dec_insn.vA, field_type, field_type.HighHalf(&reg_types_));
+ work_line_->SetRegisterTypeWide(vregA, field_type, field_type.HighHalf(&reg_types_));
}
}
-void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn,
- const RegType& insn_type, bool is_primitive, bool is_static) {
- uint32_t field_idx = is_static ? dec_insn.vB : dec_insn.vC;
+void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static) {
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::Field* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(dec_insn.vB);
+ const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
const char* descriptor;
@@ -3024,11 +3190,12 @@ void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn,
return;
}
}
+ const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
// Primitive field assignability rules are weaker than regular assignability rules
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(dec_insn.vA);
+ const RegType& value_type = work_line_->GetRegisterType(vregA);
if (field_type.IsIntegralTypes()) {
instruction_compatible = insn_type.IsIntegralTypes();
value_compatible = value_type.IsIntegralTypes();
@@ -3056,7 +3223,7 @@ void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn,
return;
}
if (!value_compatible) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << dec_insn.vA
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
<< " of type " << value_type
<< " but expected " << field_type
<< " for store to " << PrettyField(field) << " in put";
@@ -3070,7 +3237,7 @@ void MethodVerifier::VerifyISPut(const DecodedInstruction& dec_insn,
<< "' in put-object";
return;
}
- work_line_->VerifyRegisterType(dec_insn.vA, field_type);
+ work_line_->VerifyRegisterType(vregA, field_type);
}
}
@@ -3128,14 +3295,17 @@ const RegType& MethodVerifier::GetMethodReturnType() {
}
const RegType& MethodVerifier::GetDeclaringClass() {
- if (foo_method_ != NULL) {
- mirror::Class* klass = foo_method_->GetDeclaringClass();
- return reg_types_.FromClass(klass, klass->IsFinal());
- } else {
+ if (declaring_class_ == NULL) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
- return reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (mirror_method_ != NULL) {
+ mirror::Class* klass = mirror_method_->GetDeclaringClass();
+ declaring_class_ = &reg_types_.FromClass(descriptor, klass, klass->IsFinal());
+ } else {
+ declaring_class_ = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ }
}
+ return *declaring_class_;
}
void MethodVerifier::ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bits,
@@ -3160,7 +3330,39 @@ void MethodVerifier::ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bit
*log2_max_gc_pc = i;
}
-MethodVerifier::PcToConreteMethod* MethodVerifier::GenerateDevirtMap() {
+MethodVerifier::MethodSafeCastSet* MethodVerifier::GenerateSafeCastSet() {
+ /*
+ * Walks over the method code and adds any cast instructions in which
+ * the type cast is implicit to a set, which is used in the code generation
+ * to elide these casts.
+ */
+ if (!failure_messages_.empty()) {
+ return NULL;
+ }
+ UniquePtr<MethodSafeCastSet> mscs;
+ const Instruction* inst = Instruction::At(code_item_->insns_);
+ const Instruction* end = Instruction::At(code_item_->insns_ +
+ code_item_->insns_size_in_code_units_);
+
+ for (; inst < end; inst = inst->Next()) {
+ if (Instruction::CHECK_CAST != inst->Opcode()) {
+ continue;
+ }
+ uint32_t dex_pc = inst->GetDexPc(code_item_->insns_);
+ RegisterLine* line = reg_table_.GetLine(dex_pc);
+ const RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+ const RegType& cast_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ if (cast_type.IsStrictlyAssignableFrom(reg_type)) {
+ if (mscs.get() == NULL) {
+ mscs.reset(new MethodSafeCastSet());
+ }
+ mscs->insert(dex_pc);
+ }
+ }
+ return mscs.release();
+}
+
+MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
// It is risky to rely on reg_types for sharpening in cases of soft
// verification, we might end up sharpening to a wrong implementation. Just abort.
@@ -3168,39 +3370,43 @@ MethodVerifier::PcToConreteMethod* MethodVerifier::GenerateDevirtMap() {
return NULL;
}
- PcToConreteMethod* pc_to_concrete_method = new PcToConreteMethod();
- uint32_t dex_pc = 0;
+ UniquePtr<PcToConcreteMethodMap> pc_to_concrete_method_map;
const uint16_t* insns = code_item_->insns_ ;
const Instruction* inst = Instruction::At(insns);
+ const Instruction* end = Instruction::At(insns + code_item_->insns_size_in_code_units_);
- for (; dex_pc < code_item_->insns_size_in_code_units_;
- dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits(), inst = inst->Next()) {
-
+ for (; inst < end; inst = inst->Next()) {
bool is_virtual = (inst->Opcode() == Instruction::INVOKE_VIRTUAL) ||
(inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE);
bool is_interface = (inst->Opcode() == Instruction::INVOKE_INTERFACE) ||
(inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- if(!(is_interface || is_virtual))
- continue;
-
- // Check if vC ("this" pointer in the instruction) has a precise type.
+ if(!is_interface && !is_virtual) {
+ continue;
+ }
+ // Get reg type for register holding the reference to the object that will be dispatched upon.
+ uint32_t dex_pc = inst->GetDexPc(insns);
RegisterLine* line = reg_table_.GetLine(dex_pc);
- DecodedInstruction dec_insn(inst);
- const RegType& reg_type(line->GetRegisterType(dec_insn.vC));
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
+ (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+ const RegType&
+ reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
- if (!reg_type.IsPreciseReference()) {
- continue;
+ if (!reg_type.HasClass()) {
+ // We will compute devirtualization information only when we know the Class of the reg type.
+ continue;
}
-
- CHECK(!(reg_type.GetClass()->IsInterface()));
- // If the class is an array class, it can be both Abstract and final and so
- // the reg_type will be created as precise.
- CHECK(!(reg_type.GetClass()->IsAbstract()) || reg_type.GetClass()->IsArrayClass());
- // Find the abstract method.
- // vB has the method index.
- mirror::AbstractMethod* abstract_method = NULL ;
- abstract_method = dex_cache_->GetResolvedMethod(dec_insn.vB);
+ mirror::Class* reg_class = reg_type.GetClass();
+ if (reg_class->IsInterface()) {
+ // We can't devirtualize when the known type of the register is an interface.
+ continue;
+ }
+ if (reg_class->IsAbstract() && !reg_class->IsArrayClass()) {
+ // We can't devirtualize abstract classes except on arrays of abstract classes.
+ continue;
+ }
+ mirror::AbstractMethod* abstract_method =
+ dex_cache_->GetResolvedMethod(is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
if(abstract_method == NULL) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
@@ -3214,28 +3420,24 @@ MethodVerifier::PcToConreteMethod* MethodVerifier::GenerateDevirtMap() {
if (is_virtual) {
concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(abstract_method);
}
-
- if(concrete_method == NULL) {
- // In cases where concrete_method is not found continue to the next invoke instead
- // of crashing.
+ if (concrete_method == NULL || concrete_method->IsAbstract()) {
+ // In cases where concrete_method is not found, or is abstract, continue to the next invoke.
continue;
}
-
- CHECK(!concrete_method->IsAbstract()) << PrettyMethod(concrete_method);
- // Build method reference.
- CompilerDriver::MethodReference concrete_ref(
- concrete_method->GetDeclaringClass()->GetDexCache()->GetDexFile(),
- concrete_method->GetDexMethodIndex());
- // Now Save the current PC and the concrete method reference to be used
- // in compiler driver.
- pc_to_concrete_method->Put(dex_pc, concrete_ref );
+ if (reg_type.IsPreciseReference() || concrete_method->IsFinal() ||
+ concrete_method->GetDeclaringClass()->IsFinal()) {
+ // If we knew exactly the class being dispatched upon, or if the target method cannot be
+ // overridden record the target to be used in the compiler driver.
+ if (pc_to_concrete_method_map.get() == NULL) {
+ pc_to_concrete_method_map.reset(new PcToConcreteMethodMap());
+ }
+ CompilerDriver::MethodReference concrete_ref(
+ concrete_method->GetDeclaringClass()->GetDexCache()->GetDexFile(),
+ concrete_method->GetDexMethodIndex());
+ pc_to_concrete_method_map->Put(dex_pc, concrete_ref);
}
-
- if (pc_to_concrete_method->size() == 0) {
- delete pc_to_concrete_method;
- return NULL ;
}
- return pc_to_concrete_method;
+ return pc_to_concrete_method_map.release();
}
const std::vector<uint8_t>* MethodVerifier::GenerateGcMap() {
@@ -3276,6 +3478,7 @@ const std::vector<uint8_t>* MethodVerifier::GenerateGcMap() {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Failed to encode GC map (size=" << table_size << ")";
return NULL;
}
+ table->reserve(table_size);
// Write table header
table->push_back(format | ((ref_bitmap_bytes >> DexPcToReferenceMap::kRegMapFormatShift) &
~DexPcToReferenceMap::kRegMapFormatMask));
@@ -3326,9 +3529,10 @@ void MethodVerifier::VerifyGcMap(const std::vector<uint8_t>& data) {
}
}
-void MethodVerifier::SetDexGcMap(CompilerDriver::MethodReference ref, const std::vector<uint8_t>& gc_map) {
+void MethodVerifier::SetDexGcMap(CompilerDriver::MethodReference ref,
+ const std::vector<uint8_t>& gc_map) {
{
- MutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
+ WriterMutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
DexGcMapTable::iterator it = dex_gc_maps_->find(ref);
if (it != dex_gc_maps_->end()) {
delete it->second;
@@ -3336,42 +3540,69 @@ void MethodVerifier::SetDexGcMap(CompilerDriver::MethodReference ref, const std:
}
dex_gc_maps_->Put(ref, &gc_map);
}
- CHECK(GetDexGcMap(ref) != NULL);
+ DCHECK(GetDexGcMap(ref) != NULL);
}
-void MethodVerifier::SetDevirtMap(CompilerDriver::MethodReference ref, const PcToConreteMethod* devirt_map) {
- MutexLock mu(Thread::Current(), *devirt_maps_lock_);
- DevirtualizationMapTable::iterator it = devirt_maps_->find(ref);
- if (it != devirt_maps_->end()) {
+void MethodVerifier::SetSafeCastMap(CompilerDriver::MethodReference ref,
+ const MethodSafeCastSet* cast_set) {
+ MutexLock mu(Thread::Current(), *safecast_map_lock_);
+ SafeCastMap::iterator it = safecast_map_->find(ref);
+ if (it != safecast_map_->end()) {
delete it->second;
- devirt_maps_->erase(it);
+ safecast_map_->erase(it);
}
- devirt_maps_->Put(ref, devirt_map);
- CHECK(devirt_maps_->find(ref) != devirt_maps_->end());
+ safecast_map_->Put(ref, cast_set);
+ CHECK(safecast_map_->find(ref) != safecast_map_->end());
+}
+
+bool MethodVerifier::IsSafeCast(CompilerDriver::MethodReference ref, uint32_t pc) {
+ MutexLock mu(Thread::Current(), *safecast_map_lock_);
+ SafeCastMap::const_iterator it = safecast_map_->find(ref);
+ if (it == safecast_map_->end()) {
+ return false;
+ }
+
+ // Look up the cast address in the set of safe casts
+ MethodVerifier::MethodSafeCastSet::const_iterator cast_it = it->second->find(pc);
+ return cast_it != it->second->end();
}
const std::vector<uint8_t>* MethodVerifier::GetDexGcMap(CompilerDriver::MethodReference ref) {
- MutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
+ ReaderMutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
DexGcMapTable::const_iterator it = dex_gc_maps_->find(ref);
if (it == dex_gc_maps_->end()) {
- LOG(WARNING) << "Didn't find GC map for: " << PrettyMethod(ref.second, *ref.first);
+ LOG(WARNING) << "Didn't find GC map for: " << PrettyMethod(ref.dex_method_index, *ref.dex_file);
return NULL;
}
CHECK(it->second != NULL);
return it->second;
}
-const CompilerDriver::MethodReference* MethodVerifier::GetDevirtMap(CompilerDriver::MethodReference ref, uint32_t pc) {
- MutexLock mu(Thread::Current(), *devirt_maps_lock_);
+void MethodVerifier::SetDevirtMap(CompilerDriver::MethodReference ref,
+ const PcToConcreteMethodMap* devirt_map) {
+ WriterMutexLock mu(Thread::Current(), *devirt_maps_lock_);
+ DevirtualizationMapTable::iterator it = devirt_maps_->find(ref);
+ if (it != devirt_maps_->end()) {
+ delete it->second;
+ devirt_maps_->erase(it);
+ }
+
+ devirt_maps_->Put(ref, devirt_map);
+ CHECK(devirt_maps_->find(ref) != devirt_maps_->end());
+}
+
+const CompilerDriver::MethodReference* MethodVerifier::GetDevirtMap(const CompilerDriver::MethodReference& ref,
+ uint32_t dex_pc) {
+ ReaderMutexLock mu(Thread::Current(), *devirt_maps_lock_);
DevirtualizationMapTable::const_iterator it = devirt_maps_->find(ref);
if (it == devirt_maps_->end()) {
return NULL;
}
// Look up the PC in the map, get the concrete method to execute and return its reference.
- MethodVerifier::PcToConreteMethod::const_iterator pc_to_concrete_method = it->second->find(pc);
+ MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc);
if(pc_to_concrete_method != it->second->end()) {
return &(pc_to_concrete_method->second);
} else {
@@ -3423,26 +3654,36 @@ std::vector<int32_t> MethodVerifier::DescribeVRegs(uint32_t dex_pc) {
return result;
}
-Mutex* MethodVerifier::dex_gc_maps_lock_ = NULL;
+ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL;
MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL;
-Mutex* MethodVerifier::devirt_maps_lock_ = NULL;
+Mutex* MethodVerifier::safecast_map_lock_ = NULL;
+MethodVerifier::SafeCastMap* MethodVerifier::safecast_map_ = NULL;
+
+ReaderWriterMutex* MethodVerifier::devirt_maps_lock_ = NULL;
MethodVerifier::DevirtualizationMapTable* MethodVerifier::devirt_maps_ = NULL;
Mutex* MethodVerifier::rejected_classes_lock_ = NULL;
MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL;
void MethodVerifier::Init() {
- dex_gc_maps_lock_ = new Mutex("verifier GC maps lock");
+ dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock");
Thread* self = Thread::Current();
{
- MutexLock mu(self, *dex_gc_maps_lock_);
+ WriterMutexLock mu(self, *dex_gc_maps_lock_);
dex_gc_maps_ = new MethodVerifier::DexGcMapTable;
}
- devirt_maps_lock_ = new Mutex("verifier Devirtualization lock");
+ safecast_map_lock_ = new Mutex("verifier Cast Elision lock");
+ {
+ MutexLock mu(self, *safecast_map_lock_);
+ safecast_map_ = new MethodVerifier::SafeCastMap();
+ }
+
+ devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock");
+
{
- MutexLock mu(self, *devirt_maps_lock_);
+ WriterMutexLock mu(self, *devirt_maps_lock_);
devirt_maps_ = new MethodVerifier::DevirtualizationMapTable();
}
@@ -3457,7 +3698,7 @@ void MethodVerifier::Init() {
void MethodVerifier::Shutdown() {
Thread* self = Thread::Current();
{
- MutexLock mu(self, *dex_gc_maps_lock_);
+ WriterMutexLock mu(self, *dex_gc_maps_lock_);
STLDeleteValues(dex_gc_maps_);
delete dex_gc_maps_;
dex_gc_maps_ = NULL;
@@ -3466,7 +3707,7 @@ void MethodVerifier::Shutdown() {
dex_gc_maps_lock_ = NULL;
{
- MutexLock mu(self, *devirt_maps_lock_);
+ WriterMutexLock mu(self, *devirt_maps_lock_);
STLDeleteValues(devirt_maps_);
delete devirt_maps_;
devirt_maps_ = NULL;
diff --git a/src/verifier/method_verifier.h b/src/verifier/method_verifier.h
index ab7e3cc8dc..198d8cb17b 100644
--- a/src/verifier/method_verifier.h
+++ b/src/verifier/method_verifier.h
@@ -187,9 +187,15 @@ class MethodVerifier {
static const std::vector<uint8_t>* GetDexGcMap(CompilerDriver::MethodReference ref)
LOCKS_EXCLUDED(dex_gc_maps_lock_);
- static const CompilerDriver::MethodReference* GetDevirtMap(CompilerDriver::MethodReference ref, uint32_t pc)
+ static const CompilerDriver::MethodReference* GetDevirtMap(const CompilerDriver::MethodReference& ref,
+ uint32_t dex_pc)
LOCKS_EXCLUDED(devirt_maps_lock_);
+ // Returns true if the cast can statically be verified to be redundant
+ // by using the check-cast elision peephole optimization in the verifier
+ static bool IsSafeCast(CompilerDriver::MethodReference ref, uint32_t pc)
+ LOCKS_EXCLUDED(safecast_map_lock_);
+
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in 'm'.
static void FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc,
@@ -447,19 +453,18 @@ class MethodVerifier {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of a new array instruction
- void VerifyNewArray(const DecodedInstruction& dec_insn, bool is_filled,
- bool is_range)
+ void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
- void VerifyAGet(const DecodedInstruction& insn, const RegType& insn_type,
+ void VerifyAGet(const Instruction* inst, const RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
- void VerifyAPut(const DecodedInstruction& insn, const RegType& insn_type,
+ void VerifyAPut(const Instruction* inst, const RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
@@ -470,12 +475,12 @@ class MethodVerifier {
mirror::Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget or sget instruction.
- void VerifyISGet(const DecodedInstruction& insn, const RegType& insn_type,
+ void VerifyISGet(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput or sput instruction.
- void VerifyISPut(const DecodedInstruction& insn, const RegType& insn_type,
+ void VerifyISPut(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -522,8 +527,9 @@ class MethodVerifier {
* Returns the resolved method on success, NULL on failure (with *failure
* set appropriately).
*/
- mirror::AbstractMethod* VerifyInvocationArgs(const DecodedInstruction& dec_insn,
- MethodType method_type, bool is_range, bool is_super)
+ mirror::AbstractMethod* VerifyInvocationArgs(const Instruction* inst,
+ MethodType method_type,
+ bool is_range, bool is_super)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -574,23 +580,36 @@ class MethodVerifier {
InstructionFlags* CurrentInsnFlags();
// All the GC maps that the verifier has created
- typedef SafeMap<const CompilerDriver::MethodReference, const std::vector<uint8_t>*> DexGcMapTable;
- static Mutex* dex_gc_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ typedef SafeMap<const CompilerDriver::MethodReference, const std::vector<uint8_t>*,
+ CompilerDriver::MethodReferenceComparator> DexGcMapTable;
+ static ReaderWriterMutex* dex_gc_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
static DexGcMapTable* dex_gc_maps_ GUARDED_BY(dex_gc_maps_lock_);
static void SetDexGcMap(CompilerDriver::MethodReference ref, const std::vector<uint8_t>& dex_gc_map)
LOCKS_EXCLUDED(dex_gc_maps_lock_);
+ // Cast elision types.
+ typedef std::set<uint32_t> MethodSafeCastSet;
+ typedef SafeMap<const CompilerDriver::MethodReference, const MethodSafeCastSet*,
+ CompilerDriver::MethodReferenceComparator> SafeCastMap;
+ MethodVerifier::MethodSafeCastSet* GenerateSafeCastSet()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetSafeCastMap(CompilerDriver::MethodReference ref, const MethodSafeCastSet* mscs);
+ LOCKS_EXCLUDED(safecast_map_lock_);
+ static Mutex* safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ static SafeCastMap* safecast_map_ GUARDED_BY(safecast_map_lock_);
+
// Devirtualization map.
- typedef SafeMap<const uint32_t, CompilerDriver::MethodReference> PcToConreteMethod;
- typedef SafeMap<const CompilerDriver::MethodReference, const PcToConreteMethod*>
- DevirtualizationMapTable;
- MethodVerifier::PcToConreteMethod* GenerateDevirtMap()
+ typedef SafeMap<const uint32_t, CompilerDriver::MethodReference> PcToConcreteMethodMap;
+ typedef SafeMap<const CompilerDriver::MethodReference, const PcToConcreteMethodMap*,
+ CompilerDriver::MethodReferenceComparator> DevirtualizationMapTable;
+ MethodVerifier::PcToConcreteMethodMap* GenerateDevirtMap()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Mutex* devirt_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ static ReaderWriterMutex* devirt_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
static DevirtualizationMapTable* devirt_maps_ GUARDED_BY(devirt_maps_lock_);
- static void SetDevirtMap(CompilerDriver::MethodReference ref, const PcToConreteMethod* pc_method_map);
+ static void SetDevirtMap(CompilerDriver::MethodReference ref,
+ const PcToConcreteMethodMap* pc_method_map)
LOCKS_EXCLUDED(devirt_maps_lock_);
typedef std::set<CompilerDriver::ClassReference> RejectedClassesTable;
static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -613,20 +632,20 @@ class MethodVerifier {
// Storage for the register status we're saving for later.
UniquePtr<RegisterLine> saved_line_;
- uint32_t dex_method_idx_; // The method we're working on.
+ const uint32_t dex_method_idx_; // The method we're working on.
// Its object representation if known.
- mirror::AbstractMethod* foo_method_ GUARDED_BY(Locks::mutator_lock_);
- uint32_t method_access_flags_; // Method's access flags.
- const DexFile* dex_file_; // The dex file containing the method.
+ mirror::AbstractMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
+ const uint32_t method_access_flags_; // Method's access flags.
+ const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
mirror::DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
mirror::ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_);
- uint32_t class_def_idx_; // The class def index of the declaring class of the method.
- const DexFile::CodeItem* code_item_; // The code item containing the code for the method.
+ const uint32_t class_def_idx_; // The class def index of the declaring class of the method.
+ const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
+ const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
UniquePtr<InstructionFlags[]> insn_flags_;
-
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
diff --git a/src/verifier/reg_type.cc b/src/verifier/reg_type.cc
index 32679f6100..1c61a29cee 100644
--- a/src/verifier/reg_type.cc
+++ b/src/verifier/reg_type.cc
@@ -25,6 +25,7 @@
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "reg_type_cache-inl.h"
+#include "scoped_thread_state_change.h"
#include <limits>
#include <sstream>
@@ -32,7 +33,6 @@
namespace art {
namespace verifier {
-static const bool kIsDebugBuild = false;
UndefinedType* UndefinedType::instance_ = NULL;
ConflictType* ConflictType::instance_ = NULL;
BooleanType* BooleanType::instance = NULL;
@@ -46,6 +46,41 @@ DoubleLoType* DoubleLoType::instance_ = NULL;
DoubleHiType* DoubleHiType::instance_ = NULL;
IntegerType* IntegerType::instance_ = NULL;
+int32_t RegType::ConstantValue() const {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Unexpected call to ConstantValue: " << *this;
+ return 0;
+}
+
+int32_t RegType::ConstantValueLo() const {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Unexpected call to ConstantValueLo: " << *this;
+ return 0;
+}
+
+int32_t RegType::ConstantValueHi() const {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Unexpected call to ConstantValueHi: " << *this;
+ return 0;
+}
+
+PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ CHECK(klass != NULL);
+ CHECK(!descriptor.empty());
+}
+
+Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : PrimitiveType(klass, descriptor, cache_id) {
+}
+
+Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : PrimitiveType(klass, descriptor, cache_id) {
+}
+
std::string PreciseConstType::Dump() const {
std::stringstream result;
uint32_t val = ConstantValue();
@@ -70,36 +105,44 @@ std::string BooleanType::Dump() const {
std::string ConflictType::Dump() const {
return "Conflict";
}
+
std::string ByteType::Dump() const {
return "Byte";
}
+
std::string ShortType::Dump() const {
return "short";
}
+
std::string CharType::Dump() const {
return "Char";
}
+
std::string FloatType::Dump() const {
return "float";
}
+
std::string LongLoType::Dump() const {
return "long (Low Half)";
}
+
std::string LongHiType::Dump() const {
return "long (High Half)";
}
+
std::string DoubleLoType::Dump() const {
return "Double (Low Half)";
}
+
std::string DoubleHiType::Dump() const {
return "Double (High Half)";
}
+
std::string IntegerType::Dump() const {
return "Integer";
}
-
-DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -119,7 +162,7 @@ void DoubleHiType::Destroy() {
}
}
-DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -139,7 +182,7 @@ void DoubleLoType::Destroy() {
}
}
-LongLoType* LongLoType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new LongLoType(klass, descriptor, cache_id);
@@ -147,7 +190,7 @@ LongLoType* LongLoType::CreateInstance(mirror::Class* klass, std::string& descri
return instance_;
}
-LongHiType* LongHiType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -179,9 +222,8 @@ void LongLoType::Destroy() {
}
}
-FloatType* FloatType::CreateInstance(mirror::Class* klass, std::string& descriptor,
- uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new FloatType(klass, descriptor, cache_id);
}
@@ -199,17 +241,19 @@ void FloatType::Destroy() {
}
}
-CharType* CharType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new CharType(klass, descriptor, cache_id);
}
return instance_;
}
+
CharType* CharType::GetInstance() {
CHECK(instance_ != NULL);
return instance_;
}
+
void CharType::Destroy() {
if (instance_ != NULL) {
delete instance_;
@@ -217,81 +261,94 @@ void CharType::Destroy() {
}
}
-ShortType* ShortType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new ShortType(klass, descriptor, cache_id);
}
return instance_;
}
+
ShortType* ShortType::GetInstance() {
CHECK(instance_ != NULL);
return instance_;
}
+
void ShortType::Destroy() {
if (instance_ != NULL) {
delete instance_;
instance_ = NULL;
}
}
-ByteType* ByteType::CreateInstance(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+
+ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new ByteType(klass, descriptor, cache_id);
}
return instance_;
}
+
ByteType* ByteType::GetInstance() {
CHECK(instance_ != NULL);
return instance_;
}
+
void ByteType::Destroy() {
if (instance_ != NULL) {
delete instance_;
instance_ = NULL;
}
}
-IntegerType* IntegerType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+
+IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new IntegerType(klass, descriptor, cache_id);
}
return instance_;
}
+
IntegerType* IntegerType::GetInstance() {
CHECK(instance_ != NULL);
return instance_;
}
+
void IntegerType::Destroy() {
if (instance_ != NULL) {
delete instance_;
instance_ = NULL;
}
}
-ConflictType* ConflictType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+
+ConflictType* ConflictType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new ConflictType(klass, descriptor, cache_id);
}
return instance_;
}
+
ConflictType* ConflictType::GetInstance() {
CHECK(instance_ != NULL);
return instance_;
}
+
void ConflictType::Destroy() {
if (instance_ != NULL) {
delete instance_;
instance_ = NULL;
}
}
-BooleanType* BooleanType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+
+BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (BooleanType::instance == NULL) {
instance = new BooleanType(klass, descriptor, cache_id);
}
return BooleanType::instance;
}
+
BooleanType* BooleanType::GetInstance() {
CHECK(BooleanType::instance != NULL);
return BooleanType::instance;
@@ -307,23 +364,33 @@ void BooleanType::Destroy() {
std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return "Undefined";
}
-UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass, std::string& descriptor,
+
+UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) {
if (instance_ == NULL) {
instance_ = new UndefinedType(klass, descriptor, cache_id);
}
return instance_;
}
+
UndefinedType* UndefinedType::GetInstance() {
CHECK(instance_ != NULL);
return instance_;
}
+
void UndefinedType::Destroy() {
if (instance_ != NULL) {
delete instance_;
instance_ = NULL;
}
}
+
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ : RegType(klass, descriptor, cache_id) {
+ DCHECK(klass->IsInstantiable());
+}
+
std::string UnresolvedMergedType::Dump() const {
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
@@ -338,6 +405,7 @@ std::string UnresolvedMergedType::Dump() const {
result << ")";
return result.str();
}
+
std::string UnresolvedSuperClass::Dump() const {
std::stringstream result;
uint16_t super_type_id = GetUnresolvedSuperClassChildId();
@@ -358,7 +426,7 @@ std::string UnresolvedUninitializedRefType::Dump() const {
return result.str();
}
-std::string UnresolvedUninitialisedThisRefType::Dump() const {
+std::string UnresolvedUninitializedThisRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
return result.str();
@@ -376,13 +444,14 @@ std::string PreciseReferenceType::Dump() const {
return result.str();
}
-std::string UninitialisedReferenceType::Dump() const {
+std::string UninitializedReferenceType::Dump() const {
std::stringstream result;
result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UninitialisedThisReferenceType::Dump() const {
+
+std::string UninitializedThisReferenceType::Dump() const {
std::stringstream result;
result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
result << "Allocation PC: " << GetAllocationPc();
@@ -459,77 +528,8 @@ std::string ImpreciseConstHiType::Dump() const {
return result.str();
}
-BooleanType::BooleanType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-ConflictType::ConflictType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-ByteType::ByteType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-ShortType::ShortType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-CharType::CharType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-IntegerType::IntegerType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-ConstantType::ConstantType(uint32_t constat, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_): RegType(NULL, "", cache_id), constant_(constat) {
-}
-
-ReferenceType::ReferenceType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, std::string& descriptor,
- uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
- DCHECK(klass->IsInstantiable());
-}
-
-UnresolvedUninitialisedThisRefType::UnresolvedUninitialisedThisRefType(std::string& descriptor,
- uint16_t cache_id)
- : UninitializedType(NULL, descriptor, 0, cache_id) {
-}
-
-UnresolvedUninitializedRefType::UnresolvedUninitializedRefType( std::string& descriptor,
- uint32_t allocation_pc, uint16_t cache_id)
- : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
-}
-
-UninitialisedReferenceType::UninitialisedReferenceType(mirror::Class* klass,
- std::string& descriptor, uint32_t allocation_pc, uint16_t cache_id)
- : UninitializedType(klass, descriptor, allocation_pc, cache_id) {
-}
-
-LongHiType::LongHiType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-FloatType::FloatType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-DoubleLoType::DoubleLoType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-DoubleHiType::DoubleHiType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
-}
-
-LongLoType::LongLoType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id) {
+ConstantType::ConstantType(uint32_t constant, uint16_t cache_id)
+ : RegType(NULL, "", cache_id), constant_(constant) {
}
const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
@@ -575,6 +575,17 @@ Primitive::Type RegType::GetPrimitiveType() const {
}
}
+bool UninitializedType::IsUninitializedTypes() const {
+ return true;
+}
+
+bool UninitializedType::IsNonZeroReferenceTypes() const {
+ return true;
+}
+
+bool UnresolvedType::IsNonZeroReferenceTypes() const {
+ return true;
+}
std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
const RegType& _left(reg_type_cache_->GetFromId(refs.first));
@@ -612,7 +623,7 @@ const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
if (super_klass != NULL) {
// A super class of a precise type isn't precise as a precise type indicates the register
// holds exactly that type.
- return cache->FromClass(super_klass, false);
+ return cache->FromClass(ClassHelper(super_klass).GetDescriptor(), super_klass, false);
} else {
return cache->Zero();
}
@@ -697,62 +708,72 @@ ImpreciseConstType::ImpreciseConstType(uint32_t constat, uint16_t cache_id)
: ConstantType(constat, cache_id) {
}
-bool RegType::IsAssignableFrom(const RegType& src) const {
- if (Equals(src)) {
+static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (lhs.Equals(rhs)) {
return true;
} else {
- if (IsBoolean()) {
- return src.IsBooleanTypes();
- } else if (IsByte()) {
- return src.IsByteTypes();
- } else if (IsShort()) {
- return src.IsShortTypes();
- } else if (IsChar()) {
- return src.IsCharTypes();
- } else if (IsInteger()) {
- return src.IsIntegralTypes();
- } else if (IsFloat()) {
- return src.IsFloatTypes();
- } else if (IsLongLo()) {
- return src.IsLongTypes();
- } else if (IsDoubleLo()) {
- return src.IsDoubleTypes();
+ if (lhs.IsBoolean()) {
+ return rhs.IsBooleanTypes();
+ } else if (lhs.IsByte()) {
+ return rhs.IsByteTypes();
+ } else if (lhs.IsShort()) {
+ return rhs.IsShortTypes();
+ } else if (lhs.IsChar()) {
+ return rhs.IsCharTypes();
+ } else if (lhs.IsInteger()) {
+ return rhs.IsIntegralTypes();
+ } else if (lhs.IsFloat()) {
+ return rhs.IsFloatTypes();
+ } else if (lhs.IsLongLo()) {
+ return rhs.IsLongTypes();
+ } else if (lhs.IsDoubleLo()) {
+ return rhs.IsDoubleTypes();
} else {
- if (!IsReferenceTypes()) {
- LOG(FATAL) << "Unexpected register type in 4bleFrom: '" << src << "'";
+ CHECK(lhs.IsReferenceTypes())
+ << "Unexpected register type in IsAssignableFrom: '"
+ << lhs << "' := '" << rhs << "'";
+ if (rhs.IsZero()) {
+ return true; // All reference types can be assigned null.
+ } else if (!rhs.IsReferenceTypes()) {
+ return false; // Expect rhs to be a reference type.
+ } else if (lhs.IsJavaLangObject()) {
+ return true; // All reference types can be assigned to Object.
+ } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
+ // If we're not strict allow assignment to any interface, see comment in ClassJoin.
+ return true;
+ } else if (lhs.IsJavaLangObjectArray()) {
+ return rhs.IsObjectArrayTypes(); // All reference arrays may be assigned to Object[]
+ } else if (lhs.HasClass() && rhs.HasClass() &&
+ lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) {
+ // We're assignable from the Class point-of-view.
+ return true;
+ } else {
+ // Unresolved types are only assignable for null and equality.
+ return false;
}
- if (src.IsZero()) {
- return true; // all reference types can be assigned null
- } else if (!src.IsReferenceTypes()) {
- return false; // expect src to be a reference type
- } else if (IsJavaLangObject()) {
- return true; // all reference types can be assigned to Object
- } else if (!IsUnresolvedTypes() && GetClass()->IsInterface()) {
- return true; // We allow assignment to any interface, see comment in ClassJoin
- } else if (IsJavaLangObjectArray()) {
- return src.IsObjectArrayTypes(); // All reference arrays may be assigned to Object[]
- } else if (!IsUnresolvedTypes() && !src.IsUnresolvedTypes() &&
- GetClass()->IsAssignableFrom(src.GetClass())) {
- // We're assignable from the Class point-of-view
- return true;
- } else if (IsUnresolvedTypes()) {
- // Unresolved types are only assignable for null, Object and equality.
- return (src.IsZero() || src.IsJavaLangObject());
- } else {
- return false;
- }
}
}
}
+bool RegType::IsAssignableFrom(const RegType& src) const {
+ return AssignableFrom(*this, src, false);
+}
+
+bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+ return AssignableFrom(*this, src, true);
+}
+
int32_t ConstantType::ConstantValue() const {
DCHECK(IsConstantTypes());
return constant_;
}
+
int32_t ConstantType::ConstantValueLo() const {
DCHECK(IsConstantLo());
return constant_;
}
+
int32_t ConstantType::ConstantValueHi() const {
if (IsConstantHi() || IsPreciseConstantHi() || IsImpreciseConstantHi()) {
return constant_;
@@ -761,6 +782,7 @@ int32_t ConstantType::ConstantValueHi() const {
return 0;
}
}
+
static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
return a.IsConstant() ? b : a;
}
@@ -884,7 +906,7 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
} else if (c2 == join_class && !incoming_type.IsPreciseReference()) {
return incoming_type;
} else {
- return reg_types->FromClass(join_class, false);
+ return reg_types->FromClass(ClassHelper(join_class).GetDescriptor(), join_class, false);
}
}
} else {
@@ -949,33 +971,22 @@ void RegType::CheckInvariants() const {
CHECK(descriptor_.empty()) << *this;
CHECK(klass_ == NULL) << *this;
}
+ if (klass_ != NULL) {
+ CHECK(!descriptor_.empty()) << *this;
+ }
}
-UninitializedType::UninitializedType(mirror::Class* klass, std::string& descriptor,
- uint32_t allocation_pc, uint16_t cache_id)
- : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {
-}
-
-void UninitializedType::CheckInvariants() const {
- CHECK_EQ(allocation_pc_, 0U) << *this;
-}
-
-void UninitialisedThisReferenceType::CheckInvariants() const {
- UninitializedType::CheckInvariants();
-}
-
-UninitialisedThisReferenceType::UninitialisedThisReferenceType(mirror::Class* klass,
- std::string& descriptor, uint16_t cache_id) : UninitializedType(klass, descriptor, 0, cache_id) {
+void UninitializedThisReferenceType::CheckInvariants() const {
+ CHECK_EQ(GetAllocationPc(), 0U) << *this;
}
-void UnresolvedUninitialisedThisRefType::CheckInvariants() const {
- UninitializedType::CheckInvariants();
+void UnresolvedUninitializedThisRefType::CheckInvariants() const {
+ CHECK_EQ(GetAllocationPc(), 0U) << *this;
CHECK(!descriptor_.empty()) << *this;
CHECK(klass_ == NULL) << *this;
}
void UnresolvedUninitializedRefType::CheckInvariants() const {
- UninitializedType::CheckInvariants();
CHECK(!descriptor_.empty()) << *this;
CHECK(klass_ == NULL) << *this;
}
diff --git a/src/verifier/reg_type.h b/src/verifier/reg_type.h
index 7c4253604c..9ac0ecac8a 100644
--- a/src/verifier/reg_type.h
+++ b/src/verifier/reg_type.h
@@ -18,6 +18,7 @@
#define ART_SRC_VERIFIER_REG_TYPE_H_
#include "base/macros.h"
+#include "globals.h"
#include "primitive.h"
#include "jni.h"
@@ -39,105 +40,43 @@ class RegTypeCache;
*/
class RegType {
public:
- // The high half that corresponds to this low half
- const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- inline virtual bool IsUndefined() const {
- return false;
- }
- inline virtual bool IsConflict() const {
- return false;
- }
- inline virtual bool IsBoolean() const {
- return false;
- }
- inline virtual bool IsByte() const {
- return false;
- }
- inline virtual bool IsChar() const {
- return false;
- }
- inline virtual bool IsShort() const {
- return false;
- }
- inline virtual bool IsInteger() const {
- return false;
- }
- inline virtual bool IsLongLo() const {
- return false;
- }
- inline virtual bool IsLongHi() const {
- return false;
- }
- inline virtual bool IsFloat() const {
- return false;
- }
- inline virtual bool IsDouble() const {
- return false;
- }
- inline virtual bool IsDoubleLo() const {
- return false;
- }
- inline virtual bool IsDoubleHi() const {
- return false;
- }
- inline virtual bool IsUnresolvedReference() const {
- return false;
- }
- inline virtual bool IsUninitializedReference() const {
- return false;
- }
- inline virtual bool IsUninitializedThisReference() const {
- return false;
- }
- inline virtual bool IsUnresolvedAndUninitializedReference() const {
- return false;
- }
- inline virtual bool IsUnresolvedAndUninitializedThisReference() const {
- return false;
- }
- inline virtual bool IsUnresolvedMergedReference() const {
- return false;
- }
- inline virtual bool IsUnresolvedSuperClass() const {
- return false;
- }
- inline virtual bool IsReference() const {
- return false;
- }
- inline virtual bool IsPreciseReference() const {
- return false;
- }
- inline virtual bool IsPreciseConstant() const {
- return false;
- }
- inline virtual bool IsPreciseConstantLo() const {
- return false;
- }
- inline virtual bool IsPreciseConstantHi() const {
- return false;
- }
- inline virtual bool IsImpreciseConstantLo() const {
- return false;
- }
- inline virtual bool IsImpreciseConstantHi() const {
- return false;
- }
- virtual bool IsImpreciseConstant() const {
- return false;
- }
-
- inline virtual bool IsConstantTypes() const {
- return false;
- }
+ virtual bool IsUndefined() const { return false; }
+ virtual bool IsConflict() const { return false; }
+ virtual bool IsBoolean() const { return false; }
+ virtual bool IsByte() const { return false; }
+ virtual bool IsChar() const { return false; }
+ virtual bool IsShort() const { return false; }
+ virtual bool IsInteger() const { return false; }
+ virtual bool IsLongLo() const { return false; }
+ virtual bool IsLongHi() const { return false; }
+ virtual bool IsFloat() const { return false; }
+ virtual bool IsDouble() const { return false; }
+ virtual bool IsDoubleLo() const { return false; }
+ virtual bool IsDoubleHi() const { return false; }
+ virtual bool IsUnresolvedReference() const { return false; }
+ virtual bool IsUninitializedReference() const { return false; }
+ virtual bool IsUninitializedThisReference() const { return false; }
+ virtual bool IsUnresolvedAndUninitializedReference() const { return false; }
+ virtual bool IsUnresolvedAndUninitializedThisReference() const { return false; }
+ virtual bool IsUnresolvedMergedReference() const { return false; }
+ virtual bool IsUnresolvedSuperClass() const { return false; }
+ virtual bool IsReference() const { return false; }
+ virtual bool IsPreciseReference() const { return false; }
+ virtual bool IsPreciseConstant() const { return false; }
+ virtual bool IsPreciseConstantLo() const { return false; }
+ virtual bool IsPreciseConstantHi() const { return false; }
+ virtual bool IsImpreciseConstantLo() const { return false; }
+ virtual bool IsImpreciseConstantHi() const { return false; }
+ virtual bool IsImpreciseConstant() const { return false; }
+ virtual bool IsConstantTypes() const { return false; }
bool IsConstant() const {
- return (IsPreciseConstant() || IsImpreciseConstant());
+ return IsPreciseConstant() || IsImpreciseConstant();
}
bool IsConstantLo() const {
- return (IsPreciseConstantLo() || IsImpreciseConstantLo());
+ return IsPreciseConstantLo() || IsImpreciseConstantLo();
}
bool IsPrecise() const {
- return (IsPreciseConstantLo() || IsPreciseConstant() ||
- IsPreciseConstantHi());
+ return IsPreciseConstantLo() || IsPreciseConstant() || IsPreciseConstantHi();
}
bool IsLongConstant() const {
return IsConstantLo();
@@ -148,11 +87,7 @@ class RegType {
bool IsLongConstantHigh() const {
return IsConstantHi();
}
- bool IsUninitializedTypes() const {
- return IsUninitializedReference() || IsUninitializedThisReference() ||
- IsUnresolvedAndUninitializedReference() ||
- IsUnresolvedAndUninitializedThisReference();
- }
+ virtual bool IsUninitializedTypes() const { return false; }
bool IsUnresolvedTypes() const {
return IsUnresolvedReference() || IsUnresolvedAndUninitializedReference() ||
IsUnresolvedAndUninitializedThisReference() ||
@@ -170,7 +105,7 @@ class RegType {
bool IsLongOrDoubleTypes() const {
return IsLowHalf();
}
- // Check this is the low half, and that type_h is its matching high-half
+ // Check this is the low half, and that type_h is its matching high-half.
inline bool CheckWidePair(const RegType& type_h) const {
if (IsLowHalf()) {
return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
@@ -182,37 +117,36 @@ class RegType {
}
return false;
}
+ // The high half that corresponds to this low half
+ const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool IsConstantBoolean() const {
return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
}
- inline virtual bool IsConstantChar() const {
+ virtual bool IsConstantChar() const {
return false;
}
- inline virtual bool IsConstantByte() const {
+ virtual bool IsConstantByte() const {
return false;
}
- inline virtual bool IsConstantShort() const {
+ virtual bool IsConstantShort() const {
return false;
}
- inline virtual bool IsOne() const {
+ virtual bool IsOne() const {
return false;
}
- inline virtual bool IsZero() const {
+ virtual bool IsZero() const {
return false;
}
bool IsReferenceTypes() const {
return IsNonZeroReferenceTypes() || IsZero();
}
- bool IsNonZeroReferenceTypes() const {
- return IsReference() || IsPreciseReference() ||
- IsUninitializedReference() || IsUninitializedThisReference() ||
- IsUnresolvedReference() || IsUnresolvedAndUninitializedReference() ||
- IsUnresolvedAndUninitializedThisReference() ||
- IsUnresolvedMergedReference() || IsUnresolvedSuperClass();
+ virtual bool IsNonZeroReferenceTypes() const {
+ return false;
}
bool IsCategory1Types() const {
- return (IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() ||
- IsShort() || IsBoolean() );
+ return IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() || IsShort() ||
+ IsBoolean();
}
bool IsCategory2Types() const {
return IsLowHalf(); // Don't expect explicit testing of high halves
@@ -230,20 +164,12 @@ class RegType {
return IsChar() || IsBooleanTypes() || IsConstantChar();
}
bool IsIntegralTypes() const {
- return (IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() || IsBoolean() );
- }
- inline virtual int32_t ConstantValue() const {
- DCHECK(IsConstant());
- return -1;
- }
- inline virtual int32_t ConstantValueLo() const {
- DCHECK(IsConstantLo());
- return -1;
- }
- inline virtual int32_t ConstantValueHi() const {
- DCHECK(IsConstantHi());
- return -1;
+ return IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() || IsBoolean();
}
+ // Give the constant value encoded, but this shouldn't be called in the general case.
+ virtual int32_t ConstantValue() const;
+ virtual int32_t ConstantValueLo() const;
+ virtual int32_t ConstantValueHi() const;
bool IsArrayIndexTypes() const {
return IsIntegralTypes();
}
@@ -265,12 +191,11 @@ class RegType {
bool IsDoubleHighTypes() const {
return (IsDoubleHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
}
- inline virtual bool IsLong() const {
+ virtual bool IsLong() const {
return false;
}
- bool HasClass() const {
- return IsReference() || IsPreciseReference() || IsUninitializedReference() ||
- IsUninitializedThisReference();
+ virtual bool HasClass() const {
+ return false;
}
bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -279,210 +204,286 @@ class RegType {
bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsInstantiableTypes() const;
const std::string& GetDescriptor() const {
- DCHECK(IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass());
+ DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
+ !IsUnresolvedSuperClass()));
return descriptor_;
}
+ mirror::Class* GetClass() const {
+ DCHECK(!IsUnresolvedReference());
+ DCHECK(klass_ != NULL);
+ DCHECK(HasClass());
+ return klass_;
+ }
uint16_t GetId() const {
return cache_id_;
}
const RegType& GetSuperClass(RegTypeCache* cache) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
// Can this type access other?
bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Can this type access a member with the given properties?
bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Can this type be assigned by src?
- bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Note: Object and interface types may always be assigned to one another, see comment on
+ // ClassJoin.
+ bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
+ // an interface from an Object.
+ bool IsStrictlyAssignableFrom(const RegType& src) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Are these RegTypes the same?
bool Equals(const RegType& other) const {
return GetId() == other.GetId();
}
+
// Compute the merge of this register from one edge (path) with incoming_type from another.
virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* GetClass() const {
- DCHECK(!IsUnresolvedReference());
- DCHECK(klass_ != NULL);
- DCHECK(HasClass());
- return klass_;
- }
- /*
- * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
- * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
- * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
- * is the deepest (lowest upper bound) parent of S and T).
- *
- * This operation applies for regular classes and arrays, however, for interface types there needn't
- * be a partial ordering on the types. We could solve the problem of a lack of a partial order by
- * introducing sets of types, however, the only operation permissible on an interface is
- * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
- * types until an invoke-interface call on the interface typed reference at runtime and allow
- * the perversion of Object being assignable to an interface type (note, however, that we don't
- * allow assignment of Object or Interface to any concrete class and are therefore type safe).
- *
- * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
- */
+ /*
+ * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
+ * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
+ * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
+ * is the deepest (lowest upper bound) parent of S and T).
+ *
+ * This operation applies for regular classes and arrays, however, for interface types there
+ * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
+ * order by introducing sets of types, however, the only operation permissible on an interface is
+ * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
+ * types until an invoke-interface call on the interface typed reference at runtime and allow
+ * the perversion of Object being assignable to an interface type (note, however, that we don't
+ * allow assignment of Object or Interface to any concrete class and are therefore type safe).
+ *
+ * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
+ */
static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType(mirror::Class* klass, std::string descriptor, uint16_t cache_id)
+
+ virtual ~RegType() {}
+
+ protected:
+ RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
}
- inline virtual ~RegType() {
- }
- virtual void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- friend class RegTypeCache;
- protected:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+
const std::string descriptor_;
- mirror::Class* klass_;
+ mirror::Class* const klass_;
const uint16_t cache_id_;
+ friend class RegTypeCache;
+
DISALLOW_COPY_AND_ASSIGN(RegType);
};
+// Bottom type.
class ConflictType : public RegType {
public:
bool IsConflict() const {
return true;
}
+
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static ConflictType* CreateInstance(mirror::Class* klass, std::string& descriptor,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get the singleton Conflict instance.
static ConflictType* GetInstance();
+
+ // Create the singleton instance.
+ static ConflictType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Destroy the singleton instance.
static void Destroy();
private:
- ConflictType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ }
+
static ConflictType* instance_;
};
+// A variant of the bottom type used to specify an undefined value in the incoming registers.
+// Merging with UndefinedType yields ConflictType which is the true bottom.
class UndefinedType : public RegType {
public:
bool IsUndefined() const {
return true;
}
+
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static UndefinedType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+
+ // Get the singleton Undefined instance.
+ static UndefinedType* GetInstance();
+
+ // Create the singleton instance.
+ static UndefinedType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static UndefinedType* GetInstance();
+
+ // Destroy the singleton instance.
static void Destroy();
private:
- UndefinedType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
+ UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {
}
+
virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static UndefinedType* instance_;
};
-class IntegerType : public RegType {
+class PrimitiveType : public RegType {
+ public:
+ PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class Cat1Type : public PrimitiveType {
+ public:
+ Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class IntegerType : public Cat1Type {
public:
bool IsInteger() const {
return true;
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static IntegerType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static IntegerType* GetInstance();
static void Destroy();
private:
- IntegerType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ IntegerType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
static IntegerType* instance_;
};
-class BooleanType : public RegType {
+class BooleanType : public Cat1Type {
public:
bool IsBoolean() const {
return true;
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static BooleanType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static BooleanType* GetInstance();
static void Destroy();
private:
- BooleanType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ BooleanType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+
static BooleanType* instance;
};
-class ByteType : public RegType {
+class ByteType : public Cat1Type {
public:
bool IsByte() const {
return true;
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static ByteType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ByteType* GetInstance();
static void Destroy();
private:
- ByteType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ByteType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
static ByteType* instance_;
};
-class ShortType : public RegType {
+class ShortType : public Cat1Type {
public:
bool IsShort() const {
return true;
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static ShortType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ShortType* GetInstance();
static void Destroy();
private:
- ShortType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ShortType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
static ShortType* instance_;
};
-class CharType : public RegType {
+class CharType : public Cat1Type {
public:
bool IsChar() const {
return true;
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static CharType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static CharType* GetInstance();
static void Destroy();
private:
- CharType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CharType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
static CharType* instance_;
};
-class FloatType : public RegType {
+class FloatType : public Cat1Type {
public:
bool IsFloat() const {
return true;
}
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static FloatType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static FloatType* GetInstance();
static void Destroy();
private:
- FloatType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ FloatType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
static FloatType* instance_;
};
-class LongLoType : public RegType {
+class Cat2Type : public PrimitiveType {
+ public:
+ Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class LongLoType : public Cat2Type {
public:
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongLo() const {
@@ -491,35 +492,39 @@ class LongLoType : public RegType {
bool IsLong() const {
return true;
}
- static LongLoType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static LongLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static LongLoType* GetInstance();
static void Destroy();
private:
- LongLoType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ LongLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
static LongLoType* instance_;
};
-class LongHiType : public RegType {
+class LongHiType : public Cat2Type {
public:
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongHi() const {
return true;
}
- static LongHiType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static LongHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static LongHiType* GetInstance();
static void Destroy();
private:
- LongHiType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ LongHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
static LongHiType* instance_;
};
-class DoubleLoType : public RegType {
+class DoubleLoType : public Cat2Type {
public:
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDoubleLo() const {
@@ -528,31 +533,35 @@ class DoubleLoType : public RegType {
bool IsDouble() const {
return true;
}
- static DoubleLoType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static DoubleLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static DoubleLoType* GetInstance();
static void Destroy();
private:
- DoubleLoType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ DoubleLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
static DoubleLoType* instance_;
};
-class DoubleHiType : public RegType {
+class DoubleHiType : public Cat2Type {
public:
std::string Dump() const;
virtual bool IsDoubleHi() const {
return true;
}
- static DoubleHiType* CreateInstance(mirror::Class* klass, std::string& descriptor,
+ static DoubleHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static DoubleHiType* GetInstance();
static void Destroy();
private:
- DoubleHiType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ DoubleHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
static DoubleHiType* instance_;
};
@@ -560,9 +569,6 @@ class ConstantType : public RegType {
public:
ConstantType(uint32_t constat, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- inline virtual ~ConstantType() {
- }
- const uint32_t constant_;
// If this is a 32-bit constant, what is the value? This value may be imprecise in which case
// the value represents part of the integer range of values that may be held in the register.
virtual int32_t ConstantValue() const;
@@ -590,7 +596,10 @@ class ConstantType : public RegType {
ConstantValue() >= std::numeric_limits<jshort>::min() &&
ConstantValue() <= std::numeric_limits<jshort>::max();
}
- inline virtual bool IsConstantTypes() const { return true; }
+ virtual bool IsConstantTypes() const { return true; }
+
+ private:
+ const uint32_t constant_;
};
class PreciseConstType : public ConstantType {
@@ -662,147 +671,254 @@ class ImpreciseConstHiType : public ConstantType {
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
+// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
+// instructions and must be passed to a constructor.
class UninitializedType : public RegType {
public:
- UninitializedType(mirror::Class* klass, std::string& descriptor, uint32_t allocation_pc,
- uint16_t cache_id);
- inline virtual ~UninitializedType() {
+ UninitializedType(mirror::Class* klass, const std::string& descriptor, uint32_t allocation_pc,
+ uint16_t cache_id)
+ : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {
}
+ bool IsUninitializedTypes() const;
+ bool IsNonZeroReferenceTypes() const;
+
uint32_t GetAllocationPc() const {
DCHECK(IsUninitializedTypes());
return allocation_pc_;
}
- virtual void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
const uint32_t allocation_pc_;
};
-class UninitialisedReferenceType : public UninitializedType {
+// Similar to ReferenceType but not yet having been passed to a constructor.
+class UninitializedReferenceType : public UninitializedType {
public:
- UninitialisedReferenceType(mirror::Class* klass, std::string& descriptor, uint32_t allocation_pc,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ UninitializedReferenceType(mirror::Class* klass, const std::string& descriptor,
+ uint32_t allocation_pc, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(klass, descriptor, allocation_pc, cache_id) {
+ }
bool IsUninitializedReference() const {
return true;
}
+
+ bool HasClass() const {
+ return true;
+ }
+
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
+// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
class UnresolvedUninitializedRefType : public UninitializedType {
public:
- UnresolvedUninitializedRefType(std::string& descriptor, uint32_t allocation_pc,
- uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ UnresolvedUninitializedRefType(const std::string& descriptor, uint32_t allocation_pc,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
bool IsUnresolvedAndUninitializedReference() const {
return true;
}
+
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
-class UninitialisedThisReferenceType : public UninitializedType {
+// Similar to UninitializedReferenceType but special case for the this argument of a constructor.
+class UninitializedThisReferenceType : public UninitializedType {
public:
- UninitialisedThisReferenceType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- inline virtual bool IsUninitializedThisReference() const {
+ UninitializedThisReferenceType(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(klass, descriptor, 0, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ virtual bool IsUninitializedThisReference() const {
return true;
}
+
+ bool HasClass() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
-class UnresolvedUninitialisedThisRefType : public UninitializedType {
+class UnresolvedUninitializedThisRefType : public UninitializedType {
public:
- UnresolvedUninitialisedThisRefType(std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ UnresolvedUninitializedThisRefType(const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(NULL, descriptor, 0, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
bool IsUnresolvedAndUninitializedThisReference() const {
return true;
}
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
+// A type of register holding a reference to an Object of type GetClass or a sub-class.
class ReferenceType : public RegType {
public:
- ReferenceType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ }
+
bool IsReference() const {
return true;
}
+
+ bool IsNonZeroReferenceTypes() const {
+ return true;
+ }
+
+ bool HasClass() const {
+ return true;
+ }
+
std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
+// A type of register holding a reference to an Object of type GetClass and only an object of that
+// type.
class PreciseReferenceType : public RegType {
public:
- PreciseReferenceType(mirror::Class* klass, std::string& descriptor, uint16_t cache_id)
+ PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool IsPreciseReference() const {
return true;
}
+
+ bool IsNonZeroReferenceTypes() const {
+ return true;
+ }
+
+ bool HasClass() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
-class UnresolvedReferenceType : public RegType {
+// Common parent of unresolved types.
+class UnresolvedType : public RegType {
public:
- UnresolvedReferenceType(std::string& descriptor, uint16_t cache_id)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : RegType(NULL, descriptor, cache_id) {
+ UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : RegType(NULL, descriptor, cache_id) {
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsNonZeroReferenceTypes() const;
+};
+
+// Similar to ReferenceType except the Class couldn't be loaded. Assignability and other tests made
+// of this type must be conservative.
+class UnresolvedReferenceType : public UnresolvedType {
+ public:
+ UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
bool IsUnresolvedReference() const {
return true;
}
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
-class UnresolvedSuperClass : public RegType {
+// Type representing the super-class of an unresolved type.
+class UnresolvedSuperClass : public UnresolvedType {
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : RegType(NULL, "", cache_id), unresolved_child_id_(child_id),
+ : UnresolvedType("", cache_id), unresolved_child_id_(child_id),
reg_type_cache_(reg_type_cache) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
}
+
bool IsUnresolvedSuperClass() const {
return true;
}
+
uint16_t GetUnresolvedSuperClassChildId() const {
DCHECK(IsUnresolvedSuperClass());
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
};
-class UnresolvedMergedType : public RegType {
+// A merge of two unresolved types. If the types were resolved this may be Conflict or another
+// known ReferenceType.
+class UnresolvedMergedType : public UnresolvedType {
public:
- UnresolvedMergedType(uint16_t left_id, uint16_t right_id, const RegTypeCache* reg_type_cache, uint16_t cache_id)
+ UnresolvedMergedType(uint16_t left_id, uint16_t right_id, const RegTypeCache* reg_type_cache,
+ uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : RegType(NULL, "", cache_id), reg_type_cache_(reg_type_cache) ,merged_types_(left_id, right_id) {
+ : UnresolvedType("", cache_id), reg_type_cache_(reg_type_cache) ,merged_types_(left_id, right_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
}
+
// The top of a tree of merged types.
std::pair<uint16_t, uint16_t> GetTopMergedTypes() const {
DCHECK(IsUnresolvedMergedReference());
return merged_types_;
}
+
// The complete set of merged types.
std::set<uint16_t> GetMergedTypes() const;
+
bool IsUnresolvedMergedReference() const {
return true;
}
- void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
const RegTypeCache* const reg_type_cache_;
const std::pair<uint16_t, uint16_t> merged_types_;
};
std::ostream& operator<<(std::ostream& os, const RegType& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
} // namespace verifier
} // namespace art
diff --git a/src/verifier/reg_type_cache-inl.h b/src/verifier/reg_type_cache-inl.h
index f6b0056536..42474d1849 100644
--- a/src/verifier/reg_type_cache-inl.h
+++ b/src/verifier/reg_type_cache-inl.h
@@ -24,7 +24,7 @@
namespace art {
namespace verifier {
template <class Type>
-Type* RegTypeCache::CreatePrimitiveTypeInstance(std::string descriptor) {
+Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
mirror::Class* klass = NULL;
// Try loading the class from linker.
if (!descriptor.empty()) {
@@ -35,6 +35,12 @@ Type* RegTypeCache::CreatePrimitiveTypeInstance(std::string descriptor) {
return entry;
}
+inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+ DCHECK_LT(id, entries_.size());
+ RegType* result = entries_[id];
+ DCHECK(result != NULL);
+ return *result;
+}
} // namespace verifier
} // namespace art
#endif // ART_SRC_VERIFIER_REG_TYPE_CACHE_INL_H_
diff --git a/src/verifier/reg_type_cache.cc b/src/verifier/reg_type_cache.cc
index e914d1e679..a575f7705b 100644
--- a/src/verifier/reg_type_cache.cc
+++ b/src/verifier/reg_type_cache.cc
@@ -24,6 +24,7 @@
namespace art {
namespace verifier {
+
bool RegTypeCache::primitive_initialized_ = false;
uint16_t RegTypeCache::primitive_start_ = 0;
uint16_t RegTypeCache::primitive_count_ = 0;
@@ -49,9 +50,10 @@ void RegTypeCache::FillPrimitiveTypes() {
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) {
- CHECK(RegTypeCache::primitive_initialized_);
- if (std::string(descriptor).length() == 1) {
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
+ DCHECK(RegTypeCache::primitive_initialized_);
+ if (descriptor[1] == '\0') {
switch (descriptor[0]) {
case 'Z':
return Boolean();
@@ -80,15 +82,7 @@ const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const c
}
};
-const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
- DCHECK_LT(id, entries_.size());
- RegType* result = entries_[id];
- DCHECK(result != NULL);
- return *result;
-}
-
-const RegType& RegTypeCache::RegTypeFromPrimitiveType(
- Primitive::Type prim_type) const {
+const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
CHECK(RegTypeCache::primitive_initialized_);
switch (prim_type) {
case Primitive::kPrimBoolean:
@@ -113,41 +107,26 @@ const RegType& RegTypeCache::RegTypeFromPrimitiveType(
}
}
-bool RegTypeCache::MatchDescriptor(size_t idx, std::string& descriptor, bool precise) {
- RegType* cur_entry = entries_[idx];
- if (cur_entry->HasClass()) {
- // Check the descriptor in the reg_type if available.
- if(!cur_entry->descriptor_.empty()) {
- if (descriptor == cur_entry->descriptor_ && MatchingPrecisionForClass(cur_entry, precise)) {
- return true;
- }
- } else {
- // Descriptor not found in reg_type , maybe available in Class object.
- // So we might have cases where we have the class but not the descriptor
- // for that class we need the class helper to get the descriptor
- // and match it with the one we are given.
- ClassHelper kh(cur_entry->GetClass());
- if ((strcmp(descriptor.c_str(), kh.GetDescriptor()) == 0) &&
- MatchingPrecisionForClass(cur_entry, precise)) {
- return true;
- }
- }
- } else if (cur_entry->IsUnresolvedReference() && cur_entry->GetDescriptor() == descriptor) {
+bool RegTypeCache::MatchDescriptor(size_t idx, const char* descriptor, bool precise) {
+ RegType* entry = entries_[idx];
+ if (entry->descriptor_ != descriptor) {
+ return false;
+ }
+ if (entry->HasClass() && MatchingPrecisionForClass(entry, precise)) {
return true;
}
- return false;
+ return entry->IsUnresolvedReference();
}
-
-mirror::Class* RegTypeCache::ResolveClass(std::string descriptor, mirror::ClassLoader* loader) {
+mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassLoader* loader) {
// Class was not found, must create new type.
// Try resolving class
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = NULL;
if (can_load_classes_) {
- klass = class_linker->FindClass(descriptor.c_str(), loader);
+ klass = class_linker->FindClass(descriptor, loader);
} else {
- klass = class_linker->LookupClass(descriptor.c_str(), loader);
+ klass = class_linker->LookupClass(descriptor, loader);
if (klass != NULL && !klass->IsLoaded()) {
// We found the class but without it being loaded its not safe for use.
klass = NULL;
@@ -155,6 +134,7 @@ mirror::Class* RegTypeCache::ResolveClass(std::string descriptor, mirror::ClassL
}
return klass;
}
+
void RegTypeCache::ClearException() {
if (can_load_classes_) {
DCHECK(Thread::Current()->IsExceptionPending());
@@ -163,8 +143,9 @@ void RegTypeCache::ClearException() {
DCHECK(!Thread::Current()->IsExceptionPending());
}
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, std::string descriptor, bool precise) {
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
// Try looking up the class in the cache first.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
if (MatchDescriptor(i, descriptor, precise)) {
@@ -198,7 +179,7 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, std::string descr
// We tried loading the class and failed, this might get an exception raised
// so we want to clear it before we go on.
ClearException();
- if (IsValidDescriptor(descriptor.c_str())) {
+ if (IsValidDescriptor(descriptor)) {
RegType* entry = new UnresolvedReferenceType(descriptor, entries_.size());
entries_.push_back(entry);
return *entry;
@@ -209,25 +190,24 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, std::string descr
}
}
}
-const RegType& RegTypeCache::FromClass(mirror::Class* klass, bool precise) {
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
if (klass->IsPrimitive()) {
return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
} else {
// Look for the reference in the list of entries to have.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if ((cur_entry->HasClass()) && cur_entry->GetClass() == klass &&
- MatchingPrecisionForClass(cur_entry, precise)) {
+ if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
return *cur_entry;
}
}
// No reference to the class was found, create new reference.
RegType* entry;
- std::string empty = "";
if (precise) {
- entry = new PreciseReferenceType(klass, empty, entries_.size());
+ entry = new PreciseReferenceType(klass, descriptor, entries_.size());
} else {
- entry = new ReferenceType(klass, empty, entries_.size());
+ entry = new ReferenceType(klass, descriptor, entries_.size());
}
entries_.push_back(entry);
return *entry;
@@ -309,13 +289,14 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT
// Create entry.
RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size());
entries_.push_back(entry);
-#ifndef NDEBUG
- UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
- std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
- CHECK(check_types == types);
-#endif
+ if (kIsDebugBuild) {
+ UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
+ std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
+ CHECK(check_types == types);
+ }
return *entry;
}
+
const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
// Check if entry already exists.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -334,11 +315,12 @@ const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
entries_.push_back(entry);
return *entry;
}
+
const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
RegType* entry = NULL;
RegType* cur_entry = NULL;
+ const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
- std::string descriptor(type.GetDescriptor());
for (size_t i = primitive_count_; i < entries_.size(); i++) {
cur_entry = entries_[i];
if (cur_entry->IsUnresolvedAndUninitializedReference() &&
@@ -353,23 +335,23 @@ const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocat
for (size_t i = primitive_count_; i < entries_.size(); i++) {
cur_entry = entries_[i];
if (cur_entry->IsUninitializedReference() &&
- down_cast<UninitialisedReferenceType*>(cur_entry)
+ down_cast<UninitializedReferenceType*>(cur_entry)
->GetAllocationPc() == allocation_pc &&
cur_entry->GetClass() == klass) {
return *cur_entry;
}
}
- std::string descriptor("");
- entry = new UninitialisedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+ entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
}
entries_.push_back(entry);
return *entry;
}
+
const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
- std::string descriptor(uninit_type.GetDescriptor());
+ const std::string& descriptor(uninit_type.GetDescriptor());
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedReference() &&
@@ -377,7 +359,7 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
return *cur_entry;
}
}
- entry = new UnresolvedReferenceType(descriptor, entries_.size());
+ entry = new UnresolvedReferenceType(descriptor.c_str(), entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if(uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -388,10 +370,8 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
return *cur_entry;
}
}
- std::string descriptor("");
- entry = new ReferenceType(klass, descriptor, entries_.size());
+ entry = new ReferenceType(klass, "", entries_.size());
} else {
- std::string descriptor;
if (klass->IsFinal()) {
if (klass->IsInstantiable()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -401,7 +381,7 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
}
}
// Precise type was not found , create one !
- entry = new PreciseReferenceType(klass, descriptor, entries_.size());
+ entry = new PreciseReferenceType(klass, "", entries_.size());
} else {
return Conflict();
}
@@ -414,26 +394,30 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
return *cur_entry;
}
}
- entry = new ReferenceType(klass, descriptor, entries_.size());
+ entry = new ReferenceType(klass, "", entries_.size());
}
}
}
entries_.push_back(entry);
return *entry;
}
-const RegType& RegTypeCache::ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+
+const RegType& RegTypeCache::ByteConstant() {
return FromCat1Const(std::numeric_limits<jbyte>::min(), false);
}
-const RegType& RegTypeCache::ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+
+const RegType& RegTypeCache::ShortConstant() {
return FromCat1Const(std::numeric_limits<jshort>::min(), false);
}
-const RegType& RegTypeCache::IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+
+const RegType& RegTypeCache::IntConstant() {
return FromCat1Const(std::numeric_limits<jint>::max(), false);
}
+
const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
RegType* entry;
+ const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
- std::string descriptor(type.GetDescriptor());
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedAndUninitializedThisReference() &&
@@ -441,7 +425,7 @@ const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
return *cur_entry;
}
}
- entry = new UnresolvedUninitialisedThisRefType(descriptor, entries_.size());
+ entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -451,16 +435,17 @@ const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
return *cur_entry;
}
}
- std::string descriptor("");
- entry = new UninitialisedThisReferenceType(klass, descriptor, entries_.size());
+ entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
entries_.push_back(entry);
return *entry;
}
+
const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->IsConstant() && cur_entry->IsPreciseConstant() == precise &&
+ if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+ cur_entry->IsPreciseConstant() == precise &&
(down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
return *cur_entry;
}
@@ -514,12 +499,12 @@ const RegType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
CHECK(array.IsArrayTypes());
if (array.IsUnresolvedTypes()) {
- std::string descriptor(array.GetDescriptor());
- std::string component(descriptor.substr(1, descriptor.size() - 1));
+ const std::string& descriptor(array.GetDescriptor());
+ const std::string component(descriptor.substr(1, descriptor.size() - 1));
return FromDescriptor(loader, component.c_str(), false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
- return FromClass(klass, klass->IsFinal());
+ return FromClass(ClassHelper(klass).GetDescriptor(), klass, klass->IsFinal());
}
}
diff --git a/src/verifier/reg_type_cache.h b/src/verifier/reg_type_cache.h
index 602c95086b..d70123c2de 100644
--- a/src/verifier/reg_type_cache.h
+++ b/src/verifier/reg_type_cache.h
@@ -39,6 +39,7 @@ const size_t kNumPrimitives = 12;
class RegTypeCache {
public:
explicit RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+ entries_.reserve(64);
FillPrimitiveTypes();
}
~RegTypeCache();
@@ -52,13 +53,13 @@ class RegTypeCache {
}
static void ShutDown();
const art::verifier::RegType& GetFromId(uint16_t id) const;
- const RegType& From(mirror::ClassLoader* loader, std::string descriptor, bool precise)
+ const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <class Type>
- static Type* CreatePrimitiveTypeInstance(std::string descriptor)
+ static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FillPrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromClass(mirror::Class* klass, bool precise)
+ const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -152,10 +153,10 @@ class RegTypeCache {
// Whether or not we're allowed to load classes.
const bool can_load_classes_;
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
- mirror::Class* ResolveClass(std::string descriptor, mirror::ClassLoader* loader)
+ mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ClearException();
- bool MatchDescriptor(size_t idx, std::string& descriptor, bool precise)
+ bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
diff --git a/src/verifier/reg_type_test.cc b/src/verifier/reg_type_test.cc
index 9b46a7fbea..f37edff6ac 100644
--- a/src/verifier/reg_type_test.cc
+++ b/src/verifier/reg_type_test.cc
@@ -74,7 +74,6 @@ TEST_F(RegTypeTest, Pairs) {
}
TEST_F(RegTypeTest, Primitives) {
-
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
@@ -108,6 +107,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(bool_reg_type.IsLongTypes());
EXPECT_FALSE(bool_reg_type.IsDoubleTypes());
EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
const RegType& byte_reg_type = cache.Byte();
EXPECT_FALSE(byte_reg_type.IsUndefined());
@@ -139,6 +139,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(byte_reg_type.IsLongTypes());
EXPECT_FALSE(byte_reg_type.IsDoubleTypes());
EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
const RegType& char_reg_type = cache.Char();
EXPECT_FALSE(char_reg_type.IsUndefined());
@@ -170,6 +171,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(char_reg_type.IsLongTypes());
EXPECT_FALSE(char_reg_type.IsDoubleTypes());
EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
const RegType& short_reg_type = cache.Short();
EXPECT_FALSE(short_reg_type.IsUndefined());
@@ -201,6 +203,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(short_reg_type.IsLongTypes());
EXPECT_FALSE(short_reg_type.IsDoubleTypes());
EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
const RegType& int_reg_type = cache.Integer();
EXPECT_FALSE(int_reg_type.IsUndefined());
@@ -232,6 +235,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(int_reg_type.IsLongTypes());
EXPECT_FALSE(int_reg_type.IsDoubleTypes());
EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
const RegType& long_reg_type = cache.LongLo();
EXPECT_FALSE(long_reg_type.IsUndefined());
@@ -263,6 +267,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(long_reg_type.IsLongTypes());
EXPECT_FALSE(long_reg_type.IsDoubleTypes());
EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
const RegType& float_reg_type = cache.Float();
EXPECT_FALSE(float_reg_type.IsUndefined());
@@ -294,6 +299,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(float_reg_type.IsLongTypes());
EXPECT_FALSE(float_reg_type.IsDoubleTypes());
EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
const RegType& double_reg_type = cache.DoubleLo();
EXPECT_FALSE(double_reg_type.IsUndefined());
@@ -325,6 +331,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(double_reg_type.IsLongTypes());
EXPECT_TRUE(double_reg_type.IsDoubleTypes());
EXPECT_FALSE(double_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(double_reg_type.IsNonZeroReferenceTypes());
}
@@ -352,12 +359,14 @@ TEST_F(RegTypeReferenceTest, UnresolvedType) {
RegTypeCache cache(true);
const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
+ EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
+ EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
}
TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
@@ -372,6 +381,7 @@ TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
+ EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
// Create an uninitialized type of this unresolved type with different PC
const RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
diff --git a/src/verifier/register_line-inl.h b/src/verifier/register_line-inl.h
new file mode 100644
index 0000000000..157e136cc1
--- /dev/null
+++ b/src/verifier/register_line-inl.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_REGISTER_LINE_INL_H_
+#define ART_SRC_VERIFIER_REGISTER_LINE_INL_H_
+
+#include "register_line.h"
+#include "method_verifier.h"
+
+namespace art {
+namespace verifier {
+
+inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+ // The register index was validated during the static pass, so we don't need to check it here.
+ DCHECK_LT(vsrc, num_regs_);
+ return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
+}
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_REGISTER_LINE_INL_H_
diff --git a/src/verifier/register_line.cc b/src/verifier/register_line.cc
index 544a9ee4c0..3a2145b9bb 100644
--- a/src/verifier/register_line.cc
+++ b/src/verifier/register_line.cc
@@ -16,7 +16,9 @@
#include "register_line.h"
+#include "dex_instruction-inl.h"
#include "method_verifier.h"
+#include "register_line-inl.h"
namespace art {
namespace verifier {
@@ -92,22 +94,18 @@ void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
result_[1] = new_type2.GetId();
}
-const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
- // The register index was validated during the static pass, so we don't need to check it here.
- DCHECK_LT(vsrc, num_regs_);
- return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
-}
-
-const RegType& RegisterLine::GetInvocationThis(const DecodedInstruction& dec_insn) {
- if (dec_insn.vA < 1) {
+const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+ const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
+ if (args_count < 1) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
return verifier_->GetRegTypeCache()->Conflict();
}
/* get the element type of the array held in vsrc */
- const RegType& this_type = GetRegisterType(dec_insn.vC);
+ const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ const RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
- << dec_insn.vC << " (type=" << this_type << ")";
+ << this_reg << " (type=" << this_type << ")";
return verifier_->GetRegTypeCache()->Conflict();
}
return this_type;
@@ -260,125 +258,135 @@ void RegisterLine::CopyResultRegister2(uint32_t vdst) {
}
}
-void RegisterLine::CheckUnaryOp(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckUnaryOp(const Instruction* inst,
const RegType& dst_type,
const RegType& src_type) {
- if (VerifyRegisterType(dec_insn.vB, src_type)) {
- SetRegisterType(dec_insn.vA, dst_type);
+ if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
+ SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
-void RegisterLine::CheckUnaryOpWide(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1, const RegType& src_type2) {
- if (VerifyRegisterTypeWide(dec_insn.vB, src_type1, src_type2)) {
- SetRegisterTypeWide(dec_insn.vA, dst_type1, dst_type2);
+ if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
+ SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
-void RegisterLine::CheckUnaryOpToWide(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type) {
- if (VerifyRegisterType(dec_insn.vB, src_type)) {
- SetRegisterTypeWide(dec_insn.vA, dst_type1, dst_type2);
+ if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
+ SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
-void RegisterLine::CheckUnaryOpFromWide(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2) {
- if (VerifyRegisterTypeWide(dec_insn.vB, src_type1, src_type2)) {
- SetRegisterType(dec_insn.vA, dst_type);
+ if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
+ SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
-void RegisterLine::CheckBinaryOp(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckBinaryOp(const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op) {
- if (VerifyRegisterType(dec_insn.vB, src_type1) &&
- VerifyRegisterType(dec_insn.vC, src_type2)) {
+ const uint32_t vregB = inst->VRegB_23x();
+ const uint32_t vregC = inst->VRegC_23x();
+ if (VerifyRegisterType(vregB, src_type1) &&
+ VerifyRegisterType(vregC, src_type2)) {
if (check_boolean_op) {
DCHECK(dst_type.IsInteger());
- if (GetRegisterType(dec_insn.vB).IsBooleanTypes() &&
- GetRegisterType(dec_insn.vC).IsBooleanTypes()) {
- SetRegisterType(dec_insn.vA, verifier_->GetRegTypeCache()->Boolean());
+ if (GetRegisterType(vregB).IsBooleanTypes() &&
+ GetRegisterType(vregC).IsBooleanTypes()) {
+ SetRegisterType(inst->VRegA_23x(), verifier_->GetRegTypeCache()->Boolean());
return;
}
}
- SetRegisterType(dec_insn.vA, dst_type);
+ SetRegisterType(inst->VRegA_23x(), dst_type);
}
}
-void RegisterLine::CheckBinaryOpWide(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1_1, const RegType& src_type1_2,
const RegType& src_type2_1, const RegType& src_type2_2) {
- if (VerifyRegisterTypeWide(dec_insn.vB, src_type1_1, src_type1_2) &&
- VerifyRegisterTypeWide(dec_insn.vC, src_type2_1, src_type2_2)) {
- SetRegisterTypeWide(dec_insn.vA, dst_type1, dst_type2);
+ if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
+ VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
+ SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
}
}
-void RegisterLine::CheckBinaryOpWideShift(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
const RegType& long_lo_type, const RegType& long_hi_type,
const RegType& int_type) {
- if (VerifyRegisterTypeWide(dec_insn.vB, long_lo_type, long_hi_type) &&
- VerifyRegisterType(dec_insn.vC, int_type)) {
- SetRegisterTypeWide(dec_insn.vA, long_lo_type, long_hi_type);
+ if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
+ VerifyRegisterType(inst->VRegC_23x(), int_type)) {
+ SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
}
}
-void RegisterLine::CheckBinaryOp2addr(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
const RegType& dst_type, const RegType& src_type1,
const RegType& src_type2, bool check_boolean_op) {
- if (VerifyRegisterType(dec_insn.vA, src_type1) &&
- VerifyRegisterType(dec_insn.vB, src_type2)) {
+ const uint32_t vregA = inst->VRegA_12x();
+ const uint32_t vregB = inst->VRegB_12x();
+ if (VerifyRegisterType(vregA, src_type1) &&
+ VerifyRegisterType(vregB, src_type2)) {
if (check_boolean_op) {
DCHECK(dst_type.IsInteger());
- if (GetRegisterType(dec_insn.vA).IsBooleanTypes() &&
- GetRegisterType(dec_insn.vB).IsBooleanTypes()) {
- SetRegisterType(dec_insn.vA, verifier_->GetRegTypeCache()->Boolean());
+ if (GetRegisterType(vregA).IsBooleanTypes() &&
+ GetRegisterType(vregB).IsBooleanTypes()) {
+ SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
return;
}
}
- SetRegisterType(dec_insn.vA, dst_type);
+ SetRegisterType(vregA, dst_type);
}
}
-void RegisterLine::CheckBinaryOp2addrWide(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1_1, const RegType& src_type1_2,
const RegType& src_type2_1, const RegType& src_type2_2) {
- if (VerifyRegisterTypeWide(dec_insn.vA, src_type1_1, src_type1_2) &&
- VerifyRegisterTypeWide(dec_insn.vB, src_type2_1, src_type2_2)) {
- SetRegisterTypeWide(dec_insn.vA, dst_type1, dst_type2);
+ const uint32_t vregA = inst->VRegA_12x();
+ const uint32_t vregB = inst->VRegB_12x();
+ if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
+ VerifyRegisterTypeWide(vregB, src_type2_1, src_type2_2)) {
+ SetRegisterTypeWide(vregA, dst_type1, dst_type2);
}
}
-void RegisterLine::CheckBinaryOp2addrWideShift(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
const RegType& long_lo_type, const RegType& long_hi_type,
const RegType& int_type) {
- if (VerifyRegisterTypeWide(dec_insn.vA, long_lo_type, long_hi_type) &&
- VerifyRegisterType(dec_insn.vB, int_type)) {
- SetRegisterTypeWide(dec_insn.vA, long_lo_type, long_hi_type);
+ const uint32_t vregA = inst->VRegA_12x();
+ const uint32_t vregB = inst->VRegB_12x();
+ if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
+ VerifyRegisterType(vregB, int_type)) {
+ SetRegisterTypeWide(vregA, long_lo_type, long_hi_type);
}
}
-void RegisterLine::CheckLiteralOp(const DecodedInstruction& dec_insn,
+void RegisterLine::CheckLiteralOp(const Instruction* inst,
const RegType& dst_type, const RegType& src_type,
- bool check_boolean_op) {
- if (VerifyRegisterType(dec_insn.vB, src_type)) {
+ bool check_boolean_op, bool is_lit16) {
+ const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
+ const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
+ if (VerifyRegisterType(vregB, src_type)) {
if (check_boolean_op) {
DCHECK(dst_type.IsInteger());
/* check vB with the call, then check the constant manually */
- if (GetRegisterType(dec_insn.vB).IsBooleanTypes() &&
- (dec_insn.vC == 0 || dec_insn.vC == 1)) {
- SetRegisterType(dec_insn.vA, verifier_->GetRegTypeCache()->Boolean());
+ const uint32_t val = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
+ if (GetRegisterType(vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
+ SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
return;
}
}
- SetRegisterType(dec_insn.vA, dst_type);
+ SetRegisterType(vregA, dst_type);
}
}
@@ -427,6 +435,8 @@ bool RegisterLine::VerifyMonitorStackEmpty() {
bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
bool changed = false;
+ CHECK(NULL != incoming_line);
+ CHECK(NULL != line_.get());
for (size_t idx = 0; idx < num_regs_; idx++) {
if (line_[idx] != incoming_line->line_[idx]) {
const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
diff --git a/src/verifier/register_line.h b/src/verifier/register_line.h
index 5719082518..5f17049e8e 100644
--- a/src/verifier/register_line.h
+++ b/src/verifier/register_line.h
@@ -169,28 +169,28 @@ class RegisterLine {
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
*/
- const RegType& GetInvocationThis(const DecodedInstruction& dec_insn)
+ const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
* "dst_type" is stored into vA, and "src_type" is verified against vB.
*/
- void CheckUnaryOp(const DecodedInstruction& dec_insn,
- const RegType& dst_type, const RegType& src_type)
+ void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
+ const RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckUnaryOpWide(const DecodedInstruction& dec_insn,
+ void CheckUnaryOpWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1, const RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckUnaryOpToWide(const DecodedInstruction& dec_insn,
+ void CheckUnaryOpToWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckUnaryOpFromWide(const DecodedInstruction& dec_insn,
+ void CheckUnaryOpFromWide(const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -200,18 +200,18 @@ class RegisterLine {
* "dst_type" is stored into vA, and "src_type1"/"src_type2" are verified
* against vB/vC.
*/
- void CheckBinaryOp(const DecodedInstruction& dec_insn,
+ void CheckBinaryOp(const Instruction* inst,
const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckBinaryOpWide(const DecodedInstruction& dec_insn,
+ void CheckBinaryOpWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1_1, const RegType& src_type1_2,
const RegType& src_type2_1, const RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckBinaryOpWideShift(const DecodedInstruction& dec_insn,
+ void CheckBinaryOpWideShift(const Instruction* inst,
const RegType& long_lo_type, const RegType& long_hi_type,
const RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -220,19 +220,19 @@ class RegisterLine {
* Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
* are verified against vA/vB, then "dst_type" is stored into vA.
*/
- void CheckBinaryOp2addr(const DecodedInstruction& dec_insn,
+ void CheckBinaryOp2addr(const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckBinaryOp2addrWide(const DecodedInstruction& dec_insn,
+ void CheckBinaryOp2addrWide(const Instruction* inst,
const RegType& dst_type1, const RegType& dst_type2,
const RegType& src_type1_1, const RegType& src_type1_2,
const RegType& src_type2_1, const RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CheckBinaryOp2addrWideShift(const DecodedInstruction& dec_insn,
+ void CheckBinaryOp2addrWideShift(const Instruction* inst,
const RegType& long_lo_type, const RegType& long_hi_type,
const RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -243,8 +243,9 @@ class RegisterLine {
*
* If "check_boolean_op" is set, we use the constant value in vC.
*/
- void CheckLiteralOp(const DecodedInstruction& dec_insn,
- const RegType& dst_type, const RegType& src_type, bool check_boolean_op)
+ void CheckLiteralOp(const Instruction* inst,
+ const RegType& dst_type, const RegType& src_type,
+ bool check_boolean_op, bool is_lit16)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
diff --git a/test/045-reflect-array/expected.txt b/test/045-reflect-array/expected.txt
index 5990b34a40..b9a98c9849 100644
--- a/test/045-reflect-array/expected.txt
+++ b/test/045-reflect-array/expected.txt
@@ -6,4 +6,7 @@ ReflectArrayTest.testSingle passed
ReflectArrayTest.testMultiInt passed
zero one two ++
ReflectArrayTest.testMulti passed
+class [Ljava.lang.Number; modifiers: 1041
+class [Ljava.lang.Cloneable; modifiers: 1041
+ReflectArrayTest.testAbstract passed
ReflectArrayTest passed
diff --git a/test/045-reflect-array/src/Main.java b/test/045-reflect-array/src/Main.java
index 05cf84302a..7418eed824 100644
--- a/test/045-reflect-array/src/Main.java
+++ b/test/045-reflect-array/src/Main.java
@@ -16,6 +16,7 @@ public class Main {
testSingle();
testMultiInt();
testMulti();
+ testAbstract();
System.out.println("ReflectArrayTest passed");
}
@@ -255,4 +256,14 @@ public class Main {
}
System.out.println("ReflectArrayTest.testMulti passed");
}
+
+ static void testAbstract() {
+ Object arrayOfAbstractClasses = Array.newInstance(Number.class, 1);
+ System.out.println(arrayOfAbstractClasses.getClass().toString() + " modifiers: " +
+ arrayOfAbstractClasses.getClass().getModifiers());
+ arrayOfAbstractClasses = Array.newInstance(Cloneable.class, 1);
+ System.out.println(arrayOfAbstractClasses.getClass().toString() + " modifiers: " +
+ arrayOfAbstractClasses.getClass().getModifiers());
+ System.out.println("ReflectArrayTest.testAbstract passed");
+ }
}
diff --git a/test/108-check-cast/expected.txt b/test/108-check-cast/expected.txt
new file mode 100644
index 0000000000..d86bac9de5
--- /dev/null
+++ b/test/108-check-cast/expected.txt
@@ -0,0 +1 @@
+OK
diff --git a/test/108-check-cast/info.txt b/test/108-check-cast/info.txt
new file mode 100644
index 0000000000..e7ffa4fdd0
--- /dev/null
+++ b/test/108-check-cast/info.txt
@@ -0,0 +1,10 @@
+This test relies on the correct behavior of instance-of to test check-cast behavior,
+as shown below:
+
+
+CCE throw| InstanceOf | Correct?
+---------+------------+----------
+ 1 | 0 | OK
+ 1 | 1 | BAD
+ 0 | 0 | BAD
+ 0 | 1 | OK \ No newline at end of file
diff --git a/test/108-check-cast/src/Main.java b/test/108-check-cast/src/Main.java
new file mode 100644
index 0000000000..7ef13fd3ed
--- /dev/null
+++ b/test/108-check-cast/src/Main.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Testing check-cast, see comment in info.txt
+ */
+
+class B {}
+class D extends B {}
+
+public class Main {
+ public static void main(String args[]) {
+ B b = null;
+ try {
+ if (1 == args.length) {
+ b = new B();
+ } else {
+ b = new D();
+ }
+ D d = (D) b;
+ if (!(b instanceof D)) {
+ System.out.println("Error: No ClassCastException throuwn when it should have been.");
+ } else {
+ System.out.println("OK");
+ }
+ }
+ catch (ClassCastException cce) {
+ if (b instanceof D) {
+ System.out.println("Error: ClassCastException thrown when it shouldn't have been.");
+ } else {
+ System.out.println("OK");
+ }
+ }
+ }
+}