summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.mk2
-rw-r--r--build/Android.gtest.mk5
-rw-r--r--compiler/dex/mir_method_info.cc3
-rw-r--r--compiler/driver/compiler_driver-inl.h7
-rw-r--r--compiler/driver/compiler_driver.cc30
-rw-r--r--compiler/driver/compiler_driver_test.cc4
-rw-r--r--compiler/oat_writer.cc10
-rw-r--r--compiler/optimizing/builder.cc14
-rw-r--r--compiler/optimizing/code_generator.cc6
-rw-r--r--compiler/optimizing/code_generator.h1
-rw-r--r--compiler/optimizing/code_generator_arm.cc12
-rw-r--r--compiler/optimizing/inliner.cc4
-rw-r--r--compiler/optimizing/instruction_simplifier.cc6
-rw-r--r--compiler/optimizing/intrinsics_arm.cc9
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc2
-rw-r--r--compiler/optimizing/intrinsics_x86.cc2
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc2
-rw-r--r--compiler/optimizing/optimizing_compiler.cc10
-rw-r--r--compiler/optimizing/reference_type_propagation.cc15
-rw-r--r--compiler/utils/arm/assembler_arm.cc6
-rw-r--r--compiler/utils/arm/assembler_arm.h175
-rw-r--r--compiler/utils/arm/assembler_arm32.cc182
-rw-r--r--compiler/utils/arm/assembler_arm32.h83
-rw-r--r--compiler/utils/arm/assembler_arm32_test.cc155
-rw-r--r--compiler/utils/arm/assembler_arm_test.h24
-rw-r--r--compiler/utils/arm/assembler_thumb2.cc424
-rw-r--r--compiler/utils/arm/assembler_thumb2.h97
-rw-r--r--compiler/utils/assembler_thumb_test.cc235
-rw-r--r--compiler/utils/assembler_thumb_test_expected.cc.inc239
-rwxr-xr-xdexlist/Android.mk6
-rw-r--r--dexlist/dexlist.cc2
-rw-r--r--dexlist/dexlist_test.cc5
-rw-r--r--oatdump/oatdump.cc47
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc3
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S11
-rw-r--r--runtime/arch/arm64/entrypoints_init_arm64.cc4
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S12
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc4
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S12
-rw-r--r--runtime/arch/mips64/entrypoints_init_mips64.cc4
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S12
-rw-r--r--runtime/arch/x86/entrypoints_init_x86.cc2
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S17
-rw-r--r--runtime/arch/x86_64/entrypoints_init_x86_64.cc2
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S15
-rw-r--r--runtime/art_method.cc9
-rw-r--r--runtime/asm_support.h2
-rw-r--r--runtime/base/hash_set_test.cc57
-rw-r--r--runtime/base/mutex.h2
-rw-r--r--runtime/class_linker.cc46
-rw-r--r--runtime/class_linker.h8
-rw-r--r--runtime/class_linker_test.cc2
-rw-r--r--runtime/debugger.cc5
-rw-r--r--runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc21
-rw-r--r--runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc3
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc6
-rw-r--r--runtime/entrypoints/runtime_asm_entrypoints.h3
-rw-r--r--runtime/entrypoints_order_test.cc9
-rw-r--r--runtime/gc/collector/concurrent_copying.cc2
-rw-r--r--runtime/gc/heap.cc70
-rw-r--r--runtime/gc/heap.h12
-rw-r--r--runtime/gc/reference_queue_test.cc4
-rw-r--r--runtime/handle_scope-inl.h18
-rw-r--r--runtime/hprof/hprof.cc9
-rw-r--r--runtime/instrumentation.cc3
-rw-r--r--runtime/java_vm_ext.cc7
-rw-r--r--runtime/java_vm_ext.h11
-rw-r--r--runtime/jni_internal.cc28
-rw-r--r--runtime/mirror/string-inl.h26
-rw-r--r--runtime/monitor_test.cc42
-rw-r--r--runtime/native/dalvik_system_DexFile.cc7
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc3
-rw-r--r--runtime/native/java_lang_Thread.cc1
-rw-r--r--runtime/oat.h2
-rw-r--r--runtime/runtime.cc5
-rw-r--r--runtime/thread.cc82
-rw-r--r--runtime/thread.h40
-rw-r--r--runtime/thread_state.h1
-rw-r--r--runtime/trace.cc3
-rw-r--r--runtime/verifier/method_verifier.cc86
-rw-r--r--runtime/verifier/method_verifier.h30
-rw-r--r--runtime/verifier/register_line-inl.h16
-rw-r--r--runtime/verifier/register_line.cc69
-rw-r--r--runtime/verifier/register_line.h8
-rw-r--r--test/002-sleep/src/Main.java4
-rw-r--r--test/004-JniTest/src/Main.java2
-rw-r--r--test/004-ReferenceMap/src/Main.java5
-rw-r--r--test/004-SignalTest/src/Main.java3
-rw-r--r--test/004-StackWalk/src/Main.java5
-rw-r--r--test/004-ThreadStress/src/Main.java3
-rw-r--r--test/004-UnsafeTest/src/Main.java5
-rw-r--r--test/051-thread/src/Main.java5
-rw-r--r--test/082-inline-execute/src/Main.java144
-rw-r--r--test/088-monitor-verification/expected.txt7
-rw-r--r--test/088-monitor-verification/smali/NotStructuredOverUnlock.smali21
-rw-r--r--test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali21
-rw-r--r--test/088-monitor-verification/smali/OK.smali68
-rw-r--r--test/088-monitor-verification/smali/TooDeep.smali82
-rw-r--r--test/088-monitor-verification/smali/UnbalancedJoin.smali31
-rw-r--r--test/088-monitor-verification/smali/UnbalancedStraight.smali18
-rw-r--r--test/088-monitor-verification/src/Main.java69
-rw-r--r--test/088-monitor-verification/src/TooDeep.java64
-rw-r--r--test/088-monitor-verification/stack_inspect.cc81
-rw-r--r--test/101-fibonacci/src/Main.java2
-rw-r--r--test/115-native-bridge/run2
-rw-r--r--test/115-native-bridge/src/NativeBridgeMain.java2
-rw-r--r--test/116-nodex2oat/src/Main.java5
-rw-r--r--test/117-nopatchoat/src/Main.java6
-rw-r--r--test/118-noimage-dex2oat/src/Main.java5
-rw-r--r--test/119-noimage-patchoat/src/Main.java5
-rw-r--r--test/131-structural-change/src/Main.java5
-rw-r--r--test/134-nodex2oat-nofallback/src/Main.java5
-rw-r--r--test/137-cfi/src/Main.java5
-rw-r--r--test/139-register-natives/src/Main.java5
-rw-r--r--test/454-get-vreg/src/Main.java5
-rw-r--r--test/455-set-vreg/src/Main.java5
-rw-r--r--test/457-regs/src/Main.java6
-rw-r--r--test/461-get-reference-vreg/src/Main.java5
-rw-r--r--test/466-get-live-vreg/src/Main.java5
-rw-r--r--test/474-fp-sub-neg/expected.txt7
-rw-r--r--test/474-fp-sub-neg/src/Main.java25
-rw-r--r--test/497-inlining-and-class-loader/expected.txt4
-rw-r--r--test/497-inlining-and-class-loader/src/Main.java5
-rw-r--r--test/Android.libarttest.mk20
-rw-r--r--test/Android.run-test.mk39
-rw-r--r--test/StackWalk2/StackWalk2.java5
-rwxr-xr-xtest/dexdump/run-all-tests2
-rwxr-xr-xtest/etc/run-test-jar11
-rwxr-xr-xtest/run-test6
-rw-r--r--tools/libcore_failures.txt23
130 files changed, 2435 insertions, 1103 deletions
diff --git a/Android.mk b/Android.mk
index ab3eca4ea2..8859d3a285 100644
--- a/Android.mk
+++ b/Android.mk
@@ -241,7 +241,7 @@ endif
# Dexdump/list regression test.
.PHONY: test-art-host-dexdump
-test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist2)
+test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist)
ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests
# Valgrind. Currently only 32b gtests.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1db654aa2a..c88d677738 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -106,15 +106,14 @@ ART_GTEST_dexdump_test_TARGET_DEPS := \
dexdump2
# The dexlist test requires an image and the dexlist utility.
-# TODO: rename into dexlist when migration completes
ART_GTEST_dexlist_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_default_no-pic_64) \
$(HOST_CORE_IMAGE_default_no-pic_32) \
- $(HOST_OUT_EXECUTABLES)/dexlist2
+ $(HOST_OUT_EXECUTABLES)/dexlist
ART_GTEST_dexlist_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_default_no-pic_64) \
$(TARGET_CORE_IMAGE_default_no-pic_32) \
- dexlist2
+ dexlist
# The imgdiag test has dependencies on core.oat since it needs to load it during the test.
# For the host, also add the installed tool (in the base size, that should suffice). For the
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index be913fe634..31c3808197 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -105,7 +105,8 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// Don't devirt if we are in a different dex file since we can't have direct invokes in
// another dex file unless we always put a direct / patch pointer.
devirt_target = nullptr;
- current_dex_cache.Assign(runtime->GetClassLinker()->FindDexCache(*it->target_dex_file_));
+ current_dex_cache.Assign(runtime->GetClassLinker()->FindDexCache(
+ soa.Self(), *it->target_dex_file_));
CHECK(current_dex_cache.Get() != nullptr);
DexCompilationUnit cu(
mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 83f391de1a..8f1987a7db 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -31,7 +31,7 @@
namespace art {
inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
- return mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false);
+ return mUnit->GetClassLinker()->FindDexCache(Thread::Current(), *mUnit->GetDexFile(), false);
}
inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& soa,
@@ -87,7 +87,7 @@ inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
}
inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
- return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file, false);
+ return Runtime::Current()->GetClassLinker()->FindDexCache(Thread::Current(), *dex_file, false);
}
inline ArtField* CompilerDriver::ResolveField(
@@ -339,7 +339,8 @@ inline int CompilerDriver::IsFastInvoke(
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
- DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false));
+ DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(
+ soa.Self(), *mUnit->GetDexFile(), false));
CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(
target_method->dex_method_index, pointer_size),
resolved_method) << PrettyMethod(resolved_method);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c006e62a16..6d3a960048 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -601,7 +601,7 @@ static void CompileMethod(Thread* self,
// Do not have failures that should punt to the interpreter.
!verified_method->HasRuntimeThrow() &&
(verified_method->GetEncounteredVerificationFailures() &
- verifier::VERIFY_ERROR_FORCE_INTERPRETER) == 0 &&
+ (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 &&
// Is eligable for compilation by methods-to-compile filter.
driver->IsMethodToCompile(method_ref);
if (compile) {
@@ -1175,7 +1175,7 @@ bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, u
{
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(
- dex_file, false);
+ soa.Self(), dex_file, false);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
// Erroneous class.
@@ -1201,7 +1201,8 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file, false)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
+ soa.Self(), dex_file, false)));
class_linker->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
@@ -1227,7 +1228,8 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const Dex
*equals_referrers_class = false;
}
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file, false);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(
+ soa.Self(), dex_file, false);
// Get type from dex cache assuming it was populated by the verifier
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
@@ -1265,7 +1267,7 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
uint32_t type_idx) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(
- dex_file, false);
+ soa.Self(), dex_file, false);
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
@@ -1294,7 +1296,8 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
ScopedObjectAccess soa(Thread::Current());
Runtime* runtime = Runtime::Current();
- mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(dex_file, false);
+ mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(
+ soa.Self(), dex_file, false);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
if (resolved_class == nullptr) {
return false;
@@ -1423,7 +1426,8 @@ ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
{
StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache_handle(
- hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false)));
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(
+ soa.Self(), *mUnit->GetDexFile(), false)));
Handle<mirror::ClassLoader> class_loader_handle(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
resolved_field =
@@ -1473,7 +1477,8 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
{
StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache_handle(
- hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false)));
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(
+ soa.Self(), *mUnit->GetDexFile(), false)));
Handle<mirror::ClassLoader> class_loader_handle(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
resolved_field =
@@ -1659,7 +1664,8 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// Try to resolve the method and compiling method's class.
StackHandleScope<3> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile(), false)));
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(
+ soa.Self(), *mUnit->GetDexFile(), false)));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
uint32_t method_idx = target_method->dex_method_index;
@@ -1911,7 +1917,8 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file, false)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
+ soa.Self(), dex_file, false)));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
@@ -2090,7 +2097,8 @@ class VerifyClassVisitor : public CompilationVisitor {
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file, false)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
+ soa.Self(), dex_file, false)));
std::string error_msg;
if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
&class_def, true, &error_msg) ==
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index e35d07da83..1107599779 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -108,7 +108,7 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
ScopedObjectAccess soa(Thread::Current());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex = *java_lang_dex_file_;
- mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
+ mirror::DexCache* dex_cache = class_linker_->FindDexCache(soa.Self(), dex);
EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
const mirror::String* string = dex_cache->GetResolvedString(i);
@@ -210,8 +210,8 @@ TEST_F(CompilerDriverMethodsTest, Selection) {
CompileAll(class_loader);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- StackHandleScope<1> hs(self);
ScopedObjectAccess soa(self);
+ StackHandleScope<1> hs(self);
Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
reinterpret_cast<mirror::ClassLoader*>(self->DecodeJObject(class_loader))));
mirror::Class* klass = class_linker->FindClass(self, "LStaticLeafMethods;", h_loader);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 0e0b224578..fdf904d1f0 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -617,7 +617,8 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(
+ Thread::Current(), *dex_file_)));
ArtMethod* method = linker->ResolveMethod(
*dex_file_, it.GetMemberIndex(), dex_cache, NullHandle<mirror::ClassLoader>(), nullptr,
invoke_type);
@@ -668,7 +669,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
SHARED_REQUIRES(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
- dex_cache_ = class_linker_->FindDexCache(*dex_file);
+ dex_cache_ = class_linker_->FindDexCache(Thread::Current(), *dex_file);
}
return true;
}
@@ -798,7 +799,8 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
SHARED_REQUIRES(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
mirror::DexCache* dex_cache =
- (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(*ref.dex_file);
+ (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
+ Thread::Current(), *ref.dex_file);
ArtMethod* method = dex_cache->GetResolvedMethod(
ref.dex_method_index, class_linker_->GetImagePointerSize());
CHECK(method != nullptr);
@@ -832,7 +834,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
mirror::Class* GetTargetType(const LinkerPatch& patch)
SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile())
- ? dex_cache_ : class_linker_->FindDexCache(*patch.TargetTypeDexFile());
+ ? dex_cache_ : class_linker_->FindDexCache(Thread::Current(), *patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
CHECK(type != nullptr);
return type;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 7b42db8a7f..23ab94e5fe 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -902,7 +902,7 @@ HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke(
StackHandleScope<4> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
dex_compilation_unit_->GetClassLinker()->FindDexCache(
- *dex_compilation_unit_->GetDexFile())));
+ soa.Self(), *dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
@@ -912,7 +912,7 @@ HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke(
const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
- outer_compilation_unit_->GetClassLinker()->FindDexCache(outer_dex_file)));
+ outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
// The index at which the method's class is stored in the DexCache's type array.
@@ -1228,7 +1228,7 @@ static mirror::Class* GetClassFrom(CompilerDriver* driver,
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(compilation_unit.GetClassLoader())));
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
- compilation_unit.GetClassLinker()->FindDexCache(dex_file)));
+ compilation_unit.GetClassLinker()->FindDexCache(soa.Self(), dex_file)));
return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
}
@@ -1245,7 +1245,8 @@ bool HGraphBuilder::IsOutermostCompilingClass(uint16_t type_index) const {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
- dex_compilation_unit_->GetClassLinker()->FindDexCache(*dex_compilation_unit_->GetDexFile())));
+ dex_compilation_unit_->GetClassLinker()->FindDexCache(
+ soa.Self(), *dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
@@ -1264,7 +1265,8 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
- dex_compilation_unit_->GetClassLinker()->FindDexCache(*dex_compilation_unit_->GetDexFile())));
+ dex_compilation_unit_->GetClassLinker()->FindDexCache(
+ soa.Self(), *dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
ArtField* resolved_field = compiler_driver_->ResolveField(
@@ -1277,7 +1279,7 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
- outer_compilation_unit_->GetClassLinker()->FindDexCache(outer_dex_file)));
+ outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
// The index at which the field's class is stored in the DexCache's type array.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 503187bd3d..7c60026642 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1071,12 +1071,6 @@ void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCod
<< instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}
-void SlowPathCode::RecordPcInfo(CodeGenerator* codegen,
- HInstruction* instruction,
- uint32_t dex_pc) {
- codegen->RecordPcInfo(instruction, dex_pc, this);
-}
-
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
RegisterSet* register_set = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 938369b58c..cdd4675d2f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -81,7 +81,6 @@ class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
- void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
bool IsCoreRegisterSaved(int reg) const {
return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index d90bdd47e8..0640179195 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2724,11 +2724,9 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
Register temp = locations->GetTemp(0).AsRegister<Register>();
// temp = reg1 / reg2 (integer division)
- // temp = temp * reg2
- // dest = reg1 - temp
+ // dest = reg1 - temp * reg2
__ sdiv(temp, reg1, reg2);
- __ mul(temp, temp, reg2);
- __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
+ __ mls(out.AsRegister<Register>(), temp, reg2, reg1);
} else {
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
@@ -2898,7 +2896,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
// If the shift is > 32 bits, override the high part
__ subs(temp, o_l, ShifterOperand(kArmBitsPerWord));
__ it(PL);
- __ Lsl(o_h, low, temp, false, PL);
+ __ Lsl(o_h, low, temp, PL);
// Shift the low part
__ Lsl(o_l, low, o_l);
} else if (op->IsShr()) {
@@ -2912,7 +2910,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
// If the shift is > 32 bits, override the low part
__ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
__ it(PL);
- __ Asr(o_l, high, temp, false, PL);
+ __ Asr(o_l, high, temp, PL);
// Shift the high part
__ Asr(o_h, high, o_h);
} else {
@@ -2924,7 +2922,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
__ orr(o_l, o_l, ShifterOperand(temp));
__ subs(temp, o_h, ShifterOperand(kArmBitsPerWord));
__ it(PL);
- __ Lsr(o_l, high, temp, false, PL);
+ __ Lsr(o_l, high, temp, PL);
__ Lsr(o_h, high, o_h);
}
break;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ff90f32754..112d42e904 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -182,10 +182,10 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
ArtMethod* resolved_method;
if (invoke_instruction->IsInvokeStaticOrDirect()) {
MethodReference ref = invoke_instruction->AsInvokeStaticOrDirect()->GetTargetMethod();
- resolved_method = class_linker->FindDexCache(*ref.dex_file)->GetResolvedMethod(
+ resolved_method = class_linker->FindDexCache(soa.Self(), *ref.dex_file)->GetResolvedMethod(
ref.dex_method_index, class_linker->GetImagePointerSize());
} else {
- resolved_method = class_linker->FindDexCache(caller_dex_file)->GetResolvedMethod(
+ resolved_method = class_linker->FindDexCache(soa.Self(), caller_dex_file)->GetResolvedMethod(
method_index, class_linker->GetImagePointerSize());
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index df6e550b4a..0ac26de674 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -132,6 +132,12 @@ bool InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop(HBinaryOperation
// with
// ADD tmp, a, b
// NEG dst, tmp
+ // Note that we cannot optimize `(-a) + (-b)` to `-(a + b)` for floating-point.
+ // When `a` is `-0.0` and `b` is `0.0`, the former expression yields `0.0`,
+ // while the later yields `-0.0`.
+ if (!Primitive::IsIntegralType(binop->GetType())) {
+ return false;
+ }
binop->ReplaceInput(left_neg->GetInput(), 0);
binop->ReplaceInput(right_neg->GetInput(), 1);
left_neg->GetBlock()->RemoveInstruction(left_neg);
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 806fd7a8f4..69a3e627c9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -103,7 +103,7 @@ class IntrinsicSlowPathARM : public SlowPathCodeARM {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(kArtMethodRegister));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
@@ -989,10 +989,7 @@ void IntrinsicCodeGeneratorARM::VisitStringEquals(HInvoke* invoke) {
DCHECK_ALIGNED(value_offset, 4);
static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
- // temp cannot overflow because we cannot allocate a String object with size 4GiB or greater.
- __ add(temp, temp, ShifterOperand(temp));
__ LoadImmediate(temp1, value_offset);
- __ add(temp, temp, ShifterOperand(value_offset));
// Loop to compare strings 2 characters at a time starting at the front of the string.
// Ok to do this because strings with an odd length are zero-padded.
@@ -1002,8 +999,8 @@ void IntrinsicCodeGeneratorARM::VisitStringEquals(HInvoke* invoke) {
__ cmp(out, ShifterOperand(temp2));
__ b(&return_false, NE);
__ add(temp1, temp1, ShifterOperand(sizeof(uint32_t)));
- __ cmp(temp1, ShifterOperand(temp));
- __ b(&loop, LO);
+ __ subs(temp, temp, ShifterOperand(sizeof(uint32_t) / sizeof(uint16_t)));
+ __ b(&loop, GT);
// Return true and exit the function.
// If loop does not result in returning false, we return true.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a5332ea794..0171d6949d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
LocationFrom(kArtMethodRegister));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index b7126b24e3..be076cd3ff 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -141,7 +141,7 @@ class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(EAX));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 15fbac1c63..1f35b597fe 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -132,7 +132,7 @@ class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(
invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
- RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
UNREACHABLE();
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2a7699105c..6f251e8e6c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -540,11 +540,14 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer) const {
- StackHandleScopeCollection handles(Thread::Current());
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ soa.Self()->TransitionFromRunnableToSuspended(kNative);
RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
dex_compilation_unit, pass_observer, &handles);
if (graph->HasTryCatch()) {
+ soa.Self()->TransitionFromSuspendedToRunnable();
return nullptr;
}
@@ -582,6 +585,8 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
ArrayRef<const LinkerPatch>(linker_patches));
pass_observer->DumpDisassembly();
+
+ soa.Self()->TransitionFromSuspendedToRunnable();
return compiled_method;
}
@@ -709,7 +714,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit.GetClassLinker();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
+ soa.Self(), dex_file)));
Handle<mirror::ClassLoader> loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(class_loader)));
ArtMethod* art_method = compiler_driver->ResolveMethod(
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 824f28eb7c..516638b33c 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -79,6 +79,8 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
: HOptimization(graph, name),
handles_(handles),
worklist_(graph->GetArena(), kDefaultWorklistSize) {
+ // Mutator lock is required for NewHandle, but annotalysis ignores constructors.
+ ScopedObjectAccess soa(Thread::Current());
ClassLinker* linker = Runtime::Current()->GetClassLinker();
object_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject));
string_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangString));
@@ -87,7 +89,6 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangThrowable));
if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
DCHECK(ReferenceTypeInfo::IsValidHandle(object_class_handle_));
DCHECK(ReferenceTypeInfo::IsValidHandle(class_class_handle_));
DCHECK(ReferenceTypeInfo::IsValidHandle(string_class_handle_));
@@ -362,7 +363,8 @@ void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = cl->FindDexCache(instr->AsInvoke()->GetDexFile(), false);
+ mirror::DexCache* dex_cache = cl->FindDexCache(
+ soa.Self(), instr->AsInvoke()->GetDexFile(), false);
ArtMethod* method = dex_cache->GetResolvedMethod(
instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
DCHECK(method != nullptr);
@@ -393,7 +395,8 @@ void RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
DCHECK_EQ(instr->GetType(), Primitive::kPrimNot);
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(
+ soa.Self(), dex_file, false);
// Get type from dex cache assuming it was populated by the verifier.
SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
}
@@ -431,7 +434,7 @@ void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = cl->FindDexCache(info.GetDexFile());
+ mirror::DexCache* dex_cache = cl->FindDexCache(soa.Self(), info.GetDexFile(), false);
ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), dex_cache);
// TODO: There are certain cases where we can't resolve the field.
// b/21914925 is open to keep track of a repro case for this issue.
@@ -450,7 +453,7 @@ void RTPVisitor::VisitStaticFieldGet(HStaticFieldGet* instr) {
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache =
- Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile());
+ Runtime::Current()->GetClassLinker()->FindDexCache(soa.Self(), instr->GetDexFile(), false);
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
// TODO: investigating why we are still getting unresolved classes: b/22821472.
@@ -633,7 +636,7 @@ void RTPVisitor::VisitInvoke(HInvoke* instr) {
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = cl->FindDexCache(instr->GetDexFile());
+ mirror::DexCache* dex_cache = cl->FindDexCache(soa.Self(), instr->GetDexFile());
ArtMethod* method = dex_cache->GetResolvedMethod(
instr->GetDexMethodIndex(), cl->GetImagePointerSize());
mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false);
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 0e3e08c2da..807bedaa04 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -137,10 +137,14 @@ uint32_t ShifterOperand::encodingThumb() const {
if (rs_ == kNoRegister) {
// Immediate shift.
if (shift_ == RRX) {
+ DCHECK_EQ(immed_, 0u);
// RRX is encoded as an ROR with imm 0.
return ROR << 4 | static_cast<uint32_t>(rm_);
} else {
- uint32_t imm3 = immed_ >> 2;
+ DCHECK((1 <= immed_ && immed_ <= 31) ||
+ (immed_ == 0u && shift_ == LSL) ||
+ (immed_ == 32u && (shift_ == ASR || shift_ == LSR)));
+ uint32_t imm3 = (immed_ >> 2) & 7 /* 0b111 */;
uint32_t imm2 = immed_ & 3U /* 0b11 */;
return imm3 << 12 | imm2 << 6 | shift_ << 4 |
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index ef60fefe4d..7825457d5c 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -375,6 +375,13 @@ enum ItState {
kItE = kItElse
};
+// Set condition codes request.
+enum SetCc {
+ kCcDontCare, // Allows prioritizing 16-bit instructions on Thumb2 whether they set CCs or not.
+ kCcSet,
+ kCcKeep,
+};
+
constexpr uint32_t kNoItCondition = 3;
constexpr uint32_t kInvalidModifiedImmediate = -1;
@@ -392,25 +399,61 @@ class ArmAssembler : public Assembler {
virtual bool IsThumb() const = 0;
// Data-processing instructions.
- virtual void and_(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void and_(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void ands(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ and_(rd, rn, so, cond, kCcSet);
+ }
- virtual void eor(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void eor(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- virtual void sub(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
- virtual void subs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void eors(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ eor(rd, rn, so, cond, kCcSet);
+ }
+
+ virtual void sub(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void subs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ sub(rd, rn, so, cond, kCcSet);
+ }
- virtual void rsb(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
- virtual void rsbs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- virtual void add(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void rsbs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ rsb(rd, rn, so, cond, kCcSet);
+ }
+
+ virtual void add(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void adds(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ add(rd, rn, so, cond, kCcSet);
+ }
+
+ virtual void adc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void adcs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ adc(rd, rn, so, cond, kCcSet);
+ }
- virtual void adds(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- virtual void adc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void sbcs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ sbc(rd, rn, so, cond, kCcSet);
+ }
- virtual void sbc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- virtual void rsc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void rscs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ rsc(rd, rn, so, cond, kCcSet);
+ }
virtual void tst(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
@@ -420,16 +463,33 @@ class ArmAssembler : public Assembler {
virtual void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
- virtual void orr(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
- virtual void orrs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void orr(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void orrs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ orr(rd, rn, so, cond, kCcSet);
+ }
+
+ virtual void mov(Register rd, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void movs(Register rd, const ShifterOperand& so, Condition cond = AL) {
+ mov(rd, so, cond, kCcSet);
+ }
- virtual void mov(Register rd, const ShifterOperand& so, Condition cond = AL) = 0;
- virtual void movs(Register rd, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void bic(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- virtual void bic(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void bics(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ bic(rd, rn, so, cond, kCcSet);
+ }
- virtual void mvn(Register rd, const ShifterOperand& so, Condition cond = AL) = 0;
- virtual void mvns(Register rd, const ShifterOperand& so, Condition cond = AL) = 0;
+ virtual void mvn(Register rd, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void mvns(Register rd, const ShifterOperand& so, Condition cond = AL) {
+ mvn(rd, so, cond, kCcSet);
+ }
// Miscellaneous data-processing instructions.
virtual void clz(Register rd, Register rm, Condition cond = AL) = 0;
@@ -697,25 +757,68 @@ class ArmAssembler : public Assembler {
// Convenience shift instructions. Use mov instruction with shifter operand
// for variants setting the status flags or using a register shift count.
- virtual void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Lsr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Asr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Ror(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Rrx(Register rd, Register rm, bool setcc = false,
- Condition cond = AL) = 0;
+ virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
- virtual void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Lsr(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Asr(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) = 0;
- virtual void Ror(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) = 0;
+ void Lsls(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
+ Lsl(rd, rm, shift_imm, cond, kCcSet);
+ }
+
+ virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Lsrs(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
+ Lsr(rd, rm, shift_imm, cond, kCcSet);
+ }
+
+ virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Asrs(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
+ Asr(rd, rm, shift_imm, cond, kCcSet);
+ }
+
+ virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Rors(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL) {
+ Ror(rd, rm, shift_imm, cond, kCcSet);
+ }
+
+ virtual void Rrx(Register rd, Register rm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Rrxs(Register rd, Register rm, Condition cond = AL) {
+ Rrx(rd, rm, cond, kCcSet);
+ }
+
+ virtual void Lsl(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Lsls(Register rd, Register rm, Register rn, Condition cond = AL) {
+ Lsl(rd, rm, rn, cond, kCcSet);
+ }
+
+ virtual void Lsr(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Lsrs(Register rd, Register rm, Register rn, Condition cond = AL) {
+ Lsr(rd, rm, rn, cond, kCcSet);
+ }
+
+ virtual void Asr(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Asrs(Register rd, Register rm, Register rn, Condition cond = AL) {
+ Asr(rd, rm, rn, cond, kCcSet);
+ }
+
+ virtual void Ror(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ void Rors(Register rd, Register rm, Register rn, Condition cond = AL) {
+ Ror(rd, rm, rn, cond, kCcSet);
+ }
// Returns whether the `immediate` can fit in a `ShifterOperand`. If yes,
// `shifter_op` contains the operand.
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 6e60ddc260..d91ddee9b9 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -57,126 +57,94 @@ bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
}
void Arm32Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), AND, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), AND, set_cc, rn, rd, so);
}
void Arm32Assembler::eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), EOR, set_cc, rn, rd, so);
}
void Arm32Assembler::sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), SUB, set_cc, rn, rd, so);
}
void Arm32Assembler::rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), RSB, set_cc, rn, rd, so);
}
-void Arm32Assembler::rsbs(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
-}
-
-
void Arm32Assembler::add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
-}
-
-
-void Arm32Assembler::adds(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
-}
-
-
-void Arm32Assembler::subs(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), ADD, set_cc, rn, rd, so);
}
void Arm32Assembler::adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), ADC, set_cc, rn, rd, so);
}
void Arm32Assembler::sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), SBC, set_cc, rn, rd, so);
}
void Arm32Assembler::rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), RSC, set_cc, rn, rd, so);
}
void Arm32Assembler::tst(Register rn, const ShifterOperand& so, Condition cond) {
CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
- EmitType01(cond, so.type(), TST, 1, rn, R0, so);
+ EmitType01(cond, so.type(), TST, kCcSet, rn, R0, so);
}
void Arm32Assembler::teq(Register rn, const ShifterOperand& so, Condition cond) {
CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
- EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
+ EmitType01(cond, so.type(), TEQ, kCcSet, rn, R0, so);
}
void Arm32Assembler::cmp(Register rn, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
+ EmitType01(cond, so.type(), CMP, kCcSet, rn, R0, so);
}
void Arm32Assembler::cmn(Register rn, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
-}
-
-
-void Arm32Assembler::orr(Register rd, Register rn,
- const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
-}
-
-
-void Arm32Assembler::orrs(Register rd, Register rn,
- const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
+ EmitType01(cond, so.type(), CMN, kCcSet, rn, R0, so);
}
-void Arm32Assembler::mov(Register rd, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
+void Arm32Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), ORR, set_cc, rn, rd, so);
}
-void Arm32Assembler::movs(Register rd, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
+void Arm32Assembler::mov(Register rd, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), MOV, set_cc, R0, rd, so);
}
void Arm32Assembler::bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
-}
-
-
-void Arm32Assembler::mvn(Register rd, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), BIC, set_cc, rn, rd, so);
}
-void Arm32Assembler::mvns(Register rd, const ShifterOperand& so, Condition cond) {
- EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
+void Arm32Assembler::mvn(Register rd, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitType01(cond, so.type(), MVN, set_cc, R0, rd, so);
}
@@ -573,7 +541,7 @@ void Arm32Assembler::bl(Label* label, Condition cond) {
void Arm32Assembler::MarkExceptionHandler(Label* label) {
- EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
+ EmitType01(AL, 1, TST, kCcSet, PC, R0, ShifterOperand(0));
Label l;
b(&l);
EmitBranch(AL, label, false);
@@ -590,7 +558,7 @@ void Arm32Assembler::Emit(int32_t value) {
void Arm32Assembler::EmitType01(Condition cond,
int type,
Opcode opcode,
- int set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -599,7 +567,7 @@ void Arm32Assembler::EmitType01(Condition cond,
int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
type << kTypeShift |
static_cast<int32_t>(opcode) << kOpcodeShift |
- set_cc << kSShift |
+ (set_cc == kCcSet ? 1 : 0) << kSShift |
static_cast<int32_t>(rn) << kRnShift |
static_cast<int32_t>(rd) << kRdShift |
so.encodingArm();
@@ -1158,96 +1126,60 @@ void Arm32Assembler::EmitVFPds(Condition cond, int32_t opcode,
void Arm32Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK_LE(shift_imm, 31u);
- if (setcc) {
- movs(rd, ShifterOperand(rm, LSL, shift_imm), cond);
- } else {
- mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
- }
+ mov(rd, ShifterOperand(rm, LSL, shift_imm), cond, set_cc);
}
void Arm32Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK(1u <= shift_imm && shift_imm <= 32u);
if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
- if (setcc) {
- movs(rd, ShifterOperand(rm, LSR, shift_imm), cond);
- } else {
- mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
- }
+ mov(rd, ShifterOperand(rm, LSR, shift_imm), cond, set_cc);
}
void Arm32Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK(1u <= shift_imm && shift_imm <= 32u);
if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
- if (setcc) {
- movs(rd, ShifterOperand(rm, ASR, shift_imm), cond);
- } else {
- mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
- }
+ mov(rd, ShifterOperand(rm, ASR, shift_imm), cond, set_cc);
}
void Arm32Assembler::Ror(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK(1u <= shift_imm && shift_imm <= 31u);
- if (setcc) {
- movs(rd, ShifterOperand(rm, ROR, shift_imm), cond);
- } else {
- mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
- }
+ mov(rd, ShifterOperand(rm, ROR, shift_imm), cond, set_cc);
}
-void Arm32Assembler::Rrx(Register rd, Register rm, bool setcc, Condition cond) {
- if (setcc) {
- movs(rd, ShifterOperand(rm, ROR, 0), cond);
- } else {
- mov(rd, ShifterOperand(rm, ROR, 0), cond);
- }
+void Arm32Assembler::Rrx(Register rd, Register rm, Condition cond, SetCc set_cc) {
+ mov(rd, ShifterOperand(rm, ROR, 0), cond, set_cc);
}
void Arm32Assembler::Lsl(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
- if (setcc) {
- movs(rd, ShifterOperand(rm, LSL, rn), cond);
- } else {
- mov(rd, ShifterOperand(rm, LSL, rn), cond);
- }
+ Condition cond, SetCc set_cc) {
+ mov(rd, ShifterOperand(rm, LSL, rn), cond, set_cc);
}
void Arm32Assembler::Lsr(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
- if (setcc) {
- movs(rd, ShifterOperand(rm, LSR, rn), cond);
- } else {
- mov(rd, ShifterOperand(rm, LSR, rn), cond);
- }
+ Condition cond, SetCc set_cc) {
+ mov(rd, ShifterOperand(rm, LSR, rn), cond, set_cc);
}
void Arm32Assembler::Asr(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
- if (setcc) {
- movs(rd, ShifterOperand(rm, ASR, rn), cond);
- } else {
- mov(rd, ShifterOperand(rm, ASR, rn), cond);
- }
+ Condition cond, SetCc set_cc) {
+ mov(rd, ShifterOperand(rm, ASR, rn), cond, set_cc);
}
void Arm32Assembler::Ror(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
- if (setcc) {
- movs(rd, ShifterOperand(rm, ROR, rn), cond);
- } else {
- mov(rd, ShifterOperand(rm, ROR, rn), cond);
- }
+ Condition cond, SetCc set_cc) {
+ mov(rd, ShifterOperand(rm, ROR, rn), cond, set_cc);
}
void Arm32Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
@@ -1434,24 +1366,24 @@ void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value
Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperandCanHoldArm32(value, &shifter_op)) {
- adds(rd, rn, shifter_op, cond);
+ add(rd, rn, shifter_op, cond, kCcSet);
} else if (ShifterOperandCanHoldArm32(-value, &shifter_op)) {
- subs(rd, rn, shifter_op, cond);
+ sub(rd, rn, shifter_op, cond, kCcSet);
} else {
CHECK(rn != IP);
if (ShifterOperandCanHoldArm32(~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
- adds(rd, rn, ShifterOperand(IP), cond);
+ add(rd, rn, ShifterOperand(IP), cond, kCcSet);
} else if (ShifterOperandCanHoldArm32(~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
- subs(rd, rn, ShifterOperand(IP), cond);
+ sub(rd, rn, ShifterOperand(IP), cond, kCcSet);
} else {
movw(IP, Low16Bits(value), cond);
uint16_t value_high = High16Bits(value);
if (value_high != 0) {
movt(IP, value_high, cond);
}
- adds(rd, rn, ShifterOperand(IP), cond);
+ add(rd, rn, ShifterOperand(IP), cond, kCcSet);
}
}
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 1c38eec12c..b96bb74182 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -39,25 +39,29 @@ class Arm32Assembler FINAL : public ArmAssembler {
}
// Data-processing instructions.
- void and_(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void and_(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void eor(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void eor(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void sub(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void subs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void sub(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void rsb(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void rsbs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void add(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void add(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void adds(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void adc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void adc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void sbc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void rsc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
void tst(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
@@ -67,16 +71,17 @@ class Arm32Assembler FINAL : public ArmAssembler {
void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void orr(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void orrs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void orr(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void mov(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void movs(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void mov(Register rd, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void bic(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void bic(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void mvn(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void mvns(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void mvn(Register rd, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
// Miscellaneous data-processing instructions.
void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
@@ -204,25 +209,25 @@ class Arm32Assembler FINAL : public ArmAssembler {
void bl(Label* label, Condition cond = AL) OVERRIDE;
void blx(Register rm, Condition cond = AL) OVERRIDE;
void bx(Register rm, Condition cond = AL) OVERRIDE;
- void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Lsr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Asr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Ror(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Rrx(Register rd, Register rm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
-
- void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Lsr(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Asr(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Ror(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
+ virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Rrx(Register rd, Register rm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+
+ virtual void Lsl(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Lsr(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Asr(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Ror(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
void Push(Register rd, Condition cond = AL) OVERRIDE;
void Pop(Register rd, Condition cond = AL) OVERRIDE;
@@ -305,7 +310,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
void EmitType01(Condition cond,
int type,
Opcode opcode,
- int set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index efd517b83a..e6412ac684 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -42,7 +42,8 @@ static constexpr bool kUseSparseShiftImmediates = true;
class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
arm::Register, arm::SRegister,
- uint32_t, arm::ShifterOperand, arm::Condition> {
+ uint32_t, arm::ShifterOperand, arm::Condition,
+ arm::SetCc> {
protected:
std::string GetArchitectureString() OVERRIDE {
return "arm";
@@ -125,6 +126,10 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
conditions_.push_back(arm::Condition::AL);
}
+ set_ccs_.push_back(arm::kCcDontCare);
+ set_ccs_.push_back(arm::kCcSet);
+ set_ccs_.push_back(arm::kCcKeep);
+
shifter_operands_.push_back(arm::ShifterOperand(0));
shifter_operands_.push_back(arm::ShifterOperand(1));
shifter_operands_.push_back(arm::ShifterOperand(2));
@@ -240,6 +245,15 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
return oss.str();
}
+ std::vector<arm::SetCc>& GetSetCcs() OVERRIDE {
+ return set_ccs_;
+ }
+
+ std::string GetSetCcString(arm::SetCc s) OVERRIDE {
+ // For arm32, kCcDontCare defaults to not setting condition codes.
+ return s == arm::kCcSet ? "s" : "";
+ }
+
arm::Register GetPCRegister() OVERRIDE {
return arm::R15;
}
@@ -369,12 +383,12 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
cond_index = after_cond_filter.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond_filter.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond_filter.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
if (EvalFilterString(after_cond_filter)) {
continue;
@@ -384,6 +398,30 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
}
}
+ void TemplateHelper(std::function<void(arm::SetCc)> f, int depth ATTRIBUTE_UNUSED,
+ bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::string filter,
+ std::ostringstream& oss) {
+ for (arm::SetCc s : GetSetCcs()) {
+ std::string after_cond = fmt;
+ std::string after_cond_filter = filter;
+
+ size_t cond_index = after_cond.find(SET_CC_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
+ }
+
+ cond_index = after_cond_filter.find(SET_CC_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond_filter.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
+ }
+ if (EvalFilterString(after_cond_filter)) {
+ continue;
+ }
+
+ ExecuteAndPrint([&] () { f(s); }, after_cond, oss);
+ }
+ }
+
template <typename... Args>
void TemplateHelper(std::function<void(arm::Register, Args...)> f, int depth, bool without_pc,
std::string fmt, std::string filter, std::ostringstream& oss) {
@@ -449,12 +487,12 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
cond_index = after_cond_filter.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond_filter.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond_filter.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
if (EvalFilterString(after_cond_filter)) {
continue;
@@ -466,25 +504,51 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
}
}
- template <typename T1, typename T2>
- std::function<void(T1, T2)> GetBoundFunction2(void (arm::Arm32Assembler::*f)(T1, T2)) {
+ template <typename... Args>
+ void TemplateHelper(std::function<void(arm::SetCc, Args...)> f, int depth, bool without_pc,
+ std::string fmt, std::string filter, std::ostringstream& oss) {
+ for (arm::SetCc s : GetSetCcs()) {
+ std::string after_cond = fmt;
+ std::string after_cond_filter = filter;
+
+ size_t cond_index = after_cond.find(SET_CC_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
+ }
+
+ cond_index = after_cond_filter.find(SET_CC_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond_filter.replace(cond_index, ConstexprStrLen(SET_CC_TOKEN), GetSetCcString(s));
+ }
+ if (EvalFilterString(after_cond_filter)) {
+ continue;
+ }
+
+ auto lambda = [&] (Args... args) { f(s, args...); }; // NOLINT [readability/braces] [4]
+ TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
+ after_cond, after_cond_filter, oss);
+ }
+ }
+
+ template <typename Assembler, typename T1, typename T2>
+ std::function<void(T1, T2)> GetBoundFunction2(void (Assembler::*f)(T1, T2)) {
return std::bind(f, GetAssembler(), _1, _2);
}
- template <typename T1, typename T2, typename T3>
- std::function<void(T1, T2, T3)> GetBoundFunction3(void (arm::Arm32Assembler::*f)(T1, T2, T3)) {
+ template <typename Assembler, typename T1, typename T2, typename T3>
+ std::function<void(T1, T2, T3)> GetBoundFunction3(void (Assembler::*f)(T1, T2, T3)) {
return std::bind(f, GetAssembler(), _1, _2, _3);
}
- template <typename T1, typename T2, typename T3, typename T4>
+ template <typename Assembler, typename T1, typename T2, typename T3, typename T4>
std::function<void(T1, T2, T3, T4)> GetBoundFunction4(
- void (arm::Arm32Assembler::*f)(T1, T2, T3, T4)) {
+ void (Assembler::*f)(T1, T2, T3, T4)) {
return std::bind(f, GetAssembler(), _1, _2, _3, _4);
}
- template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ template <typename Assembler, typename T1, typename T2, typename T3, typename T4, typename T5>
std::function<void(T1, T2, T3, T4, T5)> GetBoundFunction5(
- void (arm::Arm32Assembler::*f)(T1, T2, T3, T4, T5)) {
+ void (Assembler::*f)(T1, T2, T3, T4, T5)) {
return std::bind(f, GetAssembler(), _1, _2, _3, _4, _5);
}
@@ -503,26 +567,26 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
DriverStr(oss.str(), test_name);
}
- template <typename... Args>
- void T2Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ template <typename Assembler, typename... Args>
+ void T2Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
std::string test_name, std::string filter = "") {
GenericTemplateHelper(GetBoundFunction2(f), without_pc, fmt, test_name, filter);
}
- template <typename... Args>
- void T3Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ template <typename Assembler, typename... Args>
+ void T3Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
std::string test_name, std::string filter = "") {
GenericTemplateHelper(GetBoundFunction3(f), without_pc, fmt, test_name, filter);
}
- template <typename... Args>
- void T4Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ template <typename Assembler, typename... Args>
+ void T4Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
std::string test_name, std::string filter = "") {
GenericTemplateHelper(GetBoundFunction4(f), without_pc, fmt, test_name, filter);
}
- template <typename... Args>
- void T5Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ template <typename Assembler, typename... Args>
+ void T5Helper(void (Assembler::*f)(Args...), bool without_pc, std::string fmt,
std::string test_name, std::string filter = "") {
GenericTemplateHelper(GetBoundFunction5(f), without_pc, fmt, test_name, filter);
}
@@ -573,6 +637,7 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
std::vector<arm::Register*> registers_;
std::vector<arm::Condition> conditions_;
+ std::vector<arm::SetCc> set_ccs_;
std::vector<arm::ShifterOperand> shifter_operands_;
};
@@ -656,15 +721,23 @@ TEST_F(AssemblerArm32Test, Udiv) {
}
TEST_F(AssemblerArm32Test, And) {
- T4Helper(&arm::Arm32Assembler::and_, true, "and{cond} {reg1}, {reg2}, {shift}", "and");
+ T5Helper(&arm::Arm32Assembler::and_, true, "and{cond}{s} {reg1}, {reg2}, {shift}", "and");
+}
+
+TEST_F(AssemblerArm32Test, Ands) {
+ T4Helper(&arm::Arm32Assembler::ands, true, "and{cond}s {reg1}, {reg2}, {shift}", "ands");
}
TEST_F(AssemblerArm32Test, Eor) {
- T4Helper(&arm::Arm32Assembler::eor, true, "eor{cond} {reg1}, {reg2}, {shift}", "eor");
+ T5Helper(&arm::Arm32Assembler::eor, true, "eor{cond}{s} {reg1}, {reg2}, {shift}", "eor");
+}
+
+TEST_F(AssemblerArm32Test, Eors) {
+ T4Helper(&arm::Arm32Assembler::eors, true, "eor{cond}s {reg1}, {reg2}, {shift}", "eors");
}
TEST_F(AssemblerArm32Test, Orr) {
- T4Helper(&arm::Arm32Assembler::orr, true, "orr{cond} {reg1}, {reg2}, {shift}", "orr");
+ T5Helper(&arm::Arm32Assembler::orr, true, "orr{cond}{s} {reg1}, {reg2}, {shift}", "orr");
}
TEST_F(AssemblerArm32Test, Orrs) {
@@ -672,11 +745,15 @@ TEST_F(AssemblerArm32Test, Orrs) {
}
TEST_F(AssemblerArm32Test, Bic) {
- T4Helper(&arm::Arm32Assembler::bic, true, "bic{cond} {reg1}, {reg2}, {shift}", "bic");
+ T5Helper(&arm::Arm32Assembler::bic, true, "bic{cond}{s} {reg1}, {reg2}, {shift}", "bic");
+}
+
+TEST_F(AssemblerArm32Test, Bics) {
+ T4Helper(&arm::Arm32Assembler::bics, true, "bic{cond}s {reg1}, {reg2}, {shift}", "bics");
}
TEST_F(AssemblerArm32Test, Mov) {
- T3Helper(&arm::Arm32Assembler::mov, true, "mov{cond} {reg1}, {shift}", "mov");
+ T4Helper(&arm::Arm32Assembler::mov, true, "mov{cond}{s} {reg1}, {shift}", "mov");
}
TEST_F(AssemblerArm32Test, Movs) {
@@ -684,7 +761,7 @@ TEST_F(AssemblerArm32Test, Movs) {
}
TEST_F(AssemblerArm32Test, Mvn) {
- T3Helper(&arm::Arm32Assembler::mvn, true, "mvn{cond} {reg1}, {shift}", "mvn");
+ T4Helper(&arm::Arm32Assembler::mvn, true, "mvn{cond}{s} {reg1}, {shift}", "mvn");
}
TEST_F(AssemblerArm32Test, Mvns) {
@@ -692,7 +769,7 @@ TEST_F(AssemblerArm32Test, Mvns) {
}
TEST_F(AssemblerArm32Test, Add) {
- T4Helper(&arm::Arm32Assembler::add, false, "add{cond} {reg1}, {reg2}, {shift}", "add");
+ T5Helper(&arm::Arm32Assembler::add, false, "add{cond}{s} {reg1}, {reg2}, {shift}", "add");
}
TEST_F(AssemblerArm32Test, Adds) {
@@ -700,11 +777,15 @@ TEST_F(AssemblerArm32Test, Adds) {
}
TEST_F(AssemblerArm32Test, Adc) {
- T4Helper(&arm::Arm32Assembler::adc, false, "adc{cond} {reg1}, {reg2}, {shift}", "adc");
+ T5Helper(&arm::Arm32Assembler::adc, false, "adc{cond}{s} {reg1}, {reg2}, {shift}", "adc");
+}
+
+TEST_F(AssemblerArm32Test, Adcs) {
+ T4Helper(&arm::Arm32Assembler::adcs, false, "adc{cond}s {reg1}, {reg2}, {shift}", "adcs");
}
TEST_F(AssemblerArm32Test, Sub) {
- T4Helper(&arm::Arm32Assembler::sub, false, "sub{cond} {reg1}, {reg2}, {shift}", "sub");
+ T5Helper(&arm::Arm32Assembler::sub, false, "sub{cond}{s} {reg1}, {reg2}, {shift}", "sub");
}
TEST_F(AssemblerArm32Test, Subs) {
@@ -712,11 +793,15 @@ TEST_F(AssemblerArm32Test, Subs) {
}
TEST_F(AssemblerArm32Test, Sbc) {
- T4Helper(&arm::Arm32Assembler::sbc, false, "sbc{cond} {reg1}, {reg2}, {shift}", "sbc");
+ T5Helper(&arm::Arm32Assembler::sbc, false, "sbc{cond}{s} {reg1}, {reg2}, {shift}", "sbc");
+}
+
+TEST_F(AssemblerArm32Test, Sbcs) {
+ T4Helper(&arm::Arm32Assembler::sbcs, false, "sbc{cond}s {reg1}, {reg2}, {shift}", "sbcs");
}
TEST_F(AssemblerArm32Test, Rsb) {
- T4Helper(&arm::Arm32Assembler::rsb, true, "rsb{cond} {reg1}, {reg2}, {shift}", "rsb");
+ T5Helper(&arm::Arm32Assembler::rsb, true, "rsb{cond}{s} {reg1}, {reg2}, {shift}", "rsb");
}
TEST_F(AssemblerArm32Test, Rsbs) {
@@ -724,7 +809,11 @@ TEST_F(AssemblerArm32Test, Rsbs) {
}
TEST_F(AssemblerArm32Test, Rsc) {
- T4Helper(&arm::Arm32Assembler::rsc, true, "rsc{cond} {reg1}, {reg2}, {shift}", "rsc");
+ T5Helper(&arm::Arm32Assembler::rsc, true, "rsc{cond}{s} {reg1}, {reg2}, {shift}", "rsc");
+}
+
+TEST_F(AssemblerArm32Test, Rscs) {
+ T4Helper(&arm::Arm32Assembler::rscs, false, "rsc{cond}s {reg1}, {reg2}, {shift}", "rscs");
}
/* TODO: Need better filter support.
diff --git a/compiler/utils/arm/assembler_arm_test.h b/compiler/utils/arm/assembler_arm_test.h
index 838abb696d..a85a05e044 100644
--- a/compiler/utils/arm/assembler_arm_test.h
+++ b/compiler/utils/arm/assembler_arm_test.h
@@ -21,7 +21,13 @@
namespace art {
-template<typename Ass, typename Reg, typename FPReg, typename Imm, typename SOp, typename Cond>
+template<typename Ass,
+ typename Reg,
+ typename FPReg,
+ typename Imm,
+ typename SOp,
+ typename Cond,
+ typename SetCc>
class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
public:
typedef AssemblerTest<Ass, Reg, FPReg, Imm> Base;
@@ -94,7 +100,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
for (Imm i : immediates1) {
@@ -185,7 +191,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
for (std::pair<Imm, Imm>& pair : immediates) {
@@ -271,7 +277,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
for (auto reg1 : reg1_registers) {
@@ -337,7 +343,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
for (auto reg1 : reg1_registers) {
@@ -401,7 +407,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
for (const SOp& shift : shifts) {
@@ -457,7 +463,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
}
for (const SOp& shift : shifts) {
@@ -511,6 +517,9 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
virtual std::vector<Cond>& GetConditions() = 0;
virtual std::string GetConditionString(Cond c) = 0;
+ virtual std::vector<SetCc>& GetSetCcs() = 0;
+ virtual std::string GetSetCcString(SetCc s) = 0;
+
virtual std::vector<SOp>& GetShiftOperands() = 0;
virtual std::string GetShiftString(SOp sop) = 0;
@@ -534,6 +543,7 @@ class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
static constexpr const char* REG3_TOKEN = "{reg3}";
static constexpr const char* REG4_TOKEN = "{reg4}";
static constexpr const char* COND_TOKEN = "{cond}";
+ static constexpr const char* SET_CC_TOKEN = "{s}";
static constexpr const char* SHIFT_TOKEN = "{shift}";
private:
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 4e918e9574..90ed10c498 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -417,128 +417,96 @@ bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
}
void Thumb2Assembler::and_(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, AND, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, AND, set_cc, rn, rd, so);
}
void Thumb2Assembler::eor(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, EOR, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, EOR, set_cc, rn, rd, so);
}
void Thumb2Assembler::sub(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, SUB, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, SUB, set_cc, rn, rd, so);
}
void Thumb2Assembler::rsb(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, RSB, 0, rn, rd, so);
-}
-
-
-void Thumb2Assembler::rsbs(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, RSB, 1, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, RSB, set_cc, rn, rd, so);
}
void Thumb2Assembler::add(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, ADD, 0, rn, rd, so);
-}
-
-
-void Thumb2Assembler::adds(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, ADD, 1, rn, rd, so);
-}
-
-
-void Thumb2Assembler::subs(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, SUB, 1, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, ADD, set_cc, rn, rd, so);
}
void Thumb2Assembler::adc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, ADC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, ADC, set_cc, rn, rd, so);
}
void Thumb2Assembler::sbc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, SBC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, SBC, set_cc, rn, rd, so);
}
void Thumb2Assembler::rsc(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, RSC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, RSC, set_cc, rn, rd, so);
}
void Thumb2Assembler::tst(Register rn, const ShifterOperand& so, Condition cond) {
CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
- EmitDataProcessing(cond, TST, 1, rn, R0, so);
+ EmitDataProcessing(cond, TST, kCcSet, rn, R0, so);
}
void Thumb2Assembler::teq(Register rn, const ShifterOperand& so, Condition cond) {
CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
- EmitDataProcessing(cond, TEQ, 1, rn, R0, so);
+ EmitDataProcessing(cond, TEQ, kCcSet, rn, R0, so);
}
void Thumb2Assembler::cmp(Register rn, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, CMP, 1, rn, R0, so);
+ EmitDataProcessing(cond, CMP, kCcSet, rn, R0, so);
}
void Thumb2Assembler::cmn(Register rn, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, CMN, 1, rn, R0, so);
+ EmitDataProcessing(cond, CMN, kCcSet, rn, R0, so);
}
-void Thumb2Assembler::orr(Register rd, Register rn,
- const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, ORR, 0, rn, rd, so);
+void Thumb2Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, ORR, set_cc, rn, rd, so);
}
-void Thumb2Assembler::orrs(Register rd, Register rn,
- const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, ORR, 1, rn, rd, so);
-}
-
-
-void Thumb2Assembler::mov(Register rd, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, MOV, 0, R0, rd, so);
-}
-
-
-void Thumb2Assembler::movs(Register rd, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, MOV, 1, R0, rd, so);
+void Thumb2Assembler::mov(Register rd, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, MOV, set_cc, R0, rd, so);
}
void Thumb2Assembler::bic(Register rd, Register rn, const ShifterOperand& so,
- Condition cond) {
- EmitDataProcessing(cond, BIC, 0, rn, rd, so);
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, BIC, set_cc, rn, rd, so);
}
-void Thumb2Assembler::mvn(Register rd, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, MVN, 0, R0, rd, so);
-}
-
-
-void Thumb2Assembler::mvns(Register rd, const ShifterOperand& so, Condition cond) {
- EmitDataProcessing(cond, MVN, 1, R0, rd, so);
+void Thumb2Assembler::mvn(Register rd, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, MVN, set_cc, R0, rd, so);
}
@@ -1054,7 +1022,7 @@ void Thumb2Assembler::blx(Label* label) {
void Thumb2Assembler::MarkExceptionHandler(Label* label) {
- EmitDataProcessing(AL, TST, 1, PC, R0, ShifterOperand(0));
+ EmitDataProcessing(AL, TST, kCcSet, PC, R0, ShifterOperand(0));
Label l;
b(&l);
EmitBranch(AL, label, false, false);
@@ -1075,9 +1043,9 @@ void Thumb2Assembler::Emit16(int16_t value) {
}
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
+bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1086,7 +1054,7 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
}
// Check special case for SP relative ADD and SUB immediate.
- if ((opcode == ADD || opcode == SUB) && rn == SP && so.IsImmediate()) {
+ if ((opcode == ADD || opcode == SUB) && rn == SP && so.IsImmediate() && set_cc != kCcSet) {
// If the immediate is in range, use 16 bit.
if (rd == SP) {
if (so.GetImmediate() < (1 << 9)) { // 9 bit immediate.
@@ -1099,8 +1067,10 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
}
}
- bool can_contain_high_register = (opcode == MOV)
- || ((opcode == ADD) && (rn == rd) && !set_cc);
+ bool can_contain_high_register =
+ (opcode == CMP) ||
+ (opcode == MOV && set_cc != kCcSet) ||
+ ((opcode == ADD) && (rn == rd) && set_cc != kCcSet);
if (IsHighRegister(rd) || IsHighRegister(rn)) {
if (!can_contain_high_register) {
@@ -1146,39 +1116,80 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
}
if (so.IsImmediate()) {
- if (rn_is_valid && rn != rd) {
- // The only thumb1 instruction with a register and an immediate are ADD and SUB. The
- // immediate must be 3 bits.
- if (opcode != ADD && opcode != SUB) {
+ if (opcode == RSB) {
+ DCHECK(rn_is_valid);
+ if (so.GetImmediate() != 0u) {
return true;
- } else {
- // Check that the immediate is 3 bits for ADD and SUB.
- if (so.GetImmediate() >= 8) {
+ }
+ } else if (rn_is_valid && rn != rd) {
+ // The only thumb1 instructions with a register and an immediate are ADD and SUB
+ // with a 3-bit immediate, and RSB with zero immediate.
+ if (opcode == ADD || opcode == SUB) {
+ if (!IsUint<3>(so.GetImmediate())) {
return true;
}
+ } else {
+ return true;
}
} else {
// ADD, SUB, CMP and MOV may be thumb1 only if the immediate is 8 bits.
if (!(opcode == ADD || opcode == SUB || opcode == MOV || opcode == CMP)) {
return true;
} else {
- if (so.GetImmediate() > 255) {
+ if (!IsUint<8>(so.GetImmediate())) {
return true;
}
}
}
- }
-
- // Check for register shift operand.
- if (so.IsRegister() && so.IsShift()) {
- if (opcode != MOV) {
- return true;
- }
- // Check for MOV with an ROR.
- if (so.GetShift() == ROR) {
- if (so.GetImmediate() != 0) {
+ } else {
+ DCHECK(so.IsRegister());
+ if (so.IsShift()) {
+ // Shift operand - check if it is a MOV convertible to a 16-bit shift instruction.
+ if (opcode != MOV) {
return true;
}
+ // Check for MOV with an ROR/RRX. There is no 16-bit ROR immediate and no 16-bit RRX.
+ if (so.GetShift() == ROR || so.GetShift() == RRX) {
+ return true;
+ }
+ // 16-bit shifts set condition codes if and only if outside IT block,
+ // i.e. if and only if cond == AL.
+ if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
+ return true;
+ }
+ } else {
+ // Register operand without shift.
+ switch (opcode) {
+ case ADD:
+ // The 16-bit ADD that cannot contain high registers can set condition codes
+ // if and only if outside IT block, i.e. if and only if cond == AL.
+ if (!can_contain_high_register &&
+ ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
+ return true;
+ }
+ break;
+ case AND:
+ case BIC:
+ case EOR:
+ case ORR:
+ case MVN:
+ case ADC:
+ case SUB:
+ case SBC:
+ // These 16-bit opcodes set condition codes if and only if outside IT block,
+ // i.e. if and only if cond == AL.
+ if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
+ return true;
+ }
+ break;
+ case RSB:
+ case RSC:
+ // No 16-bit RSB/RSC Rd, Rm, Rn. It would be equivalent to SUB/SBC Rd, Rn, Rm.
+ return true;
+ case CMP:
+ default:
+ break;
+ }
}
}
@@ -1189,7 +1200,7 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1203,10 +1214,10 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
case ADC: thumb_opcode = 10U /* 0b1010 */; break;
case SBC: thumb_opcode = 11U /* 0b1011 */; break;
case RSC: break;
- case TST: thumb_opcode = 0U /* 0b0000 */; set_cc = true; rd = PC; break;
- case TEQ: thumb_opcode = 4U /* 0b0100 */; set_cc = true; rd = PC; break;
- case CMP: thumb_opcode = 13U /* 0b1101 */; set_cc = true; rd = PC; break;
- case CMN: thumb_opcode = 8U /* 0b1000 */; set_cc = true; rd = PC; break;
+ case TST: thumb_opcode = 0U /* 0b0000 */; DCHECK(set_cc == kCcSet); rd = PC; break;
+ case TEQ: thumb_opcode = 4U /* 0b0100 */; DCHECK(set_cc == kCcSet); rd = PC; break;
+ case CMP: thumb_opcode = 13U /* 0b1101 */; DCHECK(set_cc == kCcSet); rd = PC; break;
+ case CMN: thumb_opcode = 8U /* 0b1000 */; DCHECK(set_cc == kCcSet); rd = PC; break;
case ORR: thumb_opcode = 2U /* 0b0010 */; break;
case MOV: thumb_opcode = 2U /* 0b0010 */; rn = PC; break;
case BIC: thumb_opcode = 1U /* 0b0001 */; break;
@@ -1224,7 +1235,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
if (so.IsImmediate()) {
// Check special cases.
if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12))) {
- if (!set_cc) {
+ if (set_cc != kCcSet) {
if (opcode == SUB) {
thumb_opcode = 5U;
} else if (opcode == ADD) {
@@ -1238,7 +1249,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
uint32_t imm8 = imm & 0xff;
encoding = B31 | B30 | B29 | B28 |
- (set_cc ? B20 : B25) |
+ (set_cc == kCcSet ? B20 : B25) |
thumb_opcode << 21 |
rn << 16 |
rd << 8 |
@@ -1254,7 +1265,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
- (set_cc ? B20 : 0) |
+ (set_cc == kCcSet ? B20 : 0) |
rn << 16 |
rd << 8 |
imm;
@@ -1263,7 +1274,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
// Register (possibly shifted)
encoding = B31 | B30 | B29 | B27 | B25 |
thumb_opcode << 21 |
- (set_cc ? B20 : 0) |
+ (set_cc == kCcSet ? B20 : 0) |
rn << 16 |
rd << 8 |
so.encodingThumb();
@@ -1274,7 +1285,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1304,19 +1315,25 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
rn = so.GetRegister();
switch (so.GetShift()) {
- case LSL: thumb_opcode = 0U /* 0b00 */; break;
- case LSR: thumb_opcode = 1U /* 0b01 */; break;
- case ASR: thumb_opcode = 2U /* 0b10 */; break;
- case ROR:
- // ROR doesn't allow immediates.
- thumb_opcode = 7U /* 0b111 */;
- dp_opcode = 1U /* 0b01 */;
- opcode_shift = 6;
- use_immediate = false;
+ case LSL:
+ DCHECK_LE(immediate, 31u);
+ thumb_opcode = 0U /* 0b00 */;
+ break;
+ case LSR:
+ DCHECK(1 <= immediate && immediate <= 32);
+ immediate &= 31; // 32 is encoded as 0.
+ thumb_opcode = 1U /* 0b01 */;
+ break;
+ case ASR:
+ DCHECK(1 <= immediate && immediate <= 32);
+ immediate &= 31; // 32 is encoded as 0.
+ thumb_opcode = 2U /* 0b10 */;
break;
- case RRX: break;
+ case ROR: // No 16-bit ROR immediate.
+ case RRX: // No 16-bit RRX.
default:
- break;
+ LOG(FATAL) << "Unexpected shift: " << so.GetShift();
+ UNREACHABLE();
}
} else {
if (so.IsImmediate()) {
@@ -1334,6 +1351,9 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
case ADC:
case SBC:
case BIC: {
+ // Sets condition codes if and only if outside IT block,
+ // check that it complies with set_cc.
+ DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
if (rn == rd) {
rn = so.GetRegister();
} else {
@@ -1348,9 +1368,17 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
rn = so.GetRegister();
break;
}
- case TST:
- case TEQ:
case MVN: {
+ // Sets condition codes if and only if outside IT block,
+ // check that it complies with set_cc.
+ DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
+ CHECK_EQ(rn, 0);
+ rn = so.GetRegister();
+ break;
+ }
+ case TST:
+ case TEQ: {
+ DCHECK(set_cc == kCcSet);
CHECK_EQ(rn, 0);
rn = so.GetRegister();
break;
@@ -1371,6 +1399,7 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
case TST: thumb_opcode = 8U /* 0b1000 */; CHECK(!use_immediate); break;
case MVN: thumb_opcode = 15U /* 0b1111 */; CHECK(!use_immediate); break;
case CMP: {
+ DCHECK(set_cc == kCcSet);
if (use_immediate) {
// T2 encoding.
dp_opcode = 0;
@@ -1378,6 +1407,13 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
thumb_opcode = 5U /* 0b101 */;
rd_shift = 8;
rn_shift = 8;
+ } else if (IsHighRegister(rd) || IsHighRegister(rn)) {
+ // Special cmp for high registers.
+ dp_opcode = 1U /* 0b01 */;
+ opcode_shift = 7;
+ // Put the top bit of rd into the bottom bit of the opcode.
+ thumb_opcode = 10U /* 0b0001010 */ | static_cast<uint32_t>(rd) >> 3;
+ rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
} else {
thumb_opcode = 10U /* 0b1010 */;
}
@@ -1399,7 +1435,7 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
rn_shift = 8;
} else {
rn = so.GetRegister();
- if (IsHighRegister(rn) || IsHighRegister(rd)) {
+ if (set_cc != kCcSet) {
// Special mov for high registers.
dp_opcode = 1U /* 0b01 */;
opcode_shift = 7;
@@ -1407,6 +1443,8 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
thumb_opcode = 12U /* 0b0001100 */ | static_cast<uint32_t>(rd) >> 3;
rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
} else {
+ DCHECK(!IsHighRegister(rn));
+ DCHECK(!IsHighRegister(rd));
thumb_opcode = 0;
}
}
@@ -1436,9 +1474,9 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
// ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
+void Thumb2Assembler::Emit16BitAddSub(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1449,7 +1487,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
uint8_t immediate_shift = 0;
bool use_immediate = false;
uint32_t immediate = 0; // Should be at most 9 bits but keep the full immediate for CHECKs.
- uint8_t thumb_opcode;;
+ uint8_t thumb_opcode;
if (so.IsImmediate()) {
use_immediate = true;
@@ -1460,7 +1498,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
case ADD:
if (so.IsRegister()) {
Register rm = so.GetRegister();
- if (rn == rd && !set_cc) {
+ if (rn == rd && set_cc != kCcSet) {
// Can use T2 encoding (allows 4 bit registers)
dp_opcode = 1U /* 0b01 */;
opcode_shift = 10;
@@ -1471,6 +1509,12 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
rd = static_cast<Register>(static_cast<uint32_t>(rd) & 7U /* 0b111 */);
} else {
// T1.
+ DCHECK(!IsHighRegister(rd));
+ DCHECK(!IsHighRegister(rn));
+ DCHECK(!IsHighRegister(rm));
+ // Sets condition codes if and only if outside IT block,
+ // check that it complies with set_cc.
+ DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
opcode_shift = 9;
thumb_opcode = 12U /* 0b01100 */;
immediate = static_cast<uint32_t>(so.GetRegister());
@@ -1523,40 +1567,47 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
case SUB:
if (so.IsRegister()) {
- // T1.
- opcode_shift = 9;
- thumb_opcode = 13U /* 0b01101 */;
- immediate = static_cast<uint32_t>(so.GetRegister());
- use_immediate = true;
- immediate_shift = 6;
- } else {
- if (rd == SP && rn == SP) {
- // SUB sp, sp, #imm
- dp_opcode = 2U /* 0b10 */;
- thumb_opcode = 0x61 /* 0b1100001 */;
- opcode_shift = 7;
- CHECK_LT(immediate, (1u << 9));
- CHECK_ALIGNED(immediate, 4);
-
- // Remove rd and rn from instruction by orring it with immed and clearing bits.
- rn = R0;
- rd = R0;
- rd_shift = 0;
- rn_shift = 0;
- immediate >>= 2;
- } else if (rn != rd) {
- // Must use T1.
- opcode_shift = 9;
- thumb_opcode = 15U /* 0b01111 */;
- immediate_shift = 6;
- } else {
- // T2 encoding.
- opcode_shift = 11;
- thumb_opcode = 7U /* 0b111 */;
- rd_shift = 8;
- rn_shift = 8;
- }
- }
+ // T1.
+ Register rm = so.GetRegister();
+ DCHECK(!IsHighRegister(rd));
+ DCHECK(!IsHighRegister(rn));
+ DCHECK(!IsHighRegister(rm));
+ // Sets condition codes if and only if outside IT block,
+ // check that it complies with set_cc.
+ DCHECK((cond == AL) ? set_cc != kCcKeep : set_cc != kCcSet);
+ opcode_shift = 9;
+ thumb_opcode = 13U /* 0b01101 */;
+ immediate = static_cast<uint32_t>(rm);
+ use_immediate = true;
+ immediate_shift = 6;
+ } else {
+ if (rd == SP && rn == SP) {
+ // SUB sp, sp, #imm
+ dp_opcode = 2U /* 0b10 */;
+ thumb_opcode = 0x61 /* 0b1100001 */;
+ opcode_shift = 7;
+ CHECK_LT(immediate, (1u << 9));
+ CHECK_ALIGNED(immediate, 4);
+
+ // Remove rd and rn from instruction by orring it with immed and clearing bits.
+ rn = R0;
+ rd = R0;
+ rd_shift = 0;
+ rn_shift = 0;
+ immediate >>= 2;
+ } else if (rn != rd) {
+ // Must use T1.
+ opcode_shift = 9;
+ thumb_opcode = 15U /* 0b01111 */;
+ immediate_shift = 6;
+ } else {
+ // T2 encoding.
+ opcode_shift = 11;
+ thumb_opcode = 7U /* 0b111 */;
+ rd_shift = 8;
+ rn_shift = 8;
+ }
+ }
break;
default:
LOG(FATAL) << "This opcode is not an ADD or SUB: " << opcode;
@@ -1575,7 +1626,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
void Thumb2Assembler::EmitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1589,9 +1640,15 @@ void Thumb2Assembler::EmitDataProcessing(Condition cond,
}
}
-void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t amount, bool setcc) {
+void Thumb2Assembler::EmitShift(Register rd,
+ Register rm,
+ Shift shift,
+ uint8_t amount,
+ Condition cond,
+ SetCc set_cc) {
CHECK_LT(amount, (1 << 5));
- if (IsHighRegister(rd) || IsHighRegister(rm) || shift == ROR || shift == RRX) {
+ if ((IsHighRegister(rd) || IsHighRegister(rm) || shift == ROR || shift == RRX) ||
+ ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
uint16_t opcode = 0;
switch (shift) {
case LSL: opcode = 0U /* 0b00 */; break;
@@ -1605,7 +1662,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t a
}
// 32 bit.
int32_t encoding = B31 | B30 | B29 | B27 | B25 | B22 |
- 0xf << 16 | (setcc ? B20 : 0);
+ 0xf << 16 | (set_cc == kCcSet ? B20 : 0);
uint32_t imm3 = amount >> 2;
uint32_t imm2 = amount & 3U /* 0b11 */;
encoding |= imm3 << 12 | imm2 << 6 | static_cast<int16_t>(rm) |
@@ -1628,10 +1685,16 @@ void Thumb2Assembler::EmitShift(Register rd, Register rm, Shift shift, uint8_t a
}
}
-void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register rm, bool setcc) {
+void Thumb2Assembler::EmitShift(Register rd,
+ Register rn,
+ Shift shift,
+ Register rm,
+ Condition cond,
+ SetCc set_cc) {
CHECK_NE(shift, RRX);
bool must_be_32bit = false;
- if (IsHighRegister(rd) || IsHighRegister(rm) || IsHighRegister(rn) || rd != rn) {
+ if (IsHighRegister(rd) || IsHighRegister(rm) || IsHighRegister(rn) || rd != rn ||
+ ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
must_be_32bit = true;
}
@@ -1648,7 +1711,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register
}
// 32 bit.
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 |
- 0xf << 12 | (setcc ? B20 : 0);
+ 0xf << 12 | (set_cc == kCcSet ? B20 : 0);
encoding |= static_cast<int16_t>(rn) << 16 | static_cast<int16_t>(rm) |
static_cast<int16_t>(rd) << 8 | opcode << 21;
Emit32(encoding);
@@ -1658,6 +1721,7 @@ void Thumb2Assembler::EmitShift(Register rd, Register rn, Shift shift, Register
case LSL: opcode = 2U /* 0b0010 */; break;
case LSR: opcode = 3U /* 0b0011 */; break;
case ASR: opcode = 4U /* 0b0100 */; break;
+ case ROR: opcode = 7U /* 0b0111 */; break;
default:
LOG(FATAL) << "Unsupported thumb2 shift opcode";
UNREACHABLE();
@@ -2915,70 +2979,70 @@ void Thumb2Assembler::Bind(Label* label) {
void Thumb2Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK_LE(shift_imm, 31u);
CheckCondition(cond);
- EmitShift(rd, rm, LSL, shift_imm, setcc);
+ EmitShift(rd, rm, LSL, shift_imm, cond, set_cc);
}
void Thumb2Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK(1u <= shift_imm && shift_imm <= 32u);
if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
CheckCondition(cond);
- EmitShift(rd, rm, LSR, shift_imm, setcc);
+ EmitShift(rd, rm, LSR, shift_imm, cond, set_cc);
}
void Thumb2Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK(1u <= shift_imm && shift_imm <= 32u);
if (shift_imm == 32) shift_imm = 0; // Comply to UAL syntax.
CheckCondition(cond);
- EmitShift(rd, rm, ASR, shift_imm, setcc);
+ EmitShift(rd, rm, ASR, shift_imm, cond, set_cc);
}
void Thumb2Assembler::Ror(Register rd, Register rm, uint32_t shift_imm,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CHECK(1u <= shift_imm && shift_imm <= 31u);
CheckCondition(cond);
- EmitShift(rd, rm, ROR, shift_imm, setcc);
+ EmitShift(rd, rm, ROR, shift_imm, cond, set_cc);
}
-void Thumb2Assembler::Rrx(Register rd, Register rm, bool setcc, Condition cond) {
+void Thumb2Assembler::Rrx(Register rd, Register rm, Condition cond, SetCc set_cc) {
CheckCondition(cond);
- EmitShift(rd, rm, RRX, rm, setcc);
+ EmitShift(rd, rm, RRX, rm, cond, set_cc);
}
void Thumb2Assembler::Lsl(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CheckCondition(cond);
- EmitShift(rd, rm, LSL, rn, setcc);
+ EmitShift(rd, rm, LSL, rn, cond, set_cc);
}
void Thumb2Assembler::Lsr(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CheckCondition(cond);
- EmitShift(rd, rm, LSR, rn, setcc);
+ EmitShift(rd, rm, LSR, rn, cond, set_cc);
}
void Thumb2Assembler::Asr(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CheckCondition(cond);
- EmitShift(rd, rm, ASR, rn, setcc);
+ EmitShift(rd, rm, ASR, rn, cond, set_cc);
}
void Thumb2Assembler::Ror(Register rd, Register rm, Register rn,
- bool setcc, Condition cond) {
+ Condition cond, SetCc set_cc) {
CheckCondition(cond);
- EmitShift(rd, rm, ROR, rn, setcc);
+ EmitShift(rd, rm, ROR, rn, cond, set_cc);
}
@@ -3173,24 +3237,24 @@ void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t valu
Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
- adds(rd, rn, shifter_op, cond);
+ add(rd, rn, shifter_op, cond, kCcSet);
} else if (ShifterOperandCanHold(rd, rn, ADD, -value, &shifter_op)) {
- subs(rd, rn, shifter_op, cond);
+ sub(rd, rn, shifter_op, cond, kCcSet);
} else {
CHECK(rn != IP);
if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
mvn(IP, shifter_op, cond);
- adds(rd, rn, ShifterOperand(IP), cond);
+ add(rd, rn, ShifterOperand(IP), cond, kCcSet);
} else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
mvn(IP, shifter_op, cond);
- subs(rd, rn, ShifterOperand(IP), cond);
+ sub(rd, rn, ShifterOperand(IP), cond, kCcSet);
} else {
movw(IP, Low16Bits(value), cond);
uint16_t value_high = High16Bits(value);
if (value_high != 0) {
movt(IP, value_high, cond);
}
- adds(rd, rn, ShifterOperand(IP), cond);
+ add(rd, rn, ShifterOperand(IP), cond, kCcSet);
}
}
}
@@ -3316,7 +3380,7 @@ void Thumb2Assembler::StoreToOffset(StoreOperandType type,
}
}
LoadImmediate(tmp_reg, offset, cond);
- add(tmp_reg, tmp_reg, ShifterOperand(base), cond);
+ add(tmp_reg, tmp_reg, ShifterOperand(base), AL);
base = tmp_reg;
offset = 0;
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 41eb5d36f2..c802c27ea6 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -63,25 +63,29 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void FinalizeCode() OVERRIDE;
// Data-processing instructions.
- void and_(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void and_(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void eor(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void eor(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void sub(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void subs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void sub(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void rsb(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void rsbs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void add(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void add(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void adds(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void adc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void adc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void sbc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
-
- void rsc(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
void tst(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
@@ -91,16 +95,17 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void orr(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void orrs(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void orr(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void mov(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void movs(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void mov(Register rd, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void bic(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void bic(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
- void mvn(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
- void mvns(Register rd, const ShifterOperand& so, Condition cond = AL) OVERRIDE;
+ virtual void mvn(Register rd, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
// Miscellaneous data-processing instructions.
void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
@@ -245,25 +250,25 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void blx(Register rm, Condition cond = AL) OVERRIDE;
void bx(Register rm, Condition cond = AL) OVERRIDE;
- void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Lsr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Asr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Ror(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Rrx(Register rd, Register rm, bool setcc = false,
- Condition cond = AL) OVERRIDE;
-
- void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Lsr(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Asr(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
- void Ror(Register rd, Register rm, Register rn, bool setcc = false,
- Condition cond = AL) OVERRIDE;
+ virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Rrx(Register rd, Register rm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+
+ virtual void Lsl(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Lsr(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Asr(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void Ror(Register rd, Register rm, Register rn,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
void Push(Register rd, Condition cond = AL) OVERRIDE;
void Pop(Register rd, Condition cond = AL) OVERRIDE;
@@ -600,7 +605,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
// Emit a single 32 or 16 bit data processing instruction.
void EmitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -609,7 +614,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
// in 16 bits?
bool Is32BitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -617,7 +622,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
// Emit a 32 bit data processing instruction.
void Emit32BitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -625,14 +630,14 @@ class Thumb2Assembler FINAL : public ArmAssembler {
// Emit a 16 bit data processing instruction.
void Emit16BitDataProcessing(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
void Emit16BitAddSub(Condition cond,
Opcode opcode,
- bool set_cc,
+ SetCc set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -694,8 +699,10 @@ class Thumb2Assembler FINAL : public ArmAssembler {
static int DecodeBranchOffset(int32_t inst);
int32_t EncodeTstOffset(int offset, int32_t inst);
int DecodeTstOffset(int32_t inst);
- void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount, bool setcc = false);
- void EmitShift(Register rd, Register rn, Shift shift, Register rm, bool setcc = false);
+ void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount,
+ Condition cond = AL, SetCc set_cc = kCcDontCare);
+ void EmitShift(Register rd, Register rn, Shift shift, Register rm,
+ Condition cond = AL, SetCc set_cc = kCcDontCare);
// Whether the assembler can relocate branches. If false, unresolved branches will be
// emitted on 32bits.
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index cb01cea8ef..b2a354b63c 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -199,6 +199,7 @@ void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
TEST(Thumb2AssemblerTest, SimpleMov) {
arm::Thumb2Assembler assembler;
+ __ movs(R0, ShifterOperand(R1));
__ mov(R0, ShifterOperand(R1));
__ mov(R8, ShifterOperand(R9));
@@ -222,8 +223,8 @@ TEST(Thumb2AssemblerTest, SimpleMovAdd) {
arm::Thumb2Assembler assembler;
__ mov(R0, ShifterOperand(R1));
- __ add(R0, R1, ShifterOperand(R2));
- __ add(R0, R1, ShifterOperand());
+ __ adds(R0, R1, ShifterOperand(R2));
+ __ add(R0, R1, ShifterOperand(0));
EmitAndCheck(&assembler, "SimpleMovAdd");
}
@@ -231,41 +232,132 @@ TEST(Thumb2AssemblerTest, SimpleMovAdd) {
TEST(Thumb2AssemblerTest, DataProcessingRegister) {
arm::Thumb2Assembler assembler;
+ // 32 bit variants using low registers.
+ __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
+ __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ teq(R0, ShifterOperand(R1));
+
+ // 16 bit variants using low registers.
+ __ movs(R0, ShifterOperand(R1));
+ __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
+ __ mvns(R0, ShifterOperand(R1));
+ __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
+ __ adds(R0, R1, ShifterOperand(R2));
+ __ subs(R0, R1, ShifterOperand(R2));
+ __ adcs(R0, R0, ShifterOperand(R1));
+ __ sbcs(R0, R0, ShifterOperand(R1));
+ __ ands(R0, R0, ShifterOperand(R1));
+ __ orrs(R0, R0, ShifterOperand(R1));
+ __ eors(R0, R0, ShifterOperand(R1));
+ __ bics(R0, R0, ShifterOperand(R1));
+ __ tst(R0, ShifterOperand(R1));
+ __ cmp(R0, ShifterOperand(R1));
+ __ cmn(R0, ShifterOperand(R1));
+
+ // 16-bit variants using high registers.
+ __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
+ __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
+ __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
+ __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
+ __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
+ __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
+ __ cmp(R0, ShifterOperand(R9));
+ __ cmp(R8, ShifterOperand(R1));
+ __ cmp(R9, ShifterOperand(R8));
+
+ // The 16-bit RSBS Rd, Rn, #0, also known as NEGS Rd, Rn is specified using
+ // an immediate (0) but emitted without any, so we test it here.
+ __ rsbs(R0, R1, ShifterOperand(0));
+ __ rsbs(R0, R0, ShifterOperand(0)); // Check Rd == Rn code path.
+
+ // 32 bit variants using high registers that would be 16-bit if using low registers.
+ __ movs(R0, ShifterOperand(R8));
+ __ mvns(R0, ShifterOperand(R8));
+ __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
+ __ adds(R0, R1, ShifterOperand(R8));
+ __ subs(R0, R1, ShifterOperand(R8));
+ __ adcs(R0, R0, ShifterOperand(R8));
+ __ sbcs(R0, R0, ShifterOperand(R8));
+ __ ands(R0, R0, ShifterOperand(R8));
+ __ orrs(R0, R0, ShifterOperand(R8));
+ __ eors(R0, R0, ShifterOperand(R8));
+ __ bics(R0, R0, ShifterOperand(R8));
+ __ tst(R0, ShifterOperand(R8));
+ __ cmn(R0, ShifterOperand(R8));
+ __ rsbs(R0, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit.
+ __ rsbs(R8, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit (Rd == Rn).
+
+ // 32-bit variants of instructions that would be 16-bit outside IT block.
+ __ it(arm::EQ);
+ __ mvns(R0, ShifterOperand(R1), arm::EQ);
+ __ it(arm::EQ);
+ __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
+ __ it(arm::EQ);
+ __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
+ __ it(arm::EQ);
+ __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
+ __ it(arm::EQ);
+ __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
+ __ it(arm::EQ);
+ __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
+ __ it(arm::EQ);
+ __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
+ __ it(arm::EQ);
+ __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
+ __ it(arm::EQ);
+ __ bics(R0, R0, ShifterOperand(R1), arm::EQ);
+
+ // 16-bit variants of instructions that would be 32-bit outside IT block.
+ __ it(arm::EQ);
+ __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+ __ it(arm::EQ);
+ __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
+
+ // 16 bit variants selected for the default kCcDontCare.
__ mov(R0, ShifterOperand(R1));
__ mvn(R0, ShifterOperand(R1));
-
- // 32 bit variants.
+ __ add(R0, R0, ShifterOperand(R1));
__ add(R0, R1, ShifterOperand(R2));
__ sub(R0, R1, ShifterOperand(R2));
- __ and_(R0, R1, ShifterOperand(R2));
- __ orr(R0, R1, ShifterOperand(R2));
- __ eor(R0, R1, ShifterOperand(R2));
- __ bic(R0, R1, ShifterOperand(R2));
- __ adc(R0, R1, ShifterOperand(R2));
- __ sbc(R0, R1, ShifterOperand(R2));
- __ rsb(R0, R1, ShifterOperand(R2));
-
- // 16 bit variants.
- __ add(R0, R1, ShifterOperand());
- __ sub(R0, R1, ShifterOperand());
+ __ adc(R0, R0, ShifterOperand(R1));
+ __ sbc(R0, R0, ShifterOperand(R1));
__ and_(R0, R0, ShifterOperand(R1));
__ orr(R0, R0, ShifterOperand(R1));
__ eor(R0, R0, ShifterOperand(R1));
__ bic(R0, R0, ShifterOperand(R1));
- __ adc(R0, R0, ShifterOperand(R1));
- __ sbc(R0, R0, ShifterOperand(R1));
- __ rsb(R0, R0, ShifterOperand(R1));
-
- __ tst(R0, ShifterOperand(R1));
- __ teq(R0, ShifterOperand(R1));
- __ cmp(R0, ShifterOperand(R1));
- __ cmn(R0, ShifterOperand(R1));
-
- __ movs(R0, ShifterOperand(R1));
- __ mvns(R0, ShifterOperand(R1));
+ __ mov(R1, ShifterOperand(R8));
+ __ mov(R9, ShifterOperand(R0));
+ __ mov(R8, ShifterOperand(R9));
+ __ add(R1, R1, ShifterOperand(R8));
+ __ add(R9, R9, ShifterOperand(R0));
+ __ add(R8, R8, ShifterOperand(R9));
+ __ rsb(R0, R1, ShifterOperand(0));
+ __ rsb(R0, R0, ShifterOperand(0));
- // 32 bit variants.
- __ add(R12, R1, ShifterOperand(R0));
+ // And an arbitrary 32-bit instruction using IP.
+ __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);
EmitAndCheck(&assembler, "DataProcessingRegister");
}
@@ -296,6 +388,9 @@ TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
__ movs(R0, ShifterOperand(0x55));
__ mvns(R0, ShifterOperand(0x55));
+ __ adds(R0, R1, ShifterOperand(5));
+ __ subs(R0, R1, ShifterOperand(5));
+
EmitAndCheck(&assembler, "DataProcessingImmediate");
}
@@ -340,18 +435,30 @@ TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
arm::Thumb2Assembler assembler;
- __ mov(R3, ShifterOperand(R4, LSL, 4));
- __ mov(R3, ShifterOperand(R4, LSR, 5));
- __ mov(R3, ShifterOperand(R4, ASR, 6));
- __ mov(R3, ShifterOperand(R4, ROR, 7));
- __ mov(R3, ShifterOperand(R4, ROR));
+ // 16-bit variants.
+ __ movs(R3, ShifterOperand(R4, LSL, 4));
+ __ movs(R3, ShifterOperand(R4, LSR, 5));
+ __ movs(R3, ShifterOperand(R4, ASR, 6));
- // 32 bit variants.
- __ mov(R8, ShifterOperand(R4, LSL, 4));
- __ mov(R8, ShifterOperand(R4, LSR, 5));
- __ mov(R8, ShifterOperand(R4, ASR, 6));
- __ mov(R8, ShifterOperand(R4, ROR, 7));
- __ mov(R8, ShifterOperand(R4, RRX));
+ // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
+ __ movs(R3, ShifterOperand(R4, ROR, 7));
+
+ // 32-bit RRX because RRX has no 16-bit version.
+ __ movs(R3, ShifterOperand(R4, RRX));
+
+ // 32 bit variants (not setting condition codes).
+ __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
+ __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
+ __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
+ __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
+ __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);
+
+ // 32 bit variants (high registers).
+ __ movs(R8, ShifterOperand(R4, LSL, 4));
+ __ movs(R8, ShifterOperand(R4, LSR, 5));
+ __ movs(R8, ShifterOperand(R4, ASR, 6));
+ __ movs(R8, ShifterOperand(R4, ROR, 7));
+ __ movs(R8, ShifterOperand(R4, RRX));
EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
}
@@ -1023,7 +1130,7 @@ TEST(Thumb2AssemblerTest, MixedBranch32) {
TEST(Thumb2AssemblerTest, Shifts) {
arm::Thumb2Assembler assembler;
- // 16 bit
+ // 16 bit selected for CcDontCare.
__ Lsl(R0, R1, 5);
__ Lsr(R0, R1, 5);
__ Asr(R0, R1, 5);
@@ -1031,6 +1138,32 @@ TEST(Thumb2AssemblerTest, Shifts) {
__ Lsl(R0, R0, R1);
__ Lsr(R0, R0, R1);
__ Asr(R0, R0, R1);
+ __ Ror(R0, R0, R1);
+
+ // 16 bit with kCcSet.
+ __ Lsls(R0, R1, 5);
+ __ Lsrs(R0, R1, 5);
+ __ Asrs(R0, R1, 5);
+
+ __ Lsls(R0, R0, R1);
+ __ Lsrs(R0, R0, R1);
+ __ Asrs(R0, R0, R1);
+ __ Rors(R0, R0, R1);
+
+ // 32-bit with kCcKeep.
+ __ Lsl(R0, R1, 5, AL, kCcKeep);
+ __ Lsr(R0, R1, 5, AL, kCcKeep);
+ __ Asr(R0, R1, 5, AL, kCcKeep);
+
+ __ Lsl(R0, R0, R1, AL, kCcKeep);
+ __ Lsr(R0, R0, R1, AL, kCcKeep);
+ __ Asr(R0, R0, R1, AL, kCcKeep);
+ __ Ror(R0, R0, R1, AL, kCcKeep);
+
+ // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
+ __ Ror(R0, R1, 5);
+ __ Rors(R0, R1, 5);
+ __ Ror(R0, R1, 5, AL, kCcKeep);
// 32 bit due to high registers.
__ Lsl(R8, R1, 5);
@@ -1052,21 +1185,21 @@ TEST(Thumb2AssemblerTest, Shifts) {
// S bit (all 32 bit)
// 32 bit due to high registers.
- __ Lsl(R8, R1, 5, true);
- __ Lsr(R0, R8, 5, true);
- __ Asr(R8, R1, 5, true);
- __ Ror(R0, R8, 5, true);
+ __ Lsls(R8, R1, 5);
+ __ Lsrs(R0, R8, 5);
+ __ Asrs(R8, R1, 5);
+ __ Rors(R0, R8, 5);
// 32 bit due to different Rd and Rn.
- __ Lsl(R0, R1, R2, true);
- __ Lsr(R0, R1, R2, true);
- __ Asr(R0, R1, R2, true);
- __ Ror(R0, R1, R2, true);
+ __ Lsls(R0, R1, R2);
+ __ Lsrs(R0, R1, R2);
+ __ Asrs(R0, R1, R2);
+ __ Rors(R0, R1, R2);
// 32 bit due to use of high registers.
- __ Lsl(R8, R1, R2, true);
- __ Lsr(R0, R8, R2, true);
- __ Asr(R0, R1, R8, true);
+ __ Lsls(R8, R1, R2);
+ __ Lsrs(R0, R8, R2);
+ __ Asrs(R0, R1, R8);
EmitAndCheck(&assembler, "Shifts");
}
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 280ed779b3..82ad6429bf 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,8 +1,9 @@
const char* SimpleMovResults[] = {
" 0: 0008 movs r0, r1\n",
- " 2: 46c8 mov r8, r9\n",
- " 4: 2001 movs r0, #1\n",
- " 6: f04f 0809 mov.w r8, #9\n",
+ " 2: 4608 mov r0, r1\n",
+ " 4: 46c8 mov r8, r9\n",
+ " 6: 2001 movs r0, #1\n",
+ " 8: f04f 0809 mov.w r8, #9\n",
nullptr
};
const char* SimpleMov32Results[] = {
@@ -11,39 +12,120 @@ const char* SimpleMov32Results[] = {
nullptr
};
const char* SimpleMovAddResults[] = {
- " 0: 0008 movs r0, r1\n",
+ " 0: 4608 mov r0, r1\n",
" 2: 1888 adds r0, r1, r2\n",
" 4: 1c08 adds r0, r1, #0\n",
nullptr
};
const char* DataProcessingRegisterResults[] = {
- " 0: 0008 movs r0, r1\n",
- " 2: 43c8 mvns r0, r1\n",
- " 4: 1888 adds r0, r1, r2\n",
- " 6: 1a88 subs r0, r1, r2\n",
- " 8: ea01 0002 and.w r0, r1, r2\n",
- " c: ea41 0002 orr.w r0, r1, r2\n",
- " 10: ea81 0002 eor.w r0, r1, r2\n",
- " 14: ea21 0002 bic.w r0, r1, r2\n",
- " 18: eb41 0002 adc.w r0, r1, r2\n",
- " 1c: eb61 0002 sbc.w r0, r1, r2\n",
- " 20: ebc1 0002 rsb r0, r1, r2\n",
- " 24: 1c08 adds r0, r1, #0\n",
- " 26: 1e08 subs r0, r1, #0\n",
- " 28: 4008 ands r0, r1\n",
- " 2a: 4308 orrs r0, r1\n",
- " 2c: 4048 eors r0, r1\n",
- " 2e: 4388 bics r0, r1\n",
- " 30: 4148 adcs r0, r1\n",
- " 32: 4188 sbcs r0, r1\n",
- " 34: 4248 negs r0, r1\n",
- " 36: 4208 tst r0, r1\n",
- " 38: ea90 0f01 teq r0, r1\n",
- " 3c: 4288 cmp r0, r1\n",
- " 3e: 42c8 cmn r0, r1\n",
- " 40: 0008 movs r0, r1\n",
- " 42: 43c8 mvns r0, r1\n",
- " 44: eb01 0c00 add.w ip, r1, r0\n",
+ " 0: ea6f 0001 mvn.w r0, r1\n",
+ " 4: eb01 0002 add.w r0, r1, r2\n",
+ " 8: eba1 0002 sub.w r0, r1, r2\n",
+ " c: ea01 0002 and.w r0, r1, r2\n",
+ " 10: ea41 0002 orr.w r0, r1, r2\n",
+ " 14: ea81 0002 eor.w r0, r1, r2\n",
+ " 18: ea21 0002 bic.w r0, r1, r2\n",
+ " 1c: eb41 0002 adc.w r0, r1, r2\n",
+ " 20: eb61 0002 sbc.w r0, r1, r2\n",
+ " 24: ebc1 0002 rsb r0, r1, r2\n",
+ " 28: ea90 0f01 teq r0, r1\n",
+ " 2c: 0008 movs r0, r1\n",
+ " 2e: 4608 mov r0, r1\n",
+ " 30: 43c8 mvns r0, r1\n",
+ " 32: 4408 add r0, r1\n",
+ " 34: 1888 adds r0, r1, r2\n",
+ " 36: 1a88 subs r0, r1, r2\n",
+ " 38: 4148 adcs r0, r1\n",
+ " 3a: 4188 sbcs r0, r1\n",
+ " 3c: 4008 ands r0, r1\n",
+ " 3e: 4308 orrs r0, r1\n",
+ " 40: 4048 eors r0, r1\n",
+ " 42: 4388 bics r0, r1\n",
+ " 44: 4208 tst r0, r1\n",
+ " 46: 4288 cmp r0, r1\n",
+ " 48: 42c8 cmn r0, r1\n",
+ " 4a: 4641 mov r1, r8\n",
+ " 4c: 4681 mov r9, r0\n",
+ " 4e: 46c8 mov r8, r9\n",
+ " 50: 4441 add r1, r8\n",
+ " 52: 4481 add r9, r0\n",
+ " 54: 44c8 add r8, r9\n",
+ " 56: 4548 cmp r0, r9\n",
+ " 58: 4588 cmp r8, r1\n",
+ " 5a: 45c1 cmp r9, r8\n",
+ " 5c: 4248 negs r0, r1\n",
+ " 5e: 4240 negs r0, r0\n",
+ " 60: ea5f 0008 movs.w r0, r8\n",
+ " 64: ea7f 0008 mvns.w r0, r8\n",
+ " 68: eb01 0008 add.w r0, r1, r8\n",
+ " 6c: eb11 0008 adds.w r0, r1, r8\n",
+ " 70: ebb1 0008 subs.w r0, r1, r8\n",
+ " 74: eb50 0008 adcs.w r0, r0, r8\n",
+ " 78: eb70 0008 sbcs.w r0, r0, r8\n",
+ " 7c: ea10 0008 ands.w r0, r0, r8\n",
+ " 80: ea50 0008 orrs.w r0, r0, r8\n",
+ " 84: ea90 0008 eors.w r0, r0, r8\n",
+ " 88: ea30 0008 bics.w r0, r0, r8\n",
+ " 8c: ea10 0f08 tst.w r0, r8\n",
+ " 90: eb10 0f08 cmn.w r0, r8\n",
+ " 94: f1d8 0000 rsbs r0, r8, #0\n",
+ " 98: f1d8 0800 rsbs r8, r8, #0\n",
+ " 9c: bf08 it eq\n",
+ " 9e: ea7f 0001 mvnseq.w r0, r1\n",
+ " a2: bf08 it eq\n",
+ " a4: eb11 0002 addseq.w r0, r1, r2\n",
+ " a8: bf08 it eq\n",
+ " aa: ebb1 0002 subseq.w r0, r1, r2\n",
+ " ae: bf08 it eq\n",
+ " b0: eb50 0001 adcseq.w r0, r0, r1\n",
+ " b4: bf08 it eq\n",
+ " b6: eb70 0001 sbcseq.w r0, r0, r1\n",
+ " ba: bf08 it eq\n",
+ " bc: ea10 0001 andseq.w r0, r0, r1\n",
+ " c0: bf08 it eq\n",
+ " c2: ea50 0001 orrseq.w r0, r0, r1\n",
+ " c6: bf08 it eq\n",
+ " c8: ea90 0001 eorseq.w r0, r0, r1\n",
+ " cc: bf08 it eq\n",
+ " ce: ea30 0001 bicseq.w r0, r0, r1\n",
+ " d2: bf08 it eq\n",
+ " d4: 43c8 mvneq r0, r1\n",
+ " d6: bf08 it eq\n",
+ " d8: 1888 addeq r0, r1, r2\n",
+ " da: bf08 it eq\n",
+ " dc: 1a88 subeq r0, r1, r2\n",
+ " de: bf08 it eq\n",
+ " e0: 4148 adceq r0, r1\n",
+ " e2: bf08 it eq\n",
+ " e4: 4188 sbceq r0, r1\n",
+ " e6: bf08 it eq\n",
+ " e8: 4008 andeq r0, r1\n",
+ " ea: bf08 it eq\n",
+ " ec: 4308 orreq r0, r1\n",
+ " ee: bf08 it eq\n",
+ " f0: 4048 eoreq r0, r1\n",
+ " f2: bf08 it eq\n",
+ " f4: 4388 biceq r0, r1\n",
+ " f6: 4608 mov r0, r1\n",
+ " f8: 43c8 mvns r0, r1\n",
+ " fa: 4408 add r0, r1\n",
+ " fc: 1888 adds r0, r1, r2\n",
+ " fe: 1a88 subs r0, r1, r2\n",
+ " 100: 4148 adcs r0, r1\n",
+ " 102: 4188 sbcs r0, r1\n",
+ " 104: 4008 ands r0, r1\n",
+ " 106: 4308 orrs r0, r1\n",
+ " 108: 4048 eors r0, r1\n",
+ " 10a: 4388 bics r0, r1\n",
+ " 10c: 4641 mov r1, r8\n",
+ " 10e: 4681 mov r9, r0\n",
+ " 110: 46c8 mov r8, r9\n",
+ " 112: 4441 add r1, r8\n",
+ " 114: 4481 add r9, r0\n",
+ " 116: 44c8 add r8, r9\n",
+ " 118: 4248 negs r0, r1\n",
+ " 11a: 4240 negs r0, r0\n",
+ " 11c: eb01 0c00 add.w ip, r1, r0\n",
nullptr
};
const char* DataProcessingImmediateResults[] = {
@@ -66,6 +148,8 @@ const char* DataProcessingImmediateResults[] = {
" 3a: 1f48 subs r0, r1, #5\n",
" 3c: 2055 movs r0, #85 ; 0x55\n",
" 3e: f07f 0055 mvns.w r0, #85 ; 0x55\n",
+ " 42: 1d48 adds r0, r1, #5\n",
+ " 44: 1f48 subs r0, r1, #5\n",
nullptr
};
const char* DataProcessingModifiedImmediateResults[] = {
@@ -100,13 +184,18 @@ const char* DataProcessingShiftedRegisterResults[] = {
" 0: 0123 lsls r3, r4, #4\n",
" 2: 0963 lsrs r3, r4, #5\n",
" 4: 11a3 asrs r3, r4, #6\n",
- " 6: ea4f 13f4 mov.w r3, r4, ror #7\n",
- " a: 41e3 rors r3, r4\n",
- " c: ea4f 1804 mov.w r8, r4, lsl #4\n",
- " 10: ea4f 1854 mov.w r8, r4, lsr #5\n",
- " 14: ea4f 18a4 mov.w r8, r4, asr #6\n",
- " 18: ea4f 18f4 mov.w r8, r4, ror #7\n",
- " 1c: ea4f 0834 mov.w r8, r4, rrx\n",
+ " 6: ea5f 13f4 movs.w r3, r4, ror #7\n",
+ " a: ea5f 0334 movs.w r3, r4, rrx\n",
+ " e: ea4f 1304 mov.w r3, r4, lsl #4\n",
+ " 12: ea4f 1354 mov.w r3, r4, lsr #5\n",
+ " 16: ea4f 13a4 mov.w r3, r4, asr #6\n",
+ " 1a: ea4f 13f4 mov.w r3, r4, ror #7\n",
+ " 1e: ea4f 0334 mov.w r3, r4, rrx\n",
+ " 22: ea5f 1804 movs.w r8, r4, lsl #4\n",
+ " 26: ea5f 1854 movs.w r8, r4, lsr #5\n",
+ " 2a: ea5f 18a4 movs.w r8, r4, asr #6\n",
+ " 2e: ea5f 18f4 movs.w r8, r4, ror #7\n",
+ " 32: ea5f 0834 movs.w r8, r4, rrx\n",
nullptr
};
const char* BasicLoadResults[] = {
@@ -1511,7 +1600,7 @@ const char* Max16BitBranchResults[] = {
" 7fc: 23fa movs r3, #250 ; 0xfa\n",
" 7fe: 23fc movs r3, #252 ; 0xfc\n",
" 800: 23fe movs r3, #254 ; 0xfe\n",
- " 802: 0011 movs r1, r2\n",
+ " 802: 4611 mov r1, r2\n",
nullptr
};
const char* Branch32Results[] = {
@@ -2541,7 +2630,7 @@ const char* Branch32Results[] = {
" 800: 23fc movs r3, #252 ; 0xfc\n",
" 802: 23fe movs r3, #254 ; 0xfe\n",
" 804: 2300 movs r3, #0\n",
- " 806: 0011 movs r1, r2\n",
+ " 806: 4611 mov r1, r2\n",
nullptr
};
const char* CompareAndBranchMaxResults[] = {
@@ -2610,7 +2699,7 @@ const char* CompareAndBranchMaxResults[] = {
" 7c: 237a movs r3, #122 ; 0x7a\n",
" 7e: 237c movs r3, #124 ; 0x7c\n",
" 80: 237e movs r3, #126 ; 0x7e\n",
- " 82: 0011 movs r1, r2\n",
+ " 82: 4611 mov r1, r2\n",
nullptr
};
const char* CompareAndBranchRelocation16Results[] = {
@@ -2681,7 +2770,7 @@ const char* CompareAndBranchRelocation16Results[] = {
" 80: 237c movs r3, #124 ; 0x7c\n",
" 82: 237e movs r3, #126 ; 0x7e\n",
" 84: 2380 movs r3, #128 ; 0x80\n",
- " 86: 0011 movs r1, r2\n",
+ " 86: 4611 mov r1, r2\n",
nullptr
};
const char* CompareAndBranchRelocation32Results[] = {
@@ -3712,7 +3801,7 @@ const char* CompareAndBranchRelocation32Results[] = {
" 802: 23fc movs r3, #252 ; 0xfc\n",
" 804: 23fe movs r3, #254 ; 0xfe\n",
" 806: 2300 movs r3, #0\n",
- " 808: 0011 movs r1, r2\n",
+ " 808: 4611 mov r1, r2\n",
nullptr
};
const char* MixedBranch32Results[] = {
@@ -4743,7 +4832,7 @@ const char* MixedBranch32Results[] = {
" 802: 23fe movs r3, #254 ; 0xfe\n",
" 804: 2300 movs r3, #0\n",
" 806: f7ff bbfd b.w 4 <MixedBranch32+0x4>\n",
- " 80a: 0011 movs r1, r2\n",
+ " 80a: 4611 mov r1, r2\n",
nullptr
};
const char* ShiftsResults[] = {
@@ -4753,28 +4842,46 @@ const char* ShiftsResults[] = {
" 6: 4088 lsls r0, r1\n",
" 8: 40c8 lsrs r0, r1\n",
" a: 4108 asrs r0, r1\n",
- " c: ea4f 1841 mov.w r8, r1, lsl #5\n",
- " 10: ea4f 1058 mov.w r0, r8, lsr #5\n",
- " 14: ea4f 1861 mov.w r8, r1, asr #5\n",
- " 18: ea4f 1078 mov.w r0, r8, ror #5\n",
- " 1c: fa01 f002 lsl.w r0, r1, r2\n",
- " 20: fa21 f002 lsr.w r0, r1, r2\n",
- " 24: fa41 f002 asr.w r0, r1, r2\n",
- " 28: fa61 f002 ror.w r0, r1, r2\n",
- " 2c: fa01 f802 lsl.w r8, r1, r2\n",
- " 30: fa28 f002 lsr.w r0, r8, r2\n",
- " 34: fa41 f008 asr.w r0, r1, r8\n",
- " 38: ea5f 1841 movs.w r8, r1, lsl #5\n",
- " 3c: ea5f 1058 movs.w r0, r8, lsr #5\n",
- " 40: ea5f 1861 movs.w r8, r1, asr #5\n",
- " 44: ea5f 1078 movs.w r0, r8, ror #5\n",
- " 48: fa11 f002 lsls.w r0, r1, r2\n",
- " 4c: fa31 f002 lsrs.w r0, r1, r2\n",
- " 50: fa51 f002 asrs.w r0, r1, r2\n",
- " 54: fa71 f002 rors.w r0, r1, r2\n",
- " 58: fa11 f802 lsls.w r8, r1, r2\n",
- " 5c: fa38 f002 lsrs.w r0, r8, r2\n",
- " 60: fa51 f008 asrs.w r0, r1, r8\n",
+ " c: 41c8 rors r0, r1\n",
+ " e: 0148 lsls r0, r1, #5\n",
+ " 10: 0948 lsrs r0, r1, #5\n",
+ " 12: 1148 asrs r0, r1, #5\n",
+ " 14: 4088 lsls r0, r1\n",
+ " 16: 40c8 lsrs r0, r1\n",
+ " 18: 4108 asrs r0, r1\n",
+ " 1a: 41c8 rors r0, r1\n",
+ " 1c: ea4f 1041 mov.w r0, r1, lsl #5\n",
+ " 20: ea4f 1051 mov.w r0, r1, lsr #5\n",
+ " 24: ea4f 1061 mov.w r0, r1, asr #5\n",
+ " 28: fa00 f001 lsl.w r0, r0, r1\n",
+ " 2c: fa20 f001 lsr.w r0, r0, r1\n",
+ " 30: fa40 f001 asr.w r0, r0, r1\n",
+ " 34: fa60 f001 ror.w r0, r0, r1\n",
+ " 38: ea4f 1071 mov.w r0, r1, ror #5\n",
+ " 3c: ea5f 1071 movs.w r0, r1, ror #5\n",
+ " 40: ea4f 1071 mov.w r0, r1, ror #5\n",
+ " 44: ea4f 1841 mov.w r8, r1, lsl #5\n",
+ " 48: ea4f 1058 mov.w r0, r8, lsr #5\n",
+ " 4c: ea4f 1861 mov.w r8, r1, asr #5\n",
+ " 50: ea4f 1078 mov.w r0, r8, ror #5\n",
+ " 54: fa01 f002 lsl.w r0, r1, r2\n",
+ " 58: fa21 f002 lsr.w r0, r1, r2\n",
+ " 5c: fa41 f002 asr.w r0, r1, r2\n",
+ " 60: fa61 f002 ror.w r0, r1, r2\n",
+ " 64: fa01 f802 lsl.w r8, r1, r2\n",
+ " 68: fa28 f002 lsr.w r0, r8, r2\n",
+ " 6c: fa41 f008 asr.w r0, r1, r8\n",
+ " 70: ea5f 1841 movs.w r8, r1, lsl #5\n",
+ " 74: ea5f 1058 movs.w r0, r8, lsr #5\n",
+ " 78: ea5f 1861 movs.w r8, r1, asr #5\n",
+ " 7c: ea5f 1078 movs.w r0, r8, ror #5\n",
+ " 80: fa11 f002 lsls.w r0, r1, r2\n",
+ " 84: fa31 f002 lsrs.w r0, r1, r2\n",
+ " 88: fa51 f002 asrs.w r0, r1, r2\n",
+ " 8c: fa71 f002 rors.w r0, r1, r2\n",
+ " 90: fa11 f802 lsls.w r8, r1, r2\n",
+ " 94: fa38 f002 lsrs.w r0, r8, r2\n",
+ " 98: fa51 f008 asrs.w r0, r1, r8\n",
nullptr
};
const char* LoadStoreRegOffsetResults[] = {
diff --git a/dexlist/Android.mk b/dexlist/Android.mk
index 9fbd8470b8..6ec6c97dce 100755
--- a/dexlist/Android.mk
+++ b/dexlist/Android.mk
@@ -14,8 +14,6 @@
# TODO(ajcbik): Art-i-fy this makefile
-# TODO(ajcbik): rename dexlist2 into dexlist when Dalvik version is removed
-
LOCAL_PATH:= $(call my-dir)
dexlist_src_files := dexlist.cc
@@ -33,7 +31,7 @@ LOCAL_SRC_FILES := $(dexlist_src_files)
LOCAL_C_INCLUDES := $(dexlist_c_includes)
LOCAL_CFLAGS += -Wall
LOCAL_SHARED_LIBRARIES += $(dexlist_libraries)
-LOCAL_MODULE := dexlist2
+LOCAL_MODULE := dexlist
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
include $(BUILD_EXECUTABLE)
@@ -49,6 +47,6 @@ LOCAL_SRC_FILES := $(dexlist_src_files)
LOCAL_C_INCLUDES := $(dexlist_c_includes)
LOCAL_CFLAGS += -Wall
LOCAL_SHARED_LIBRARIES += $(dexlist_libraries)
-LOCAL_MODULE := dexlist2
+LOCAL_MODULE := dexlist
LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host)
include $(BUILD_HOST_EXECUTABLE)
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index d8fd242024..1d0f75ea92 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -235,7 +235,7 @@ int dexlistDriver(int argc, char** argv) {
gOptions.outputFileName = optarg;
break;
case 'm':
- // If -m x.y.z is given, then find all instances of the
+ // If -m p.c.m is given, then find all instances of the
// fully-qualified method name. This isn't really what
// dexlist is for, but it's easy to do it here.
{
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 7b1b63dba7..82179dea95 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -42,12 +42,11 @@ class DexListTest : public CommonRuntimeTest {
// Runs test with given arguments.
bool Exec(const std::vector<std::string>& args, std::string* error_msg) {
- // TODO(ajcbik): dexlist2 -> dexlist
std::string file_path = GetTestAndroidRoot();
if (IsHost()) {
- file_path += "/bin/dexlist2";
+ file_path += "/bin/dexlist";
} else {
- file_path += "/xbin/dexlist2";
+ file_path += "/xbin/dexlist";
}
EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
std::vector<std::string> exec_argv = { file_path };
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 1950d56419..c553a18561 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -78,6 +78,21 @@ const char* image_roots_descriptions_[] = {
"kClassRoots",
};
+// Map is so that we don't allocate multiple dex files for the same OatDexFile.
+static std::map<const OatFile::OatDexFile*,
+ std::unique_ptr<const DexFile>> opened_dex_files;
+
+const DexFile* OpenDexFile(const OatFile::OatDexFile* oat_dex_file, std::string* error_msg) {
+ DCHECK(oat_dex_file != nullptr);
+ auto it = opened_dex_files.find(oat_dex_file);
+ if (it != opened_dex_files.end()) {
+ return it->second.get();
+ }
+ const DexFile* ret = oat_dex_file->OpenDexFile(error_msg).release();
+ opened_dex_files.emplace(oat_dex_file, std::unique_ptr<const DexFile>(ret));
+ return ret;
+}
+
class OatSymbolizer FINAL {
public:
class RodataWriter FINAL : public CodeOutput {
@@ -159,8 +174,8 @@ class OatSymbolizer FINAL {
void WalkOatDexFile(const OatFile::OatDexFile* oat_dex_file, Callback callback) {
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
- if (dex_file.get() == nullptr) {
+ const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
+ if (dex_file == nullptr) {
return;
}
for (size_t class_def_index = 0;
@@ -172,7 +187,7 @@ class OatSymbolizer FINAL {
switch (type) {
case kOatClassAllCompiled:
case kOatClassSomeCompiled:
- WalkOatClass(oat_class, *dex_file.get(), class_def, callback);
+ WalkOatClass(oat_class, *dex_file, class_def, callback);
break;
case kOatClassNoneCompiled:
@@ -504,8 +519,8 @@ class OatDumper {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
- if (dex_file.get() == nullptr) {
+ const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
+ if (dex_file == nullptr) {
LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
<< "': " << error_msg;
} else {
@@ -533,8 +548,8 @@ class OatDumper {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
- if (dex_file.get() == nullptr) {
+ const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
+ if (dex_file == nullptr) {
LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
<< "': " << error_msg;
continue;
@@ -593,8 +608,8 @@ class OatDumper {
// Create the verifier early.
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
- if (dex_file.get() == nullptr) {
+ const DexFile* const dex_file = OpenDexFile(&oat_dex_file, &error_msg);
+ if (dex_file == nullptr) {
os << "NOT FOUND: " << error_msg << "\n\n";
os << std::flush;
return false;
@@ -621,7 +636,7 @@ class OatDumper {
<< " (" << oat_class.GetType() << ")\n";
// TODO: include bitmap here if type is kOatClassSomeCompiled?
if (options_.list_classes_) continue;
- if (!DumpOatClass(&vios, oat_class, *(dex_file.get()), class_def, &stop_analysis)) {
+ if (!DumpOatClass(&vios, oat_class, *dex_file, class_def, &stop_analysis)) {
success = false;
}
if (stop_analysis) {
@@ -638,7 +653,7 @@ class OatDumper {
std::string error_msg;
std::string dex_file_location = oat_dex_file.GetDexFileLocation();
- std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
+ const DexFile* const dex_file = OpenDexFile(&oat_dex_file, &error_msg);
if (dex_file == nullptr) {
os << "Failed to open dex file '" << dex_file_location << "': " << error_msg;
return false;
@@ -2337,21 +2352,17 @@ static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOpti
ScopedObjectAccess soa(self);
ClassLinker* class_linker = runtime->GetClassLinker();
class_linker->RegisterOatFile(oat_file);
- std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::vector<const DexFile*> class_path;
for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file = odf->OpenDexFile(&error_msg);
+ const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
class_linker->RegisterDexFile(*dex_file);
- dex_files.push_back(std::move(dex_file));
+ class_path.push_back(dex_file);
}
// Need a class loader.
// Fake that we're a compiler.
- std::vector<const DexFile*> class_path;
- for (auto& dex_file : dex_files) {
- class_path.push_back(dex_file.get());
- }
jobject class_loader = class_linker->CreatePathClassLoader(self, class_path);
// Use the class loader while dumping.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index be9af9871d..1599025697 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -167,7 +167,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
- qpoints->pDeoptimize = art_quick_deoptimize;
+ // Deoptimization from compiled code.
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index d6396c18d8..e45d828584 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1141,6 +1141,17 @@ ENTRY art_quick_deoptimize
END art_quick_deoptimize
/*
+ * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimizeFromCompiledCode
+ENTRY art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
+ mov r0, r9 @ Set up args.
+ blx artDeoptimizeFromCompiledCode @ artDeoptimizeFromCompiledCode(Thread*)
+END art_quick_deoptimize_from_compiled_code
+
+ /*
* Signed 64-bit integer multiply.
*
* Consider WXxYZ (r1r0 x r3r2) with a long multiply:
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 0f06727d0d..e9c816f260 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -150,8 +150,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
- // Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize;
+ // Deoptimization from compiled code.
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index bfef0fa74a..169bc384a8 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1739,6 +1739,18 @@ ENTRY art_quick_deoptimize
brk 0
END art_quick_deoptimize
+ /*
+ * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimizeFromCompiledCode
+ENTRY art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ mov x0, xSELF // Pass thread.
+ bl artDeoptimizeFromCompiledCode // artDeoptimizeFromCompiledCode(Thread*)
+ brk 0
+END art_quick_deoptimize_from_compiled_code
+
/*
* String's indexOf.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 4e4b91fdcd..6721e5452f 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -267,8 +267,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
- // Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize;
+ // Deoptimization from compiled code.
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct.");
// Atomic 64-bit load/store
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index cb49cf5b39..ba58c3fccb 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1542,6 +1542,18 @@ ENTRY art_quick_deoptimize
END art_quick_deoptimize
/*
+ * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimizeFromCompiledCode
+ENTRY art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ jal artDeoptimizeFromCompiledCode # artDeoptimizeFromCompiledCode(Thread*)
+ # Does not return; deoptimization long-jumps.
+ move $a0, rSELF # pass Thread::current
+END art_quick_deoptimize_from_compiled_code
+
+ /*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index ec02d5ab69..9f1f0e021c 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -176,8 +176,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
- // Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize;
+ // Deoptimization from compiled code.
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
// TODO - use lld/scd instructions for Mips64
// Atomic 64-bit load/store
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 4bc049cfbd..1b50b2e246 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1603,5 +1603,17 @@ ENTRY art_quick_deoptimize
move $a0, rSELF # pass Thread::current
END art_quick_deoptimize
+ /*
+ * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimizeFromCompiledCode
+ENTRY art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ jal artDeoptimizeFromCompiledCode # artDeoptimizeFromCompiledCode(Thread*)
+ # Does not return; deoptimization long-jumps.
+ move $a0, rSELF # pass Thread::current
+END art_quick_deoptimize_from_compiled_code
+
UNIMPLEMENTED art_quick_indexof
UNIMPLEMENTED art_quick_string_compareto
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index e2632c103b..10fc281e3d 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -140,7 +140,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
// Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_slow_path;
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 9b2d59d330..029a296e5a 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1684,9 +1684,6 @@ END_FUNCTION art_quick_instrumentation_exit
*/
DEFINE_FUNCTION art_quick_deoptimize
PUSH ebx // Entry point for a jump. Fake that we were called.
-.globl SYMBOL(art_quick_deoptimize_from_compiled_slow_path) // Entry point for real calls
- // from compiled slow paths.
-SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
@@ -1697,6 +1694,20 @@ SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
END_FUNCTION art_quick_deoptimize
/*
+ * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
+ subl LITERAL(12), %esp // Align stack.
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artDeoptimizeFromCompiledCode) // artDeoptimizeFromCompiledCode(Thread*)
+ UNREACHABLE
+END_FUNCTION art_quick_deoptimize_from_compiled_code
+
+ /*
* String's compareTo.
*
* On entry:
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index ef1bb5f9a7..5cc72e3c4b 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -144,7 +144,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
// Deoptimize
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_slow_path;
+ qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 88270d9902..1498a4b7e3 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1728,9 +1728,6 @@ END_FUNCTION art_quick_instrumentation_exit
DEFINE_FUNCTION art_quick_deoptimize
pushq %rsi // Entry point for a jump. Fake that we were called.
// Use hidden arg.
-.globl SYMBOL(art_quick_deoptimize_from_compiled_slow_path) // Entry point for real calls
- // from compiled slow paths.
-SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
@@ -1739,6 +1736,18 @@ SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
END_FUNCTION art_quick_deoptimize
/*
+ * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ // Stack should be aligned now.
+ movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
+ call SYMBOL(artDeoptimizeFromCompiledCode) // artDeoptimizeFromCompiledCode(Thread*)
+ UNREACHABLE
+END_FUNCTION art_quick_deoptimize_from_compiled_code
+
+ /*
* String's compareTo.
*
* On entry:
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 56f7b35501..e46402d882 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -427,9 +427,16 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
self->ClearException();
ShadowFrame* shadow_frame =
self->PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
- result->SetJ(self->PopDeoptimizationReturnValue().GetJ());
+ mirror::Throwable* pending_exception = nullptr;
+ self->PopDeoptimizationContext(result, &pending_exception);
self->SetTopOfStack(nullptr);
self->SetTopOfShadowStack(shadow_frame);
+
+ // Restore the exception that was pending before deoptimization then interpret the
+ // deoptimized frames.
+ if (pending_exception != nullptr) {
+ self->SetException(pending_exception);
+ }
interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
}
if (kLogInvocationStartAndReturn) {
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 084c88e239..5c1922eea3 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -89,7 +89,7 @@ ADD_TEST_EQ(THREAD_ID_OFFSET,
art::Thread::ThinLockIdOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.card_table.
-#define THREAD_CARD_TABLE_OFFSET 136
+#define THREAD_CARD_TABLE_OFFSET 128
ADD_TEST_EQ(THREAD_CARD_TABLE_OFFSET,
art::Thread::CardTableOffset<__SIZEOF_POINTER__>().Int32Value())
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 4ef1f9e8c9..6d2c5e0f2c 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -17,9 +17,11 @@
#include "hash_set.h"
#include <map>
+#include <forward_list>
#include <sstream>
#include <string>
#include <unordered_set>
+#include <vector>
#include <gtest/gtest.h>
#include "hash_map.h"
@@ -258,4 +260,59 @@ TEST_F(HashSetTest, TestHashMap) {
ASSERT_EQ(it->second, 124);
}
+struct IsEmptyFnVectorInt {
+ void MakeEmpty(std::vector<int>& item) const {
+ item.clear();
+ }
+ bool IsEmpty(const std::vector<int>& item) const {
+ return item.empty();
+ }
+};
+
+template <typename T>
+size_t HashIntSequence(T begin, T end) {
+ size_t hash = 0;
+ for (auto iter = begin; iter != end; ++iter) {
+ hash = hash * 2 + *iter;
+ }
+ return hash;
+};
+
+struct VectorIntHashEquals {
+ std::size_t operator()(const std::vector<int>& item) const {
+ return HashIntSequence(item.begin(), item.end());
+ }
+
+ std::size_t operator()(const std::forward_list<int>& item) const {
+ return HashIntSequence(item.begin(), item.end());
+ }
+
+ bool operator()(const std::vector<int>& a, const std::vector<int>& b) const {
+ return a == b;
+ }
+
+ bool operator()(const std::vector<int>& a, const std::forward_list<int>& b) const {
+ auto aiter = a.begin();
+ auto biter = b.begin();
+ while (aiter != a.end() && biter != b.end()) {
+ if (*aiter != *biter) {
+ return false;
+ }
+ aiter++;
+ biter++;
+ }
+ return (aiter == a.end() && biter == b.end());
+ }
+};
+
+TEST_F(HashSetTest, TestLookupByAlternateKeyType) {
+ HashSet<std::vector<int>, IsEmptyFnVectorInt, VectorIntHashEquals, VectorIntHashEquals> hash_set;
+ hash_set.Insert(std::vector<int>({1, 2, 3, 4}));
+ hash_set.Insert(std::vector<int>({4, 2}));
+ ASSERT_EQ(hash_set.end(), hash_set.Find(std::vector<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.Find(std::vector<int>({1, 2, 3, 4})));
+ ASSERT_EQ(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
+}
+
} // namespace art
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 5f2caef651..6bf203c307 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -64,6 +64,7 @@ enum LockLevel {
kJdwpSocketLock,
kRegionSpaceRegionLock,
kTransactionLogLock,
+ kMarkSweepMarkStackLock,
kJniWeakGlobalsLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
@@ -80,7 +81,6 @@ enum LockLevel {
kArenaPoolLock,
kDexFileMethodInlinerLock,
kDexFileToMethodInlinerMapLock,
- kMarkSweepMarkStackLock,
kInternTableLock,
kOatFileSecondaryLookupLock,
kTracingUniqueMethodsLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3b505e6bf9..e78914c3a3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1834,11 +1834,15 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, si
klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
}
if (UNLIKELY(klass.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // Expect an OOME.
+ self->AssertPendingOOMException();
return nullptr;
}
- klass->SetDexCache(RegisterDexFile(dex_file));
-
+ mirror::DexCache* dex_cache = RegisterDexFile(dex_file);
+ if (dex_cache == nullptr) {
+ self->AssertPendingOOMException();
+ return nullptr;
+ }
+ klass->SetDexCache(dex_cache);
SetupClass(dex_file, dex_class_def, klass, class_loader.Get());
// Mark the string class by setting its access flag.
@@ -2498,7 +2502,7 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
- mirror::DexCache* dex_cache = FindDexCacheLocked(dex_file, true);
+ mirror::DexCache* dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
return dex_cache;
}
@@ -2508,13 +2512,15 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// get to a suspend point.
StackHandleScope<1> hs(self);
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
- CHECK(h_dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
- << dex_file.GetLocation();
WriterMutexLock mu(self, dex_lock_);
- mirror::DexCache* dex_cache = FindDexCacheLocked(dex_file, true);
+ mirror::DexCache* dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
return dex_cache;
}
+ if (h_dex_cache.Get() == nullptr) {
+ self->AssertPendingOOMException();
+ return nullptr;
+ }
RegisterDexFileLocked(dex_file, h_dex_cache);
return h_dex_cache.Get();
}
@@ -2525,19 +2531,27 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file,
RegisterDexFileLocked(dex_file, dex_cache);
}
-mirror::DexCache* ClassLinker::FindDexCache(const DexFile& dex_file, bool allow_failure) {
- Thread* const self = Thread::Current();
+mirror::DexCache* ClassLinker::FindDexCache(Thread* self,
+ const DexFile& dex_file,
+ bool allow_failure) {
ReaderMutexLock mu(self, dex_lock_);
- return FindDexCacheLocked(dex_file, allow_failure);
+ return FindDexCacheLocked(self, dex_file, allow_failure);
}
-mirror::DexCache* ClassLinker::FindDexCacheLocked(const DexFile& dex_file, bool allow_failure) {
- Thread* const self = Thread::Current();
+mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
+ const DexFile& dex_file,
+ bool allow_failure) {
// Search assuming unique-ness of dex file.
- for (jobject weak_root : dex_caches_) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
- if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
- return dex_cache;
+ JavaVMExt* const vm = self->GetJniEnv()->vm;
+ {
+ MutexLock mu(self, vm->WeakGlobalsLock());
+ for (jobject weak_root : dex_caches_) {
+ DCHECK_EQ(GetIndirectRefKind(weak_root), kWeakGlobal);
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+ vm->DecodeWeakGlobalLocked(self, weak_root));
+ if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
+ return dex_cache;
+ }
}
}
if (allow_failure) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index cc56e8b570..2a7162b000 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -309,7 +309,9 @@ class ClassLinker {
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* FindDexCache(const DexFile& dex_file, bool allow_failure = false)
+ mirror::DexCache* FindDexCache(Thread* self,
+ const DexFile& dex_file,
+ bool allow_failure = false)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -559,7 +561,9 @@ class ClassLinker {
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* FindDexCacheLocked(const DexFile& dex_file, bool allow_failure)
+ mirror::DexCache* FindDexCacheLocked(Thread* self,
+ const DexFile& dex_file,
+ bool allow_failure)
REQUIRES(dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 0d1c875fdf..c3191fad3f 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -355,7 +355,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
TestRootVisitor visitor;
class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots);
// Verify the dex cache has resolution methods in all resolved method slots
- mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
+ mirror::DexCache* dex_cache = class_linker_->FindDexCache(Thread::Current(), dex);
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0; i < static_cast<size_t>(resolved_methods->GetLength()); i++) {
EXPECT_TRUE(resolved_methods->GetElementPtrSize<ArtMethod*>(i, sizeof(void*)) != nullptr)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0cbbb79767..8d34f5a78f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1191,6 +1191,10 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c
if (error != JDWP::ERR_NONE) {
return error;
}
+ // Check if the object's type is compatible with the array's type.
+ if (o != nullptr && !o->InstanceOf(oa->GetClass()->GetComponentType())) {
+ return JDWP::ERR_TYPE_MISMATCH;
+ }
oa->Set<false>(offset + i, o);
}
}
@@ -2094,6 +2098,7 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
case kWaitingWeakGcRootRead:
+ case kWaitingForGcThreadFlip:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index a4feac1ea1..d749664d12 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -28,17 +28,30 @@
namespace art {
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
-
+NO_RETURN static void artDeoptimizeImpl(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
if (VLOG_IS_ON(deopt)) {
LOG(INFO) << "Deopting:";
self->Dump(LOG(INFO));
}
- self->PushAndClearDeoptimizationReturnValue();
+ self->AssertHasDeoptimizationContext();
self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
+extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ artDeoptimizeImpl(self);
+}
+
+extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ // Before deoptimizing to interpreter, we must push the deoptimization context.
+ JValue return_value;
+ return_value.SetJ(0); // we never deoptimize from compiled code with an invoke result.
+ self->PushDeoptimizationContext(return_value, false, self->GetException());
+ artDeoptimizeImpl(self);
+}
+
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index ad5ee8475e..8e660a246d 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -51,6 +51,9 @@ extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtM
uint64_t gpr_result,
uint64_t fpr_result)
SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Instrumentation exit stub must not be entered with a pending exception.
+ CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
+ << self->GetException()->Dump();
// Compute address of return PC and sanity check that it currently holds 0.
size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly);
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index aa35ec1ca2..0c7caf38b9 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -688,8 +688,12 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// Request a stack deoptimization if needed
ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(result, shorty[0] == 'L', self->GetException());
+
+ // Set special exception to cause deoptimization.
self->SetException(Thread::GetDeoptimizationException());
- self->SetDeoptimizationReturnValue(result, shorty[0] == 'L');
}
// No need to restore the args since the method has already been run by the interpreter.
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index 8209dc808e..2842c5a5a6 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -70,7 +70,8 @@ static inline const void* GetQuickInstrumentationEntryPoint() {
return reinterpret_cast<const void*>(art_quick_instrumentation_entry);
}
-extern "C" void art_quick_deoptimize_from_compiled_slow_path();
+// Stub to deoptimize from compiled code.
+extern "C" void art_quick_deoptimize_from_compiled_code();
// The return_pc of instrumentation exit stub.
extern "C" void art_quick_instrumentation_exit();
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index f7a3cd53cd..7db8888c7f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -72,15 +72,12 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tls32_, throwing_OutOfMemoryError, no_thread_suspension, 4);
EXPECT_OFFSET_DIFFP(Thread, tls32_, no_thread_suspension, thread_exit_check_count, 4);
EXPECT_OFFSET_DIFFP(Thread, tls32_, thread_exit_check_count, handling_signal_, 4);
- EXPECT_OFFSET_DIFFP(Thread, tls32_, handling_signal_,
- deoptimization_return_value_is_reference, 4);
// TODO: Better connection. Take alignment into account.
EXPECT_OFFSET_DIFF_GT3(Thread, tls32_.thread_exit_check_count, tls64_.trace_clock_base, 4,
thread_tls32_to_tls64);
- EXPECT_OFFSET_DIFFP(Thread, tls64_, trace_clock_base, deoptimization_return_value, 8);
- EXPECT_OFFSET_DIFFP(Thread, tls64_, deoptimization_return_value, stats, 8);
+ EXPECT_OFFSET_DIFFP(Thread, tls64_, trace_clock_base, stats, 8);
// TODO: Better connection. Take alignment into account.
EXPECT_OFFSET_DIFF_GT3(Thread, tls64_.stats, tlsPtr_.card_table, 8, thread_tls64_to_tlsptr);
@@ -108,8 +105,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, single_step_control, stacked_shadow_frame_record,
sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stacked_shadow_frame_record,
- deoptimization_return_value_stack, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_return_value_stack, name, sizeof(void*));
+ deoptimization_context_stack, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_context_stack, name, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, last_no_thread_suspension_cause,
sizeof(void*));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 65e946fd79..a5bc60a912 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -261,8 +261,10 @@ void ConcurrentCopying::FlipThreadRoots() {
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
+ heap_->ThreadFlipBegin(self); // Sync with JNI critical calls.
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
+ heap_->ThreadFlipEnd(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b8c44781a3..aec8d631f4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -161,6 +161,8 @@ Heap::Heap(size_t initial_size,
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
zygote_space_(nullptr),
large_object_threshold_(large_object_threshold),
+ disable_thread_flip_count_(0),
+ thread_flip_running_(false),
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -480,6 +482,9 @@ Heap::Heap(size_t initial_size,
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
+ thread_flip_lock_ = new Mutex("GC thread flip lock");
+ thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
+ *thread_flip_lock_));
task_processor_.reset(new TaskProcessor());
reference_processor_.reset(new ReferenceProcessor());
pending_task_lock_ = new Mutex("Pending task lock");
@@ -770,6 +775,71 @@ void Heap::DecrementDisableMovingGC(Thread* self) {
--disable_moving_gc_count_;
}
+void Heap::IncrementDisableThreadFlip(Thread* self) {
+ // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
+ CHECK(kUseReadBarrier);
+ ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
+ MutexLock mu(self, *thread_flip_lock_);
+ bool has_waited = false;
+ uint64_t wait_start = NanoTime();
+ while (thread_flip_running_) {
+ has_waited = true;
+ thread_flip_cond_->Wait(self);
+ }
+ ++disable_thread_flip_count_;
+ if (has_waited) {
+ uint64_t wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
+ if (wait_time > long_pause_log_threshold_) {
+ LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
+ }
+ }
+}
+
+void Heap::DecrementDisableThreadFlip(Thread* self) {
+ // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
+ // the GC waiting before doing a thread flip.
+ CHECK(kUseReadBarrier);
+ MutexLock mu(self, *thread_flip_lock_);
+ CHECK_GT(disable_thread_flip_count_, 0U);
+ --disable_thread_flip_count_;
+ thread_flip_cond_->Broadcast(self);
+}
+
+void Heap::ThreadFlipBegin(Thread* self) {
+ // Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
+ // > 0, block. Otherwise, go ahead.
+ CHECK(kUseReadBarrier);
+ ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
+ MutexLock mu(self, *thread_flip_lock_);
+ bool has_waited = false;
+ uint64_t wait_start = NanoTime();
+ CHECK(!thread_flip_running_);
+ // Set this to true before waiting so that a new mutator entering a JNI critical won't starve GC.
+ thread_flip_running_ = true;
+ while (disable_thread_flip_count_ > 0) {
+ has_waited = true;
+ thread_flip_cond_->Wait(self);
+ }
+ if (has_waited) {
+ uint64_t wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
+ if (wait_time > long_pause_log_threshold_) {
+ LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
+ }
+ }
+}
+
+void Heap::ThreadFlipEnd(Thread* self) {
+ // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
+ // waiting before doing a JNI critical.
+ CHECK(kUseReadBarrier);
+ MutexLock mu(self, *thread_flip_lock_);
+ CHECK(thread_flip_running_);
+ thread_flip_running_ = false;
+ thread_flip_cond_->Broadcast(self);
+}
+
void Heap::UpdateProcessState(ProcessState process_state) {
if (process_state_ != process_state) {
process_state_ = process_state;
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d94f1091e0..85688ae3ee 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -300,6 +300,12 @@ class Heap {
void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
+ // Temporarily disable thread flip for JNI critical calls.
+ void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
+ void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
+ void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
+ void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
+
// Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
void ClearMarkedObjects() REQUIRES(Locks::heap_bitmap_lock_);
@@ -1065,6 +1071,12 @@ class Heap {
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
+ // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
+ Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
+ size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
+ bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
+
// Reference processor;
std::unique_ptr<ReferenceProcessor> reference_processor_;
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 888c0d27ca..ab921d95f1 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -27,11 +27,11 @@ class ReferenceQueueTest : public CommonRuntimeTest {};
TEST_F(ReferenceQueueTest, EnqueueDequeue) {
Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
StackHandleScope<20> hs(self);
Mutex lock("Reference queue lock");
ReferenceQueue queue(&lock);
ASSERT_TRUE(queue.IsEmpty());
- ScopedObjectAccess soa(self);
ASSERT_EQ(queue.GetLength(), 0U);
auto ref_class = hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
@@ -58,10 +58,10 @@ TEST_F(ReferenceQueueTest, EnqueueDequeue) {
TEST_F(ReferenceQueueTest, Dump) {
Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
StackHandleScope<20> hs(self);
Mutex lock("Reference queue lock");
ReferenceQueue queue(&lock);
- ScopedObjectAccess soa(self);
queue.Dump(LOG(INFO));
auto weak_ref_class = hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 222083b39f..ca206ef721 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -19,8 +19,9 @@
#include "handle_scope.h"
+#include "base/mutex.h"
#include "handle.h"
-#include "thread.h"
+#include "thread-inl.h"
#include "verify_object-inl.h"
namespace art {
@@ -29,6 +30,9 @@ template<size_t kNumReferences>
inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
: HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
DCHECK_EQ(self, Thread::Current());
+ if (kDebugLocking) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
// TODO: Figure out how to use a compile assert.
CHECK_EQ(&storage_[0], GetReferences());
@@ -42,6 +46,9 @@ template<size_t kNumReferences>
inline StackHandleScope<kNumReferences>::~StackHandleScope() {
HandleScope* top_handle_scope = self_->PopHandleScope();
DCHECK_EQ(top_handle_scope, this);
+ if (kDebugLocking) {
+ Locks::mutator_lock_->AssertSharedHeld(self_);
+ }
}
inline size_t HandleScope::SizeOf(uint32_t num_references) {
@@ -59,6 +66,9 @@ inline size_t HandleScope::SizeOf(size_t pointer_size, uint32_t num_references)
inline mirror::Object* HandleScope::GetReference(size_t i) const {
DCHECK_LT(i, number_of_references_);
+ if (kDebugLocking) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
return GetReferences()[i].AsMirrorPtr();
}
@@ -73,6 +83,9 @@ inline MutableHandle<mirror::Object> HandleScope::GetMutableHandle(size_t i) {
}
inline void HandleScope::SetReference(size_t i, mirror::Object* object) {
+ if (kDebugLocking) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
DCHECK_LT(i, number_of_references_);
GetReferences()[i].Assign(object);
}
@@ -104,6 +117,9 @@ inline HandleWrapper<T> StackHandleScope<kNumReferences>::NewHandleWrapper(T** o
template<size_t kNumReferences>
inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
+ if (kDebugLocking) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
DCHECK_LT(i, kNumReferences);
VerifyObject(object);
GetReferences()[i].Assign(object);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index ee6b020528..e2094dc95e 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -765,8 +765,9 @@ class Hprof : public SingleRootVisitor {
okay = !file_output.Errors();
if (okay) {
- // Check for expected size.
- CHECK_EQ(file_output.SumLength(), overall_size);
+ // Check for expected size. Output is expected to be less-or-equal than first phase, see
+ // b/23521263.
+ DCHECK_LE(file_output.SumLength(), overall_size);
}
output_ = nullptr;
}
@@ -810,8 +811,8 @@ class Hprof : public SingleRootVisitor {
// Write the dump.
ProcessHeap(true);
- // Check for expected size.
- CHECK_EQ(net_output.SumLength(), overall_size + kChunkHeaderSize);
+ // Check for expected size. See DumpToFile for comment.
+ DCHECK_LE(net_output.SumLength(), overall_size + kChunkHeaderSize);
output_ = nullptr;
return true;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index e28d578121..63c02ed686 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1016,7 +1016,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
PrettyMethod(method).c_str(),
return_value.GetJ()) << *self;
}
- self->SetDeoptimizationReturnValue(return_value, return_shorty == 'L');
+ self->PushDeoptimizationContext(return_value, return_shorty == 'L',
+ nullptr /* no pending exception */);
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 2fd0517061..ef7a924c63 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -578,6 +578,13 @@ void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* resu
mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
MutexLock mu(self, weak_globals_lock_);
+ return DecodeWeakGlobalLocked(self, ref);
+}
+
+mirror::Object* JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectRef ref) {
+ if (kDebugLocking) {
+ weak_globals_lock_.AssertHeld(self);
+ }
while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index d70fc47c61..e80266fd5d 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -133,7 +133,16 @@ class JavaVMExt : public JavaVM {
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!weak_globals_lock_);
+
+ mirror::Object* DecodeWeakGlobalLocked(Thread* self, IndirectRef ref)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(weak_globals_lock_);
+
+ Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
+ return weak_globals_lock_;
+ }
void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6a716b5e0d..6bc18291cb 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1729,7 +1729,13 @@ class JNI {
if (heap->IsMovableObject(s)) {
StackHandleScope<1> hs(soa.Self());
HandleWrapper<mirror::String> h(hs.NewHandleWrapper(&s));
- heap->IncrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->IncrementDisableMovingGC(soa.Self());
+ } else {
+ // For the CC collector, we only need to wait for the thread flip rather than the whole GC
+ // to occur thanks to the to-space invariant.
+ heap->IncrementDisableThreadFlip(soa.Self());
+ }
}
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
@@ -1744,7 +1750,11 @@ class JNI {
gc::Heap* heap = Runtime::Current()->GetHeap();
mirror::String* s = soa.Decode<mirror::String*>(java_string);
if (heap->IsMovableObject(s)) {
- heap->DecrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->DecrementDisableMovingGC(soa.Self());
+ } else {
+ heap->DecrementDisableThreadFlip(soa.Self());
+ }
}
}
@@ -1891,7 +1901,13 @@ class JNI {
}
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(array)) {
- heap->IncrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->IncrementDisableMovingGC(soa.Self());
+ } else {
+ // For the CC collector, we only need to wait for the thread flip rather than the whole GC
+ // to occur thanks to the to-space invariant.
+ heap->IncrementDisableThreadFlip(soa.Self());
+ }
// Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
array = soa.Decode<mirror::Array*>(java_array);
}
@@ -2437,7 +2453,11 @@ class JNI {
delete[] reinterpret_cast<uint64_t*>(elements);
} else if (heap->IsMovableObject(array)) {
// Non copy to a movable object must means that we had disabled the moving GC.
- heap->DecrementDisableMovingGC(soa.Self());
+ if (!kUseReadBarrier) {
+ heap->DecrementDisableMovingGC(soa.Self());
+ } else {
+ heap->DecrementDisableThreadFlip(soa.Self());
+ }
}
}
}
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index eda6c9b0d3..28a830d86b 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -146,8 +146,8 @@ template<VerifyObjectFlags kVerifyFlags>
inline size_t String::SizeOf() {
size_t size = sizeof(String) + (sizeof(uint16_t) * GetLength<kVerifyFlags>());
// String.equals() intrinsics assume zero-padding up to kObjectAlignment,
- // so make sure the padding is actually zero-initialized if the allocator
- // chooses to clear, or GC compaction chooses to copy, only SizeOf() bytes.
+ // so make sure the zero-padding is actually copied around if GC compaction
+ // chooses to copy only SizeOf() bytes.
// http://b/23528461
return RoundUp(size, kObjectAlignment);
}
@@ -155,21 +155,35 @@ inline size_t String::SizeOf() {
template <bool kIsInstrumented, typename PreFenceVisitor>
inline String* String::Alloc(Thread* self, int32_t utf16_length, gc::AllocatorType allocator_type,
const PreFenceVisitor& pre_fence_visitor) {
- size_t header_size = sizeof(String);
- size_t data_size = sizeof(uint16_t) * utf16_length;
+ constexpr size_t header_size = sizeof(String);
+ static_assert(sizeof(utf16_length) <= sizeof(size_t),
+ "static_cast<size_t>(utf16_length) must not lose bits.");
+ size_t length = static_cast<size_t>(utf16_length);
+ size_t data_size = sizeof(uint16_t) * length;
size_t size = header_size + data_size;
+ // String.equals() intrinsics assume zero-padding up to kObjectAlignment,
+ // so make sure the allocator clears the padding as well.
+ // http://b/23528461
+ size_t alloc_size = RoundUp(size, kObjectAlignment);
Class* string_class = GetJavaLangString();
// Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
- if (UNLIKELY(size < data_size)) {
+ // Do this by comparing with the maximum length that will _not_ cause an overflow.
+ constexpr size_t overflow_length = (-header_size) / sizeof(uint16_t); // Unsigned arithmetic.
+ constexpr size_t max_alloc_length = overflow_length - 1u;
+ static_assert(IsAligned<sizeof(uint16_t)>(kObjectAlignment),
+ "kObjectAlignment must be at least as big as Java char alignment");
+ constexpr size_t max_length = RoundDown(max_alloc_length, kObjectAlignment / sizeof(uint16_t));
+ if (UNLIKELY(length > max_length)) {
self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
PrettyDescriptor(string_class).c_str(),
utf16_length).c_str());
return nullptr;
}
+
gc::Heap* heap = Runtime::Current()->GetHeap();
return down_cast<String*>(
- heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, size,
+ heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, alloc_size,
allocator_type, pre_fence_visitor));
}
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index e1173bb026..69112b18ec 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -290,15 +290,13 @@ class WatchdogTask : public Task {
static void CommonWaitSetup(MonitorTest* test, ClassLinker* class_linker, uint64_t create_sleep,
int64_t c_millis, bool c_expected, bool interrupt, uint64_t use_sleep,
int64_t u_millis, bool u_expected, const char* pool_name) {
+ Thread* const self = Thread::Current();
+ ScopedObjectAccess soa(self);
// First create the object we lock. String is easiest.
- StackHandleScope<3> hs(Thread::Current());
- {
- ScopedObjectAccess soa(Thread::Current());
- test->object_ = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(Thread::Current(),
- "hello, world!"));
- test->watchdog_object_ = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(Thread::Current(),
- "hello, world!"));
- }
+ StackHandleScope<3> hs(soa.Self());
+ test->object_ = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "hello, world!"));
+ test->watchdog_object_ = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self,
+ "hello, world!"));
// Create the barrier used to synchronize.
test->barrier_ = std::unique_ptr<Barrier>(new Barrier(2));
@@ -308,23 +306,17 @@ static void CommonWaitSetup(MonitorTest* test, ClassLinker* class_linker, uint64
// Fill the heap.
std::unique_ptr<StackHandleScope<kMaxHandles>> hsp;
std::vector<MutableHandle<mirror::Object>> handles;
- {
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
- // Our job: Fill the heap, then try Wait.
- FillHeap(self, class_linker, &hsp, &handles);
+ // Our job: Fill the heap, then try Wait.
+ FillHeap(soa.Self(), class_linker, &hsp, &handles);
- // Now release everything.
- auto it = handles.begin();
- auto end = handles.end();
-
- for ( ; it != end; ++it) {
- it->Assign(nullptr);
- }
- } // Need to drop the mutator lock to allow barriers.
+ // Now release everything.
+ for (MutableHandle<mirror::Object>& h : handles) {
+ h.Assign(nullptr);
+ }
- Thread* self = Thread::Current();
+ // Need to drop the mutator lock to allow barriers.
+ soa.Self()->TransitionFromRunnableToSuspended(kNative);
ThreadPool thread_pool(pool_name, 3);
thread_pool.AddTask(self, new CreateTask(test, create_sleep, c_millis, c_expected));
if (interrupt) {
@@ -336,19 +328,19 @@ static void CommonWaitSetup(MonitorTest* test, ClassLinker* class_linker, uint64
thread_pool.StartWorkers(self);
// Wait on completion barrier.
- test->complete_barrier_->Wait(Thread::Current());
+ test->complete_barrier_->Wait(self);
test->completed_ = true;
// Wake the watchdog.
{
- ScopedObjectAccess soa(Thread::Current());
-
+ ScopedObjectAccess soa2(self);
test->watchdog_object_.Get()->MonitorEnter(self); // Lock the object.
test->watchdog_object_.Get()->NotifyAll(self); // Wake up waiting parties.
test->watchdog_object_.Get()->MonitorExit(self); // Release the lock.
}
thread_pool.StopWorkers(self);
+ soa.Self()->TransitionFromSuspendedToRunnable();
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 9bd320cf07..3b84bfa026 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -171,7 +171,7 @@ static jobject DexFile_openDexFileNative(
if (array == nullptr) {
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
- if (Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file, true) != nullptr) {
+ if (linker->FindDexCache(soa.Self(), *dex_file, true) != nullptr) {
dex_file.release();
}
}
@@ -208,8 +208,9 @@ static void DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
//
// TODO: The Runtime should support unloading of classes and freeing of the
// dex files for those unloaded classes rather than leaking dex files here.
- for (auto& dex_file : *dex_files) {
- if (Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file, true) == nullptr) {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ for (const DexFile* dex_file : *dex_files) {
+ if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
delete dex_file;
}
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 5a9c43bd22..4f957233c4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -428,9 +428,10 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
return;
}
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Thread* const self = Thread::Current();
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
CHECK(dex_file != nullptr);
- mirror::DexCache* const dex_cache = class_linker->FindDexCache(*dex_file, true);
+ mirror::DexCache* const dex_cache = class_linker->FindDexCache(self, *dex_file, true);
// If dex cache was deallocated, just continue.
if (dex_cache == nullptr) {
continue;
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index c76f6eec73..c75ff78821 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,6 +90,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
case kWaitingWeakGcRootRead: return kJavaWaiting;
+ case kWaitingForGcThreadFlip: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 29dd76ce5e..1520a9bb02 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '8', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '9', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 49451ade94..a9dc16ddd6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -235,6 +235,9 @@ Runtime::~Runtime() {
self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
WellKnownClasses::java_lang_Daemons_stop);
}
+
+ Trace::Shutdown();
+
if (attach_shutdown_thread) {
DetachCurrentThread();
self = nullptr;
@@ -245,8 +248,6 @@ Runtime::~Runtime() {
BackgroundMethodSamplingProfiler::Shutdown();
}
- Trace::Shutdown();
-
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a33e150b93..63534b131b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -162,27 +162,41 @@ void Thread::ResetQuickAllocEntryPointsForThread() {
ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
}
-class DeoptimizationReturnValueRecord {
+class DeoptimizationContextRecord {
public:
- DeoptimizationReturnValueRecord(const JValue& ret_val,
- bool is_reference,
- DeoptimizationReturnValueRecord* link)
- : ret_val_(ret_val), is_reference_(is_reference), link_(link) {}
+ DeoptimizationContextRecord(const JValue& ret_val, bool is_reference,
+ mirror::Throwable* pending_exception,
+ DeoptimizationContextRecord* link)
+ : ret_val_(ret_val), is_reference_(is_reference), pending_exception_(pending_exception),
+ link_(link) {}
JValue GetReturnValue() const { return ret_val_; }
bool IsReference() const { return is_reference_; }
- DeoptimizationReturnValueRecord* GetLink() const { return link_; }
- mirror::Object** GetGCRoot() {
+ mirror::Throwable* GetPendingException() const { return pending_exception_; }
+ DeoptimizationContextRecord* GetLink() const { return link_; }
+ mirror::Object** GetReturnValueAsGCRoot() {
DCHECK(is_reference_);
return ret_val_.GetGCRoot();
}
+ mirror::Object** GetPendingExceptionAsGCRoot() {
+ return reinterpret_cast<mirror::Object**>(&pending_exception_);
+ }
private:
+ // The value returned by the method at the top of the stack before deoptimization.
JValue ret_val_;
+
+ // Indicates whether the returned value is a reference. If so, the GC will visit it.
const bool is_reference_;
- DeoptimizationReturnValueRecord* const link_;
- DISALLOW_COPY_AND_ASSIGN(DeoptimizationReturnValueRecord);
+ // The exception that was pending before deoptimization (or null if there was no pending
+ // exception).
+ mirror::Throwable* pending_exception_;
+
+ // A link to the previous DeoptimizationContextRecord.
+ DeoptimizationContextRecord* const link_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
};
class StackedShadowFrameRecord {
@@ -206,22 +220,28 @@ class StackedShadowFrameRecord {
DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
};
-void Thread::PushAndClearDeoptimizationReturnValue() {
- DeoptimizationReturnValueRecord* record = new DeoptimizationReturnValueRecord(
- tls64_.deoptimization_return_value,
- tls32_.deoptimization_return_value_is_reference,
- tlsPtr_.deoptimization_return_value_stack);
- tlsPtr_.deoptimization_return_value_stack = record;
- ClearDeoptimizationReturnValue();
+void Thread::PushDeoptimizationContext(const JValue& return_value, bool is_reference,
+ mirror::Throwable* exception) {
+ DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
+ return_value,
+ is_reference,
+ exception,
+ tlsPtr_.deoptimization_context_stack);
+ tlsPtr_.deoptimization_context_stack = record;
+}
+
+void Thread::PopDeoptimizationContext(JValue* result, mirror::Throwable** exception) {
+ AssertHasDeoptimizationContext();
+ DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
+ tlsPtr_.deoptimization_context_stack = record->GetLink();
+ result->SetJ(record->GetReturnValue().GetJ());
+ *exception = record->GetPendingException();
+ delete record;
}
-JValue Thread::PopDeoptimizationReturnValue() {
- DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
- DCHECK(record != nullptr);
- tlsPtr_.deoptimization_return_value_stack = record->GetLink();
- JValue ret_val(record->GetReturnValue());
- delete record;
- return ret_val;
+void Thread::AssertHasDeoptimizationContext() {
+ CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
+ << "No deoptimization context for thread " << *this;
}
void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
@@ -1575,6 +1595,9 @@ Thread::~Thread() {
CHECK(tlsPtr_.flip_function == nullptr);
CHECK_EQ(tls32_.suspended_at_suspend_check, false);
+ // Make sure we processed all deoptimization requests.
+ CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
+
// We may be deleting a still born thread.
SetStateUnsafe(kTerminated);
@@ -2593,7 +2616,7 @@ void Thread::VisitRoots(RootVisitor* visitor) {
visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
- RootInfo(kRootNativeStack, thread_id));
+ RootInfo(kRootNativeStack, thread_id));
}
visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
@@ -2602,6 +2625,7 @@ void Thread::VisitRoots(RootVisitor* visitor) {
if (tlsPtr_.debug_invoke_req != nullptr) {
tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
}
+ // Visit roots for deoptimization.
if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
RootCallbackVisitor visitor_to_callback(visitor, thread_id);
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
@@ -2615,14 +2639,16 @@ void Thread::VisitRoots(RootVisitor* visitor) {
}
}
}
- if (tlsPtr_.deoptimization_return_value_stack != nullptr) {
- for (DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
+ if (tlsPtr_.deoptimization_context_stack != nullptr) {
+ for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
record != nullptr;
record = record->GetLink()) {
if (record->IsReference()) {
- visitor->VisitRootIfNonNull(record->GetGCRoot(),
- RootInfo(kRootThreadObject, thread_id));
+ visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
+ RootInfo(kRootThreadObject, thread_id));
}
+ visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
+ RootInfo(kRootThreadObject, thread_id));
}
}
for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 9bb57bfb6b..2d450f5f5d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -77,7 +77,7 @@ class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
-class DeoptimizationReturnValueRecord;
+class DeoptimizationContextRecord;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
@@ -830,19 +830,13 @@ class Thread {
// and execute Java code, so there might be nested deoptimizations happening.
// We need to save the ongoing deoptimization shadow frames and return
// values on stacks.
- void SetDeoptimizationReturnValue(const JValue& ret_val, bool is_reference) {
- tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
- tls32_.deoptimization_return_value_is_reference = is_reference;
- }
- bool IsDeoptimizationReturnValueReference() {
- return tls32_.deoptimization_return_value_is_reference;
- }
- void ClearDeoptimizationReturnValue() {
- tls64_.deoptimization_return_value.SetJ(0);
- tls32_.deoptimization_return_value_is_reference = false;
- }
- void PushAndClearDeoptimizationReturnValue();
- JValue PopDeoptimizationReturnValue();
+ void PushDeoptimizationContext(const JValue& return_value, bool is_reference,
+ mirror::Throwable* exception)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertHasDeoptimizationContext()
+ SHARED_REQUIRES(Locks::mutator_lock_);
void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type);
@@ -1102,9 +1096,8 @@ class Thread {
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
thread_exit_check_count(0), handling_signal_(false),
- deoptimization_return_value_is_reference(false), suspended_at_suspend_check(false),
- ready_for_debug_invoke(false), debug_method_entry_(false), is_gc_marking(false),
- weak_ref_access_enabled(true) {
+ suspended_at_suspend_check(false), ready_for_debug_invoke(false),
+ debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true) {
}
union StateAndFlags state_and_flags;
@@ -1144,10 +1137,6 @@ class Thread {
// True if signal is being handled by this thread.
bool32_t handling_signal_;
- // True if the return value for interpreter after deoptimization is a reference.
- // For gc purpose.
- bool32_t deoptimization_return_value_is_reference;
-
// True if the thread is suspended in FullSuspendCheck(). This is
// used to distinguish runnable threads that are suspended due to
// a normal suspend check from other threads.
@@ -1178,15 +1167,12 @@ class Thread {
} tls32_;
struct PACKED(8) tls_64bit_sized_values {
- tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
+ tls_64bit_sized_values() : trace_clock_base(0) {
}
// The clock base used for tracing.
uint64_t trace_clock_base;
- // Return value used by deoptimization.
- JValue deoptimization_return_value;
-
RuntimeStats stats;
} tls64_;
@@ -1197,7 +1183,7 @@ class Thread {
stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
- stacked_shadow_frame_record(nullptr), deoptimization_return_value_stack(nullptr),
+ stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
@@ -1282,7 +1268,7 @@ class Thread {
StackedShadowFrameRecord* stacked_shadow_frame_record;
// Deoptimization return value record stack.
- DeoptimizationReturnValueRecord* deoptimization_return_value_stack;
+ DeoptimizationContextRecord* deoptimization_context_stack;
// A cached copy of the java.lang.Thread's name.
std::string* name;
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index a11d213ea3..8f2f70f46e 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -44,6 +44,7 @@ enum ThreadState {
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
kWaitingWeakGcRootRead, // WAITING TS_WAIT waiting on the GC to read a weak root
+ kWaitingForGcThreadFlip, // WAITING TS_WAIT waiting on the GC thread flip (CC collector) to finish
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 7579d8d0e0..4ab5c0efe7 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -638,10 +638,11 @@ static void GetVisitedMethodsFromBitSets(
const std::map<const DexFile*, DexIndexBitSet*>& seen_methods,
std::set<ArtMethod*>* visited_methods) SHARED_REQUIRES(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* const self = Thread::Current();
for (auto& e : seen_methods) {
DexIndexBitSet* bit_set = e.second;
// TODO: Visit trace methods as roots.
- mirror::DexCache* dex_cache = class_linker->FindDexCache(*e.first, false);
+ mirror::DexCache* dex_cache = class_linker->FindDexCache(self, *e.first, false);
for (uint32_t i = 0; i < bit_set->size(); ++i) {
if ((*bit_set)[i]) {
visited_methods->insert(dex_cache->GetResolvedMethod(i, sizeof(void*)));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 223268d545..4f921bdbc8 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -589,7 +589,7 @@ bool MethodVerifier::Verify() {
std::ostream& MethodVerifier::Fail(VerifyError error) {
// Mark the error type as encountered.
- encountered_failure_types_ |= (1U << static_cast<uint32_t>(error));
+ encountered_failure_types_ |= static_cast<uint32_t>(error);
switch (error) {
case VERIFY_ERROR_NO_CLASS:
@@ -601,6 +601,7 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
case VERIFY_ERROR_INSTANTIATION:
case VERIFY_ERROR_CLASS_CHANGE:
case VERIFY_ERROR_FORCE_INTERPRETER:
+ case VERIFY_ERROR_LOCKING:
if (Runtime::Current()->IsAotCompiler() || !can_load_classes_) {
// If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
// class change and instantiation errors into soft verification errors so that we re-verify
@@ -631,12 +632,14 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
}
}
break;
+
// Indication that verification should be retried at runtime.
case VERIFY_ERROR_BAD_CLASS_SOFT:
if (!allow_soft_failures_) {
have_pending_hard_failure_ = true;
}
break;
+
// Hard verification failures at compile time will still fail at runtime, so the class is
// marked as rejected to prevent it from being compiled.
case VERIFY_ERROR_BAD_CLASS_HARD: {
@@ -1657,6 +1660,33 @@ static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, uint16_
return DexFile::kDexNoIndex;
}
+// Setup a register line for the given return instruction.
+static void AdjustReturnLine(MethodVerifier* verifier,
+ const Instruction* ret_inst,
+ RegisterLine* line) {
+ Instruction::Code opcode = ret_inst->Opcode();
+
+ switch (opcode) {
+ case Instruction::RETURN_VOID:
+ case Instruction::RETURN_VOID_NO_BARRIER:
+ SafelyMarkAllRegistersAsConflicts(verifier, line);
+ break;
+
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ line->MarkAllRegistersAsConflictsExcept(verifier, ret_inst->VRegA_11x());
+ break;
+
+ case Instruction::RETURN_WIDE:
+ line->MarkAllRegistersAsConflictsExceptWide(verifier, ret_inst->VRegA_11x());
+ break;
+
+ default:
+ LOG(FATAL) << "Unknown return opcode " << opcode;
+ UNREACHABLE();
+ }
+}
+
bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// If we're doing FindLocksAtDexPc, check whether we're at the dex pc we care about.
// We want the state _before_ the instruction, for the case where the dex pc we're
@@ -3078,10 +3108,9 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else if (have_pending_runtime_throw_failure_) {
/* checking interpreter will throw, mark following code as unreachable */
opcode_flags = Instruction::kThrow;
- have_any_pending_runtime_throw_failure_ = true;
- // Reset the pending_runtime_throw flag. The flag is a global to decouple Fail and is per
- // instruction.
- have_pending_runtime_throw_failure_ = false;
+ // Note: the flag must be reset as it is only global to decouple Fail and is semantically per
+ // instruction. However, RETURN checking may throw LOCKING errors, so we clear at the
+ // very end.
}
/*
* If we didn't just set the result register, clear it out. This ensures that you can only use
@@ -3250,16 +3279,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
if (insn_flags_[next_insn_idx].IsReturn()) {
// For returns we only care about the operand to the return, all other registers are dead.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
- Instruction::Code opcode = ret_inst->Opcode();
- if (opcode == Instruction::RETURN_VOID || opcode == Instruction::RETURN_VOID_NO_BARRIER) {
- SafelyMarkAllRegistersAsConflicts(this, work_line_.get());
- } else {
- if (opcode == Instruction::RETURN_WIDE) {
- work_line_->MarkAllRegistersAsConflictsExceptWide(this, ret_inst->VRegA_11x());
- } else {
- work_line_->MarkAllRegistersAsConflictsExcept(this, ret_inst->VRegA_11x());
- }
- }
+ AdjustReturnLine(this, ret_inst, work_line_.get());
}
RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
if (next_line != nullptr) {
@@ -3280,9 +3300,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* If we're returning from the method, make sure monitor stack is empty. */
if ((opcode_flags & Instruction::kReturn) != 0) {
- if (!work_line_->VerifyMonitorStackEmpty(this)) {
- return false;
- }
+ work_line_->VerifyMonitorStackEmpty(this);
}
/*
@@ -3302,6 +3320,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
DCHECK(insn_flags_[*start_guess].IsOpcode());
+ if (have_pending_runtime_throw_failure_) {
+ have_any_pending_runtime_throw_failure_ = true;
+ // Reset the pending_runtime_throw flag now.
+ have_pending_runtime_throw_failure_ = false;
+ }
+
return true;
} // NOLINT(readability/fn_size)
@@ -4425,31 +4449,15 @@ bool MethodVerifier::UpdateRegisters(uint32_t next_insn, RegisterLine* merge_lin
* there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
* only way a register can transition out of "unknown", so this is not just an optimization.)
*/
- if (!insn_flags_[next_insn].IsReturn()) {
- target_line->CopyFromLine(merge_line);
- } else {
+ target_line->CopyFromLine(merge_line);
+ if (insn_flags_[next_insn].IsReturn()) {
// Verify that the monitor stack is empty on return.
- if (!merge_line->VerifyMonitorStackEmpty(this)) {
- return false;
- }
+ merge_line->VerifyMonitorStackEmpty(this);
+
// For returns we only care about the operand to the return, all other registers are dead.
// Initialize them as conflicts so they don't add to GC and deoptimization information.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
- Instruction::Code opcode = ret_inst->Opcode();
- if (opcode == Instruction::RETURN_VOID || opcode == Instruction::RETURN_VOID_NO_BARRIER) {
- // Explicitly copy the this-initialized flag from the merge-line, as we didn't copy its
- // state. Must be done before SafelyMarkAllRegistersAsConflicts as that will do the
- // super-constructor-call checking.
- target_line->CopyThisInitialized(*merge_line);
- SafelyMarkAllRegistersAsConflicts(this, target_line);
- } else {
- target_line->CopyFromLine(merge_line);
- if (opcode == Instruction::RETURN_WIDE) {
- target_line->MarkAllRegistersAsConflictsExceptWide(this, ret_inst->VRegA_11x());
- } else {
- target_line->MarkAllRegistersAsConflictsExcept(this, ret_inst->VRegA_11x());
- }
- }
+ AdjustReturnLine(this, ret_inst, target_line);
}
} else {
std::unique_ptr<RegisterLine> copy(gDebugVerify ?
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index d0841f001f..b57abf537d 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -67,17 +67,17 @@ std::ostream& operator<<(std::ostream& os, const MethodType& rhs);
* to be rewritten to fail at runtime.
*/
enum VerifyError {
- VERIFY_ERROR_BAD_CLASS_HARD = 0, // VerifyError; hard error that skips compilation.
- VERIFY_ERROR_BAD_CLASS_SOFT = 1, // VerifyError; soft error that verifies again at runtime.
-
- VERIFY_ERROR_NO_CLASS = 2, // NoClassDefFoundError.
- VERIFY_ERROR_NO_FIELD = 3, // NoSuchFieldError.
- VERIFY_ERROR_NO_METHOD = 4, // NoSuchMethodError.
- VERIFY_ERROR_ACCESS_CLASS = 5, // IllegalAccessError.
- VERIFY_ERROR_ACCESS_FIELD = 6, // IllegalAccessError.
- VERIFY_ERROR_ACCESS_METHOD = 7, // IllegalAccessError.
- VERIFY_ERROR_CLASS_CHANGE = 8, // IncompatibleClassChangeError.
- VERIFY_ERROR_INSTANTIATION = 9, // InstantiationError.
+ VERIFY_ERROR_BAD_CLASS_HARD = 1, // VerifyError; hard error that skips compilation.
+ VERIFY_ERROR_BAD_CLASS_SOFT = 2, // VerifyError; soft error that verifies again at runtime.
+
+ VERIFY_ERROR_NO_CLASS = 4, // NoClassDefFoundError.
+ VERIFY_ERROR_NO_FIELD = 8, // NoSuchFieldError.
+ VERIFY_ERROR_NO_METHOD = 16, // NoSuchMethodError.
+ VERIFY_ERROR_ACCESS_CLASS = 32, // IllegalAccessError.
+ VERIFY_ERROR_ACCESS_FIELD = 64, // IllegalAccessError.
+ VERIFY_ERROR_ACCESS_METHOD = 128, // IllegalAccessError.
+ VERIFY_ERROR_CLASS_CHANGE = 256, // IncompatibleClassChangeError.
+ VERIFY_ERROR_INSTANTIATION = 512, // InstantiationError.
// For opcodes that don't have complete verifier support (such as lambda opcodes),
// we need a way to continue execution at runtime without attempting to re-verify
// (since we know it will fail no matter what). Instead, run as the interpreter
@@ -85,9 +85,11 @@ enum VerifyError {
// on the fly.
//
// TODO: Once all new opcodes have implemented full verifier support, this can be removed.
- VERIFY_ERROR_FORCE_INTERPRETER = 10, // Skip the verification phase at runtime;
- // force the interpreter to do access checks.
- // (sets a soft fail at compile time).
+ VERIFY_ERROR_FORCE_INTERPRETER = 1024, // Skip the verification phase at runtime;
+ // force the interpreter to do access checks.
+ // (sets a soft fail at compile time).
+ VERIFY_ERROR_LOCKING = 2048, // Could not guarantee balanced locking. This should be
+ // punted to the interpreter with access checks.
};
std::ostream& operator<<(std::ostream& os, const VerifyError& rhs);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index bee5834c20..1df2428b09 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,6 +25,10 @@
namespace art {
namespace verifier {
+// Should we dump a warning on failures to verify balanced locking? That would be an indication to
+// developers that their code will be slow.
+static constexpr bool kDumpLockFailures = true;
+
inline const RegType& RegisterLine::GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const {
// The register index was validated during the static pass, so we don't need to check it here.
DCHECK_LT(vsrc, num_regs_);
@@ -167,12 +171,14 @@ inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t
return true;
}
-inline bool RegisterLine::VerifyMonitorStackEmpty(MethodVerifier* verifier) const {
+inline void RegisterLine::VerifyMonitorStackEmpty(MethodVerifier* verifier) const {
if (MonitorStackDepth() != 0) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
- return false;
- } else {
- return true;
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "expected empty monitor stack in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
}
}
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index bb6df76171..33c90e3000 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -344,14 +344,22 @@ void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object ("
<< reg_type << ")";
} else if (monitors_.size() >= 32) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: "
- << monitors_.size();
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "monitor-enter stack overflow while verifying "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
} else {
if (SetRegToLockDepth(reg_idx, monitors_.size())) {
monitors_.push_back(insn_idx);
} else {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected monitor-enter on register v" <<
- reg_idx;
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "unexpected monitor-enter on register v" << reg_idx << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
}
}
}
@@ -361,16 +369,21 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
if (!reg_type.IsReferenceTypes()) {
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
} else if (monitors_.empty()) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit stack underflow";
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "monitor-exit stack underflow while verifying "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
} else {
monitors_.pop_back();
if (!IsSetLockDepth(reg_idx, monitors_.size())) {
- // Bug 3215458: Locks and unlocks are on objects, if that object is a literal then before
- // format "036" the constant collector may create unlocks on the same object but referenced
- // via different registers.
- ((verifier->DexFileVersion() >= 36) ? verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
- : verifier->LogVerifyInfo())
- << "monitor-exit not unlocking the top of the monitor stack";
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "monitor-exit not unlocking the top of the monitor stack while verifying "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
} else {
// Record the register was unlocked
ClearRegToLockDepth(reg_idx, monitors_.size());
@@ -392,8 +405,13 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
}
if (monitors_.size() > 0 || incoming_line->monitors_.size() > 0) {
if (monitors_.size() != incoming_line->monitors_.size()) {
- LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
- << ", incoming depth=" << incoming_line->MonitorStackDepth() << ")";
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
+ << ", incoming depth=" << incoming_line->MonitorStackDepth() << ") in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
} else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
for (uint32_t idx = 0; idx < num_regs_; idx++) {
size_t depths = reg_to_lock_depths_.count(idx);
@@ -402,14 +420,35 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
if (depths == 0 || incoming_depths == 0) {
reg_to_lock_depths_.erase(idx);
} else {
- LOG(WARNING) << "mismatched stack depths for register v" << idx
- << ": " << depths << " != " << incoming_depths;
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched stack depths for register v" << idx
+ << ": " << depths << " != " << incoming_depths << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
+ break;
+ }
+ } else if (depths > 0) {
+ // Check whether they're actually the same levels.
+ uint32_t locked_levels = reg_to_lock_depths_.find(idx)->second;
+ uint32_t incoming_locked_levels = incoming_line->reg_to_lock_depths_.find(idx)->second;
+ if (locked_levels != incoming_locked_levels) {
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
+ << std::hex << locked_levels << std::dec << " != "
+ << std::hex << incoming_locked_levels << std::dec << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
break;
}
}
}
}
}
+
// Check whether "this" was initialized in both paths.
if (this_initialized_ && !incoming_line->this_initialized_) {
this_initialized_ = false;
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 41f1e28066..46db1c6a1c 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -185,7 +185,9 @@ class RegisterLine {
// Compare two register lines. Returns 0 if they match.
// Using this for a sort is unwise, since the value can change based on machine endianness.
int CompareLine(const RegisterLine* line2) const {
- DCHECK(monitors_ == line2->monitors_);
+ if (monitors_ != line2->monitors_) {
+ return 1;
+ }
// TODO: DCHECK(reg_to_lock_depths_ == line2->reg_to_lock_depths_);
return memcmp(&line_, &line2->line_, num_regs_ * sizeof(uint16_t));
}
@@ -298,8 +300,8 @@ class RegisterLine {
}
// We expect no monitors to be held at certain points, such a method returns. Verify the stack
- // is empty, failing and returning false if not.
- bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
+  // is empty, queueing a LOCKING error otherwise.
+ void VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/test/002-sleep/src/Main.java b/test/002-sleep/src/Main.java
index c1a2d83c52..55032fd0a7 100644
--- a/test/002-sleep/src/Main.java
+++ b/test/002-sleep/src/Main.java
@@ -2,8 +2,8 @@ public class Main {
static public void main(String[] args) throws Exception {
int millis = 1000;
- if (args.length != 0) {
- millis = Integer.parseInt(args[0]);
+ if (args.length > 1) {
+ millis = Integer.parseInt(args[1]);
}
System.out.println("Sleeping " + millis + " msec...");
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 810dda0e71..dd88db0b7c 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -20,7 +20,7 @@ import java.lang.reflect.Proxy;
public class Main {
public static void main(String[] args) {
- System.loadLibrary("arttest");
+ System.loadLibrary(args[0]);
testFindClassOnAttachedNativeThread();
testFindFieldOnAttachedNativeThread();
testReflectFieldGetFromAttachedNativeThreadNative();
diff --git a/test/004-ReferenceMap/src/Main.java b/test/004-ReferenceMap/src/Main.java
index f9a5498d9e..dacd748ec5 100644
--- a/test/004-ReferenceMap/src/Main.java
+++ b/test/004-ReferenceMap/src/Main.java
@@ -36,11 +36,8 @@ public class Main {
}
native int refmap(int x);
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
Main rm = new Main();
rm.f();
}
diff --git a/test/004-SignalTest/src/Main.java b/test/004-SignalTest/src/Main.java
index 8b1f49bacb..626691815a 100644
--- a/test/004-SignalTest/src/Main.java
+++ b/test/004-SignalTest/src/Main.java
@@ -24,8 +24,7 @@ public class Main {
}
public static void main(String[] args) {
- System.loadLibrary("arttest");
-
+ System.loadLibrary(args[0]);
System.out.println("init signal test");
initSignalTest();
try {
diff --git a/test/004-StackWalk/src/Main.java b/test/004-StackWalk/src/Main.java
index 9a1d0ab9f2..883ce2c9fe 100644
--- a/test/004-StackWalk/src/Main.java
+++ b/test/004-StackWalk/src/Main.java
@@ -87,11 +87,8 @@ public class Main {
native int stackmap(int x);
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
Main st = new Main();
st.$noinline$f();
}
diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src/Main.java
index d5b389f628..4eeae2f295 100644
--- a/test/004-ThreadStress/src/Main.java
+++ b/test/004-ThreadStress/src/Main.java
@@ -310,7 +310,8 @@ public class Main implements Runnable {
boolean dumpMap = false;
if (args != null) {
- for (int i = 0; i < args.length; i++) {
+ // args[0] is libarttest
+ for (int i = 1; i < args.length; i++) {
if (args[i].equals("-n")) {
i++;
numberOfThreads = Integer.parseInt(args[i]);
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index 818f5d9806..c93db50ab8 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -18,10 +18,6 @@ import java.lang.reflect.Field;
import sun.misc.Unsafe;
public class Main {
- static {
- System.loadLibrary("arttest");
- }
-
private static void check(int actual, int expected, String msg) {
if (actual != expected) {
System.out.println(msg + " : " + actual + " != " + expected);
@@ -51,6 +47,7 @@ public class Main {
}
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
Unsafe unsafe = getUnsafe();
check(unsafe.arrayBaseOffset(boolean[].class), vmArrayBaseOffset(boolean[].class),
"Unsafe.arrayBaseOffset(boolean[])");
diff --git a/test/051-thread/src/Main.java b/test/051-thread/src/Main.java
index b81273ea4e..2e26b22265 100644
--- a/test/051-thread/src/Main.java
+++ b/test/051-thread/src/Main.java
@@ -20,11 +20,8 @@ import java.util.ArrayList;
* Test some basic thread stuff.
*/
public class Main {
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
System.out.println("thread test starting");
testThreadCapacity();
testThreadDaemons();
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index bd606a6d4e..08ccf0ef14 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -594,6 +594,54 @@ public class Main {
Assert.assertEquals(Math.ceil(-2.5), -2.0d, 0.0);
Assert.assertEquals(Math.ceil(-2.9), -2.0d, 0.0);
Assert.assertEquals(Math.ceil(-3.0), -3.0d, 0.0);
+ // 2^52 - 1.5
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x432FFFFFFFFFFFFDl)),
+ Double.longBitsToDouble(0x432FFFFFFFFFFFFEl), 0.0);
+ // 2^52 - 0.5
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x432FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x4330000000000000l), 0.0);
+ // 2^52
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x4330000000000000l)),
+ Double.longBitsToDouble(0x4330000000000000l), 0.0);
+ // 2^53 - 1
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x433FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x433FFFFFFFFFFFFFl), 0.0);
+ // 2^53
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x4340000000000000l)),
+ Double.longBitsToDouble(0x4340000000000000l), 0.0);
+ // 2^63 - 2^10
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x43DFFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x43DFFFFFFFFFFFFFl), 0.0);
+ // 2^63
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x43E0000000000000l)),
+ Double.longBitsToDouble(0x43E0000000000000l), 0.0);
+ // 2^64
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0x43F0000000000000l)),
+ Double.longBitsToDouble(0x43F0000000000000l), 0.0);
+ // -(2^52 - 1.5)
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC32FFFFFFFFFFFFDl)),
+ Double.longBitsToDouble(0xC32FFFFFFFFFFFFCl), 0.0);
+ // -(2^52 - 0.5)
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC32FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC32FFFFFFFFFFFFEl), 0.0);
+ // -2^52
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC330000000000000l)),
+ Double.longBitsToDouble(0xC330000000000000l), 0.0);
+ // -(2^53 - 1)
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC33FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC33FFFFFFFFFFFFFl), 0.0);
+ // -2^53
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC340000000000000l)),
+ Double.longBitsToDouble(0xC340000000000000l), 0.0);
+ // -(2^63 - 2^10)
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC3DFFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC3DFFFFFFFFFFFFFl), 0.0);
+ // -2^63
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC3E0000000000000l)),
+ Double.longBitsToDouble(0xC3E0000000000000l), 0.0);
+ // -2^64
+ Assert.assertEquals(Math.ceil(Double.longBitsToDouble(0xC3F0000000000000l)),
+ Double.longBitsToDouble(0xC3F0000000000000l), 0.0);
Assert.assertEquals(Math.ceil(Double.NaN), Double.NaN, 0.0);
Assert.assertEquals(Math.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
Assert.assertEquals(Math.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
@@ -613,6 +661,54 @@ public class Main {
Assert.assertEquals(Math.floor(-2.5), -3.0d, 0.0);
Assert.assertEquals(Math.floor(-2.9), -3.0d, 0.0);
Assert.assertEquals(Math.floor(-3.0), -3.0d, 0.0);
+ // 2^52 - 1.5
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x432FFFFFFFFFFFFDl)),
+ Double.longBitsToDouble(0x432FFFFFFFFFFFFCl), 0.0);
+ // 2^52 - 0.5
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x432FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x432FFFFFFFFFFFFEl), 0.0);
+ // 2^52
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x4330000000000000l)),
+ Double.longBitsToDouble(0x4330000000000000l), 0.0);
+ // 2^53 - 1
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x433FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x433FFFFFFFFFFFFFl), 0.0);
+ // 2^53
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x4340000000000000l)),
+ Double.longBitsToDouble(0x4340000000000000l), 0.0);
+ // 2^63 - 2^10
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x43DFFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x43DFFFFFFFFFFFFFl), 0.0);
+ // 2^63
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x43E0000000000000l)),
+ Double.longBitsToDouble(0x43E0000000000000l), 0.0);
+ // 2^64
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0x43F0000000000000l)),
+ Double.longBitsToDouble(0x43F0000000000000l), 0.0);
+ // -(2^52 - 1.5)
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC32FFFFFFFFFFFFDl)),
+ Double.longBitsToDouble(0xC32FFFFFFFFFFFFEl), 0.0);
+ // -(2^52 - 0.5)
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC32FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC330000000000000l), 0.0);
+ // -2^52
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC330000000000000l)),
+ Double.longBitsToDouble(0xC330000000000000l), 0.0);
+ // -(2^53 - 1)
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC33FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC33FFFFFFFFFFFFFl), 0.0);
+ // -2^53
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC340000000000000l)),
+ Double.longBitsToDouble(0xC340000000000000l), 0.0);
+ // -(2^63 - 2^10)
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC3DFFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC3DFFFFFFFFFFFFFl), 0.0);
+ // -2^63
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC3E0000000000000l)),
+ Double.longBitsToDouble(0xC3E0000000000000l), 0.0);
+ // -2^64
+ Assert.assertEquals(Math.floor(Double.longBitsToDouble(0xC3F0000000000000l)),
+ Double.longBitsToDouble(0xC3F0000000000000l), 0.0);
Assert.assertEquals(Math.floor(Double.NaN), Double.NaN, 0.0);
Assert.assertEquals(Math.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
Assert.assertEquals(Math.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
@@ -632,6 +728,54 @@ public class Main {
Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+ // 2^52 - 1.5
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x432FFFFFFFFFFFFDl)),
+ Double.longBitsToDouble(0x432FFFFFFFFFFFFCl), 0.0);
+ // 2^52 - 0.5
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x432FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x4330000000000000l), 0.0);
+ // 2^52
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x4330000000000000l)),
+ Double.longBitsToDouble(0x4330000000000000l), 0.0);
+ // 2^53 - 1
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x433FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x433FFFFFFFFFFFFFl), 0.0);
+ // 2^53
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x4340000000000000l)),
+ Double.longBitsToDouble(0x4340000000000000l), 0.0);
+ // 2^63 - 2^10
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x43DFFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0x43DFFFFFFFFFFFFFl), 0.0);
+ // 2^63
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x43E0000000000000l)),
+ Double.longBitsToDouble(0x43E0000000000000l), 0.0);
+ // 2^64
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0x43F0000000000000l)),
+ Double.longBitsToDouble(0x43F0000000000000l), 0.0);
+ // -(2^52 - 1.5)
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC32FFFFFFFFFFFFDl)),
+ Double.longBitsToDouble(0xC32FFFFFFFFFFFFCl), 0.0);
+ // -(2^52 - 0.5)
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC32FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC330000000000000l), 0.0);
+ // -2^52
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC330000000000000l)),
+ Double.longBitsToDouble(0xC330000000000000l), 0.0);
+ // -(2^53 - 1)
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC33FFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC33FFFFFFFFFFFFFl), 0.0);
+ // -2^53
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC340000000000000l)),
+ Double.longBitsToDouble(0xC340000000000000l), 0.0);
+ // -(2^63 - 2^10)
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC3DFFFFFFFFFFFFFl)),
+ Double.longBitsToDouble(0xC3DFFFFFFFFFFFFFl), 0.0);
+ // -2^63
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC3E0000000000000l)),
+ Double.longBitsToDouble(0xC3E0000000000000l), 0.0);
+ // -2^64
+ Assert.assertEquals(Math.rint(Double.longBitsToDouble(0xC3F0000000000000l)),
+ Double.longBitsToDouble(0xC3F0000000000000l), 0.0);
Assert.assertEquals(Math.rint(Double.NaN), Double.NaN, 0.0);
Assert.assertEquals(Math.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
Assert.assertEquals(Math.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
diff --git a/test/088-monitor-verification/expected.txt b/test/088-monitor-verification/expected.txt
index 07f5b0be82..13b8c73970 100644
--- a/test/088-monitor-verification/expected.txt
+++ b/test/088-monitor-verification/expected.txt
@@ -1,7 +1,12 @@
recursiveSync ok
nestedMayThrow ok
constantLock ok
-excessiveNesting ok
notNested ok
twoPath ok
triplet ok
+OK
+TooDeep
+NotStructuredOverUnlock
+NotStructuredUnderUnlock
+UnbalancedJoin
+UnbalancedStraight
diff --git a/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali b/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali
new file mode 100644
index 0000000000..aa0c2d5a13
--- /dev/null
+++ b/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali
@@ -0,0 +1,21 @@
+.class public LNotStructuredOverUnlock;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsInterpreted()V
+
+ # Lock twice, but unlock thrice.
+
+ monitor-enter v2 # 1
+ monitor-enter v2 # 2
+
+ monitor-exit v2 # 1
+ monitor-exit v2 # 2
+ monitor-exit v2 # 3
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali b/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali
new file mode 100644
index 0000000000..2c31fdaa85
--- /dev/null
+++ b/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali
@@ -0,0 +1,21 @@
+.class public LNotStructuredUnderUnlock;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsInterpreted()V
+
+ # Lock thrice, but only unlock twice.
+
+ monitor-enter v2 # 1
+ monitor-enter v2 # 2
+ monitor-enter v2 # 3
+
+ monitor-exit v2 # 1
+ monitor-exit v2 # 2
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/smali/OK.smali b/test/088-monitor-verification/smali/OK.smali
new file mode 100644
index 0000000000..596798d80c
--- /dev/null
+++ b/test/088-monitor-verification/smali/OK.smali
@@ -0,0 +1,68 @@
+.class public LOK;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {v1, v2}, LOK;->runNoMonitors(Ljava/lang/Object;Ljava/lang/Object;)V
+
+ invoke-static {v1, v2}, LOK;->runStraightLine(Ljava/lang/Object;Ljava/lang/Object;)V
+
+ invoke-static {v1, v2}, LOK;->runBalancedJoin(Ljava/lang/Object;Ljava/lang/Object;)V
+
+ return-void
+
+.end method
+
+
+
+.method public static runNoMonitors(Ljava/lang/Object;Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsManaged()V
+
+ return-void
+
+.end method
+
+.method public static runStraightLine(Ljava/lang/Object;Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsManaged()V
+
+ monitor-enter v1 # 1
+ monitor-enter v2 # 2
+
+ monitor-exit v2 # 2
+ monitor-exit v1 # 1
+
+ return-void
+
+.end method
+
+.method public static runBalancedJoin(Ljava/lang/Object;Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsManaged()V
+
+ monitor-enter v1 # 1
+
+ if-eqz v2, :Lnull
+
+:LnotNull
+
+ monitor-enter v2 # 2
+ goto :Lend
+
+:Lnull
+ monitor-enter v2 # 2
+
+:Lend
+
+ monitor-exit v2 # 2
+ monitor-exit v1 # 1
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/smali/TooDeep.smali b/test/088-monitor-verification/smali/TooDeep.smali
new file mode 100644
index 0000000000..1a8f2f06e8
--- /dev/null
+++ b/test/088-monitor-verification/smali/TooDeep.smali
@@ -0,0 +1,82 @@
+.class public LTooDeep;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;)V
+ .registers 3
+
+ # Lock depth is 33, which is more than the verifier supports. This should have been punted to
+ # the interpreter.
+ invoke-static {}, LMain;->assertCallerIsInterpreted()V
+
+ monitor-enter v2 # 1
+ monitor-enter v2 # 2
+ monitor-enter v2 # 3
+ monitor-enter v2 # 4
+ monitor-enter v2 # 5
+ monitor-enter v2 # 6
+ monitor-enter v2 # 7
+ monitor-enter v2 # 8
+ monitor-enter v2 # 9
+ monitor-enter v2 # 10
+ monitor-enter v2 # 11
+ monitor-enter v2 # 12
+ monitor-enter v2 # 13
+ monitor-enter v2 # 14
+ monitor-enter v2 # 15
+ monitor-enter v2 # 16
+ monitor-enter v2 # 17
+ monitor-enter v2 # 18
+ monitor-enter v2 # 19
+ monitor-enter v2 # 20
+ monitor-enter v2 # 21
+ monitor-enter v2 # 22
+ monitor-enter v2 # 23
+ monitor-enter v2 # 24
+ monitor-enter v2 # 25
+ monitor-enter v2 # 26
+ monitor-enter v2 # 27
+ monitor-enter v2 # 28
+ monitor-enter v2 # 29
+ monitor-enter v2 # 30
+ monitor-enter v2 # 31
+ monitor-enter v2 # 32
+ monitor-enter v2 # 33
+
+ monitor-exit v2 # 1
+ monitor-exit v2 # 2
+ monitor-exit v2 # 3
+ monitor-exit v2 # 4
+ monitor-exit v2 # 5
+ monitor-exit v2 # 6
+ monitor-exit v2 # 7
+ monitor-exit v2 # 8
+ monitor-exit v2 # 9
+ monitor-exit v2 # 10
+ monitor-exit v2 # 11
+ monitor-exit v2 # 12
+ monitor-exit v2 # 13
+ monitor-exit v2 # 14
+ monitor-exit v2 # 15
+ monitor-exit v2 # 16
+ monitor-exit v2 # 17
+ monitor-exit v2 # 18
+ monitor-exit v2 # 19
+ monitor-exit v2 # 20
+ monitor-exit v2 # 21
+ monitor-exit v2 # 22
+ monitor-exit v2 # 23
+ monitor-exit v2 # 24
+ monitor-exit v2 # 25
+ monitor-exit v2 # 26
+ monitor-exit v2 # 27
+ monitor-exit v2 # 28
+ monitor-exit v2 # 29
+ monitor-exit v2 # 30
+ monitor-exit v2 # 31
+ monitor-exit v2 # 32
+ monitor-exit v2 # 33
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/smali/UnbalancedJoin.smali b/test/088-monitor-verification/smali/UnbalancedJoin.smali
new file mode 100644
index 0000000000..da8f7732af
--- /dev/null
+++ b/test/088-monitor-verification/smali/UnbalancedJoin.smali
@@ -0,0 +1,31 @@
+.class public LUnbalancedJoin;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsInterpreted()V
+
+ if-eqz v2, :Lnull
+
+:LnotNull
+
+ monitor-enter v1 # 1
+ monitor-enter v2 # 2
+ goto :Lend
+
+:Lnull
+ monitor-enter v2 # 1
+ monitor-enter v1 # 2
+
+:Lend
+
+ # Lock levels are "opposite" for the joined flows.
+
+ monitor-exit v2 # 2
+ monitor-exit v1 # 1
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/smali/UnbalancedStraight.smali b/test/088-monitor-verification/smali/UnbalancedStraight.smali
new file mode 100644
index 0000000000..68edb6c783
--- /dev/null
+++ b/test/088-monitor-verification/smali/UnbalancedStraight.smali
@@ -0,0 +1,18 @@
+.class public LUnbalancedStraight;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;Ljava/lang/Object;)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertCallerIsInterpreted()V
+
+ monitor-enter v1 # 1
+ monitor-enter v2 # 2
+
+ monitor-exit v1 # 1 Unbalanced unlock.
+ monitor-exit v2 # 2
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index b60c71e668..53b72e9f5c 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
/*
* Entry point and tests that are expected to succeed.
@@ -23,6 +26,7 @@ public class Main {
* Drives tests.
*/
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
Main m = new Main();
m.recursiveSync(0);
@@ -38,11 +42,6 @@ public class Main {
System.out.println("constantLock ok");
m.notExcessiveNesting();
- try {
- TooDeep.excessiveNesting();
- System.err.println("excessiveNesting did not throw");
- } catch (VerifyError ve) {}
- System.out.println("excessiveNesting ok");
m.notNested();
System.out.println("notNested ok");
@@ -55,6 +54,8 @@ public class Main {
m.triplet(obj1, obj2, 0);
System.out.println("triplet ok");
+
+ runSmaliTests();
}
/**
@@ -216,4 +217,62 @@ public class Main {
doNothing(localObj);
}
+
+ // Smali testing code.
+ private static void runSmaliTests() {
+ runTest("OK", new Object[] { new Object(), new Object() }, null);
+ runTest("TooDeep", new Object[] { new Object() }, null);
+ runTest("NotStructuredOverUnlock", new Object[] { new Object() },
+ IllegalMonitorStateException.class);
+ runTest("NotStructuredUnderUnlock", new Object[] { new Object() }, null);
+ // TODO: new IllegalMonitorStateException());
+ runTest("UnbalancedJoin", new Object[] { new Object(), new Object() }, null);
+ runTest("UnbalancedStraight", new Object[] { new Object(), new Object() }, null);
+ }
+
+ private static void runTest(String className, Object[] parameters, Class<?> excType) {
+ System.out.println(className);
+ try {
+ Class<?> c = Class.forName(className);
+
+ Method[] methods = c.getDeclaredMethods();
+
+ // For simplicity we assume that test methods are not overloaded. So searching by name
+ // will give us the method we need to run.
+ Method method = null;
+ for (Method m : methods) {
+ if (m.getName().equals("run")) {
+ method = m;
+ break;
+ }
+ }
+
+ if (method == null) {
+ System.out.println("Could not find test method for " + className);
+ } else if (!Modifier.isStatic(method.getModifiers())) {
+ System.out.println("Test method for " + className + " is not static.");
+ } else {
+ method.invoke(null, parameters);
+ if (excType != null) {
+ System.out.println("Expected an exception in " + className);
+ }
+ }
+ } catch (Throwable exc) {
+ if (excType == null) {
+ System.out.println("Did not expect exception " + exc + " for " + className);
+ exc.printStackTrace(System.out);
+ } else if (exc instanceof InvocationTargetException && exc.getCause() != null &&
+ exc.getCause().getClass().equals(excType)) {
+ // Expected exception is wrapped in InvocationTargetException.
+ } else if (!excType.equals(exc.getClass())) {
+ System.out.println("Expected " + excType.getName() + ", but got " + exc.getClass());
+ } else {
+ // Expected exception, do nothing.
+ }
+ }
+ }
+
+ // Helpers for the smali code.
+ public static native void assertCallerIsInterpreted();
+ public static native void assertCallerIsManaged();
}
diff --git a/test/088-monitor-verification/src/TooDeep.java b/test/088-monitor-verification/src/TooDeep.java
deleted file mode 100644
index 76192e55c6..0000000000
--- a/test/088-monitor-verification/src/TooDeep.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/**
- * The class has a method with too many levels of nested "synchronized"
- * blocks. The verifier will reject it.
- *
- * (It would be perfectly okay if the verifier *didn't* reject this.
- * The goal here is just to exercise the failure path. It also serves
- * as a check to see if the monitor checks are enabled.)
- */
-public class TooDeep {
-
- public static void excessiveNesting() {
- synchronized (TooDeep.class) { // 1
- synchronized (TooDeep.class) { // 2
- synchronized (TooDeep.class) { // 3
- synchronized (TooDeep.class) { // 4
- synchronized (TooDeep.class) { // 5
- synchronized (TooDeep.class) { // 6
- synchronized (TooDeep.class) { // 7
- synchronized (TooDeep.class) { // 8
- synchronized (TooDeep.class) { // 9
- synchronized (TooDeep.class) { // 10
- synchronized (TooDeep.class) { // 11
- synchronized (TooDeep.class) { // 12
- synchronized (TooDeep.class) { // 13
- synchronized (TooDeep.class) { // 14
- synchronized (TooDeep.class) { // 15
- synchronized (TooDeep.class) { // 16
- synchronized (TooDeep.class) { // 17
- synchronized (TooDeep.class) { // 18
- synchronized (TooDeep.class) { // 19
- synchronized (TooDeep.class) { // 20
- synchronized (TooDeep.class) { // 21
- synchronized (TooDeep.class) { // 22
- synchronized (TooDeep.class) { // 23
- synchronized (TooDeep.class) { // 24
- synchronized (TooDeep.class) { // 25
- synchronized (TooDeep.class) { // 26
- synchronized (TooDeep.class) { // 27
- synchronized (TooDeep.class) { // 28
- synchronized (TooDeep.class) { // 29
- synchronized (TooDeep.class) { // 30
- synchronized (TooDeep.class) { // 31
- synchronized (TooDeep.class) { // 32
- synchronized (TooDeep.class) { // 33
- }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
- }
-}
diff --git a/test/088-monitor-verification/stack_inspect.cc b/test/088-monitor-verification/stack_inspect.cc
new file mode 100644
index 0000000000..e2899c3d68
--- /dev/null
+++ b/test/088-monitor-verification/stack_inspect.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "base/logging.h"
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "nth_caller_visitor.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// public static native void assertCallerIsInterpreted();
+
+extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsInterpreted(JNIEnv* env, jclass) {
+ LOG(INFO) << "assertCallerIsInterpreted";
+
+ ScopedObjectAccess soa(env);
+ NthCallerVisitor caller(soa.Self(), 1, false);
+ caller.WalkStack();
+ CHECK(caller.caller != nullptr);
+ LOG(INFO) << PrettyMethod(caller.caller);
+ CHECK(caller.GetCurrentShadowFrame() != nullptr);
+}
+
+// public static native void assertCallerIsManaged();
+
+extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, jclass cls) {
+ // Note: needs some smarts to not fail if there is no managed code, at all.
+ LOG(INFO) << "assertCallerIsManaged";
+
+ ScopedObjectAccess soa(env);
+
+ mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+ const DexFile& dex_file = klass->GetDexFile();
+ const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ if (oat_dex_file == nullptr) {
+ // No oat file, this must be a test configuration that doesn't compile at all. Ignore that the
+ // result will be that we're running the interpreter.
+ return;
+ }
+
+ NthCallerVisitor caller(soa.Self(), 1, false);
+ caller.WalkStack();
+ CHECK(caller.caller != nullptr);
+ LOG(INFO) << PrettyMethod(caller.caller);
+
+ if (caller.GetCurrentShadowFrame() == nullptr) {
+ // Not a shadow frame, this looks good.
+ return;
+ }
+
+ // This could be an interpret-only or a verify-at-runtime compilation, or a read-barrier variant,
+ // or... It's not really safe to just reject now. Let's look at the access flags. If the method
+ // was successfully verified, its access flags should be set to mark it preverified, except when
+ // we're running soft-fail tests.
+ if (Runtime::Current()->IsVerificationSoftFail()) {
+ // Soft-fail config. Everything should be running with interpreter access checks, potentially.
+ return;
+ }
+ CHECK(caller.caller->IsPreverified());
+}
+
+} // namespace art
diff --git a/test/101-fibonacci/src/Main.java b/test/101-fibonacci/src/Main.java
index 3773e1b123..c594edbded 100644
--- a/test/101-fibonacci/src/Main.java
+++ b/test/101-fibonacci/src/Main.java
@@ -43,7 +43,7 @@ class Main {
}
public static void main(String[] args) {
- String arg = (args.length > 0) ? args[0] : "10";
+ String arg = (args.length > 1) ? args[1] : "10";
try {
int x = Integer.parseInt(arg);
int y = fibonacci(x);
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index 32a9975c4b..ea2045b86c 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -20,7 +20,9 @@ ARGS=${@}
LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
ln -s ${LIBPATH}/libnativebridgetest.so .
touch libarttest.so
+touch libarttestd.so
ln -s ${LIBPATH}/libarttest.so libarttest2.so
+ln -s ${LIBPATH}/libarttestd.so libarttestd2.so
# pwd likely has /, so it's a pain to put that into a sed rule.
LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
diff --git a/test/115-native-bridge/src/NativeBridgeMain.java b/test/115-native-bridge/src/NativeBridgeMain.java
index 25390f7504..c298b1b772 100644
--- a/test/115-native-bridge/src/NativeBridgeMain.java
+++ b/test/115-native-bridge/src/NativeBridgeMain.java
@@ -189,7 +189,7 @@ public class NativeBridgeMain {
static public void main(String[] args) throws Exception {
System.out.println("Ready for native bridge tests.");
- System.loadLibrary("arttest");
+ System.loadLibrary(args[0]);
Main.main(null);
}
diff --git a/test/116-nodex2oat/src/Main.java b/test/116-nodex2oat/src/Main.java
index 37ac9d5b78..086ffb9295 100644
--- a/test/116-nodex2oat/src/Main.java
+++ b/test/116-nodex2oat/src/Main.java
@@ -16,6 +16,7 @@
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
System.out.println(
"Has oat is " + hasOat() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
@@ -26,10 +27,6 @@ public class Main {
}
}
- static {
- System.loadLibrary("arttest");
- }
-
private native static boolean hasOat();
private native static boolean isDex2OatEnabled();
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index 7bc9dbb947..223e12084d 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -16,6 +16,8 @@
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
boolean executable_correct = (isPic() ?
hasExecutableOat() == true :
hasExecutableOat() == isDex2OatEnabled());
@@ -41,10 +43,6 @@ public class Main {
return ret.substring(0, ret.length() - 1);
}
- static {
- System.loadLibrary("arttest");
- }
-
private native static boolean isDex2OatEnabled();
private native static boolean isPic();
diff --git a/test/118-noimage-dex2oat/src/Main.java b/test/118-noimage-dex2oat/src/Main.java
index 9bf5bb3b08..dba9166ad5 100644
--- a/test/118-noimage-dex2oat/src/Main.java
+++ b/test/118-noimage-dex2oat/src/Main.java
@@ -19,6 +19,7 @@ import java.lang.reflect.Method;
public class Main {
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
boolean hasImage = hasImage();
String instructionSet = VMRuntime.getCurrentInstructionSet();
boolean isBootClassPathOnDisk = VMRuntime.isBootClassPathOnDisk(instructionSet);
@@ -41,10 +42,6 @@ public class Main {
testB18485243();
}
- static {
- System.loadLibrary("arttest");
- }
-
private native static boolean hasImage();
private native static boolean isImageDex2OatEnabled();
diff --git a/test/119-noimage-patchoat/src/Main.java b/test/119-noimage-patchoat/src/Main.java
index 11c736a7d6..6a70f5885b 100644
--- a/test/119-noimage-patchoat/src/Main.java
+++ b/test/119-noimage-patchoat/src/Main.java
@@ -16,6 +16,7 @@
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
boolean hasImage = hasImage();
System.out.println(
"Has image is " + hasImage + ", is image dex2oat enabled is "
@@ -28,10 +29,6 @@ public class Main {
}
}
- static {
- System.loadLibrary("arttest");
- }
-
private native static boolean hasImage();
private native static boolean isImageDex2OatEnabled();
diff --git a/test/131-structural-change/src/Main.java b/test/131-structural-change/src/Main.java
index 8dfa2808a2..6cbbd12387 100644
--- a/test/131-structural-change/src/Main.java
+++ b/test/131-structural-change/src/Main.java
@@ -23,6 +23,7 @@ import java.lang.reflect.Method;
*/
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
new Main().run();
}
@@ -49,9 +50,5 @@ public class Main {
System.out.println("Done.");
}
- static {
- System.loadLibrary("arttest");
- }
-
private native static boolean hasOat();
}
diff --git a/test/134-nodex2oat-nofallback/src/Main.java b/test/134-nodex2oat-nofallback/src/Main.java
index 37ac9d5b78..086ffb9295 100644
--- a/test/134-nodex2oat-nofallback/src/Main.java
+++ b/test/134-nodex2oat-nofallback/src/Main.java
@@ -16,6 +16,7 @@
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
System.out.println(
"Has oat is " + hasOat() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
@@ -26,10 +27,6 @@ public class Main {
}
}
- static {
- System.loadLibrary("arttest");
- }
-
private native static boolean hasOat();
private native static boolean isDex2OatEnabled();
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index 6cd187a033..dc3ef7eb07 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -41,6 +41,7 @@ public class Main implements Comparator<Main> {
}
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
boolean secondary = false;
if (args.length > 0 && args[args.length - 1].equals("--secondary")) {
secondary = true;
@@ -48,10 +49,6 @@ public class Main implements Comparator<Main> {
new Main(secondary).run();
}
- static {
- System.loadLibrary("arttest");
- }
-
private void run() {
if (secondary) {
if (!TEST_REMOTE_UNWINDING) {
diff --git a/test/139-register-natives/src/Main.java b/test/139-register-natives/src/Main.java
index 35b2f9cabb..8dd2131670 100644
--- a/test/139-register-natives/src/Main.java
+++ b/test/139-register-natives/src/Main.java
@@ -16,15 +16,12 @@
public class Main {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
testRegistration1();
testRegistration2();
testRegistration3();
}
- static {
- System.loadLibrary("arttest");
- }
-
// Test that a subclass' method is registered instead of a superclass' method.
private static void testRegistration1() {
registerNatives(TestSub.class);
diff --git a/test/454-get-vreg/src/Main.java b/test/454-get-vreg/src/Main.java
index df07d441a8..95d4190c4d 100644
--- a/test/454-get-vreg/src/Main.java
+++ b/test/454-get-vreg/src/Main.java
@@ -36,11 +36,8 @@ public class Main {
return 42;
}
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
Main rm = new Main();
if (rm.testSimpleVReg(1, 1.0f, (short)2, true, (byte)3, 'c') != 43) {
throw new Error("Expected 43");
diff --git a/test/455-set-vreg/src/Main.java b/test/455-set-vreg/src/Main.java
index 2172d9212d..4db9d66f94 100644
--- a/test/455-set-vreg/src/Main.java
+++ b/test/455-set-vreg/src/Main.java
@@ -40,11 +40,8 @@ public class Main {
native void doNativeCallSetVReg();
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
Main rm = new Main();
int intExpected = 5 - 4 - 3 - 2 - 1;
int intResult = rm.testIntVReg(0, 0, 0, 0, 0);
diff --git a/test/457-regs/src/Main.java b/test/457-regs/src/Main.java
index 0d82033195..3b8df443ff 100644
--- a/test/457-regs/src/Main.java
+++ b/test/457-regs/src/Main.java
@@ -22,6 +22,8 @@ public class Main {
class InnerClass {}
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
Class<?> c = Class.forName("PhiLiveness");
Method m = c.getMethod("mergeOk", boolean.class, byte.class);
m.invoke(null, new Boolean(true), new Byte((byte)2));
@@ -38,8 +40,4 @@ public class Main {
m = c.getMethod("phiAllEquivalents", Main.class);
m.invoke(null, new Main());
}
-
- static {
- System.loadLibrary("arttest");
- }
}
diff --git a/test/461-get-reference-vreg/src/Main.java b/test/461-get-reference-vreg/src/Main.java
index a94c6fb381..f7d43568d5 100644
--- a/test/461-get-reference-vreg/src/Main.java
+++ b/test/461-get-reference-vreg/src/Main.java
@@ -38,11 +38,8 @@ public class Main {
native int doNativeCallRef();
static native int doStaticNativeCallRef();
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
Main rm = new Main();
if (rm.testThisWithInstanceCall() != 1) {
throw new Error("Expected 1");
diff --git a/test/466-get-live-vreg/src/Main.java b/test/466-get-live-vreg/src/Main.java
index 851506bf8a..d036a24459 100644
--- a/test/466-get-live-vreg/src/Main.java
+++ b/test/466-get-live-vreg/src/Main.java
@@ -48,11 +48,8 @@ public class Main {
static native void doStaticNativeCallLiveVreg();
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
if (testLiveArgument(staticField3) != staticField3) {
throw new Error("Expected " + staticField3);
}
diff --git a/test/474-fp-sub-neg/expected.txt b/test/474-fp-sub-neg/expected.txt
index 1c15abba3d..1c7ded3ca8 100644
--- a/test/474-fp-sub-neg/expected.txt
+++ b/test/474-fp-sub-neg/expected.txt
@@ -1,6 +1,13 @@
-0.0
+-0.0
+0.0
+0.0
0.0
0.0
-0.0
+-0.0
+0.0
+0.0
0.0
0.0
+d 0.0
diff --git a/test/474-fp-sub-neg/src/Main.java b/test/474-fp-sub-neg/src/Main.java
index c190e8e40b..796d56c30a 100644
--- a/test/474-fp-sub-neg/src/Main.java
+++ b/test/474-fp-sub-neg/src/Main.java
@@ -17,33 +17,58 @@
public class Main {
public static void floatTest() {
float f = 0;
+ float nf = -0;
float fc = 1f;
for (int i = 0; i < 2; i++) {
f -= fc;
f = -f;
+ nf -= fc;
+ nf = -nf;
}
System.out.println(f);
+ System.out.println(nf);
System.out.println(f + 0f);
System.out.println(f - (-0f));
+ System.out.println(-f - (-nf));
+ System.out.println(-f + (-nf));
}
public static void doubleTest() {
double d = 0;
+ double nd = -0;
double dc = 1f;
for (int i = 0; i < 2; i++) {
d -= dc;
d = -d;
+ nd -= dc;
+ nd = -nd;
}
System.out.println(d);
+ System.out.println(nd);
System.out.println(d + 0f);
System.out.println(d - (-0f));
+ System.out.println(-d - (-nd));
+ System.out.println(-d + (-nd));
+ }
+
+ public static void bug_1() {
+ int i4=18, i3=-48959;
+ float d;
+ float f=-0.0f;
+ float a=0.0f;
+
+ d = -f + (-a);
+ f += i4 * i3;
+
+ System.out.println("d " + d);
}
public static void main(String[] args) {
doubleTest();
floatTest();
+ bug_1();
}
}
diff --git a/test/497-inlining-and-class-loader/expected.txt b/test/497-inlining-and-class-loader/expected.txt
index 3e1d85e309..f5b9fe07de 100644
--- a/test/497-inlining-and-class-loader/expected.txt
+++ b/test/497-inlining-and-class-loader/expected.txt
@@ -1,7 +1,7 @@
java.lang.Exception
- at Main.$noinline$bar(Main.java:127)
+ at Main.$noinline$bar(Main.java:124)
at Level2.$inline$bar(Level1.java:25)
at Level1.$inline$bar(Level1.java:19)
at LoadedByMyClassLoader.bar(Main.java:82)
at java.lang.reflect.Method.invoke(Native Method)
- at Main.main(Main.java:101)
+ at Main.main(Main.java:98)
diff --git a/test/497-inlining-and-class-loader/src/Main.java b/test/497-inlining-and-class-loader/src/Main.java
index 0f7eb599cb..832b1f08ee 100644
--- a/test/497-inlining-and-class-loader/src/Main.java
+++ b/test/497-inlining-and-class-loader/src/Main.java
@@ -84,11 +84,8 @@ class LoadedByMyClassLoader {
}
class Main {
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
// Clone resolved methods, to restore the original version just
// before we walk the stack in $noinline$bar.
savedResolvedMethods = cloneResolvedMethods(Main.class);
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index fcb9f8a779..82f8c79512 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -25,6 +25,7 @@ LIBARTTEST_COMMON_SRC_FILES := \
004-StackWalk/stack_walk_jni.cc \
004-UnsafeTest/unsafe_test.cc \
051-thread/thread_test.cc \
+ 088-monitor-verification/stack_inspect.cc \
116-nodex2oat/nodex2oat.cc \
117-nopatchoat/nopatchoat.cc \
118-noimage-dex2oat/noimage-dex2oat.cc \
@@ -38,8 +39,10 @@ LIBARTTEST_COMMON_SRC_FILES := \
497-inlining-and-class-loader/clear_dex_cache.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
+ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
ifdef TARGET_2ND_ARCH
ART_TARGET_LIBARTTEST_$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so
+ ART_TARGET_LIBARTTEST_$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttestd.so
endif
# $(1): target or host
@@ -49,17 +52,23 @@ define build-libarttest
$$(error expected target or host for argument 1, received $(1))
endif
endif
+ ifneq ($(2),d)
+ ifneq ($(2),)
+ $$(error d or empty for argument 2, received $(2))
+ endif
+ endif
art_target_or_host := $(1)
+ suffix := $(2)
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- LOCAL_MODULE := libarttest
+ LOCAL_MODULE := libarttest$$(suffix)
ifeq ($$(art_target_or_host),target)
LOCAL_MODULE_TAGS := tests
endif
LOCAL_SRC_FILES := $(LIBARTTEST_COMMON_SRC_FILES)
- LOCAL_SHARED_LIBRARIES += libartd libbacktrace
+ LOCAL_SHARED_LIBRARIES += libart$$(suffix) libbacktrace
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libarttest.mk
@@ -84,13 +93,16 @@ define build-libarttest
# Clear locally used variables.
art_target_or_host :=
+ suffix :=
endef
ifeq ($(ART_BUILD_TARGET),true)
- $(eval $(call build-libarttest,target))
+ $(eval $(call build-libarttest,target,))
+ $(eval $(call build-libarttest,target,d))
endif
ifeq ($(ART_BUILD_HOST),true)
- $(eval $(call build-libarttest,host))
+ $(eval $(call build-libarttest,host,))
+ $(eval $(call build-libarttest,host,d))
endif
# Clear locally used variables.
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 4e6df6ca79..439e42331c 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -378,41 +378,6 @@ ifneq (,$(filter trace stream,$(TRACE_TYPES)))
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TRACING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
-TEST_ART_BROKEN_TRACING_RUN_TESTS :=
-
-# The following tests use libarttest.so, which is linked against libartd.so, so will
-# not work when libart.so is the one loaded.
-# TODO: Find a way to run these tests in ndebug mode.
-TEST_ART_BROKEN_NDEBUG_TESTS := \
- 004-JniTest \
- 004-ReferenceMap \
- 004-SignalTest \
- 004-StackWalk \
- 004-UnsafeTest \
- 051-thread \
- 115-native-bridge \
- 116-nodex2oat \
- 117-nopatchoat \
- 118-noimage-dex2oat \
- 119-noimage-patchoat \
- 131-structural-change \
- 137-cfi \
- 139-register-natives \
- 454-get-vreg \
- 455-set-vreg \
- 457-regs \
- 461-get-reference-vreg \
- 466-get-live-vreg \
- 497-inlining-and-class-loader \
-
-ifneq (,$(filter ndebug,$(RUN_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
- $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NDEBUG_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_NDEBUG_TESTS :=
-
# Known broken tests for the interpreter.
# CFI unwinding expects managed frames.
TEST_ART_BROKEN_INTERPRETER_RUN_TESTS := \
@@ -602,8 +567,10 @@ TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
# Also need libarttest.
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
ifdef TARGET_2ND_ARCH
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttestd.so
endif
# Also need libnativebridgetest.
@@ -617,12 +584,14 @@ endif
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
endif
diff --git a/test/StackWalk2/StackWalk2.java b/test/StackWalk2/StackWalk2.java
index a879b46447..5e7b22c252 100644
--- a/test/StackWalk2/StackWalk2.java
+++ b/test/StackWalk2/StackWalk2.java
@@ -50,11 +50,8 @@ public class StackWalk2 {
native int refmap2(int x);
- static {
- System.loadLibrary("arttest");
- }
-
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
StackWalk2 st = new StackWalk2();
st.f();
}
diff --git a/test/dexdump/run-all-tests b/test/dexdump/run-all-tests
index d9f1e9649d..9cf7ab6cea 100755
--- a/test/dexdump/run-all-tests
+++ b/test/dexdump/run-all-tests
@@ -43,7 +43,7 @@ DEXDFLAGS1="-dfh"
DEXDFLAGS2="-l xml"
# Set up dexlist binary and flags to test.
-DEXL="${ANDROID_HOST_OUT}/bin/dexlist2"
+DEXL="${ANDROID_HOST_OUT}/bin/dexlist"
DEXLFLAGS=""
# Run the tests.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index a1af5774f3..39dc0301d9 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -47,6 +47,7 @@ ZYGOTE=""
DEX_VERIFY=""
USE_DEX2OAT_AND_PATCHOAT="y"
INSTRUCTION_SET_FEATURES=""
+ARGS=""
while true; do
if [ "x$1" = "x--quiet" ]; then
@@ -60,6 +61,14 @@ while true; do
fi
LIB="$1"
shift
+ elif [ "x$1" = "x--testlib" ]; then
+ shift
+ if [ "x$1" = "x" ]; then
+ echo "$0 missing argument to --testlib" 1>&2
+ exit 1
+ fi
+ ARGS="${ARGS} $1"
+ shift
elif [ "x$1" = "x-Xcompiler-option" ]; then
shift
option="$1"
@@ -369,7 +378,7 @@ dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$INT_OPTS \
$DEBUGGER_OPTS \
$DALVIKVM_BOOT_OPT \
- -cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN"
+ -cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN $ARGS"
# Remove whitespace.
dex2oat_cmdline=$(echo $dex2oat_cmdline)
diff --git a/test/run-test b/test/run-test
index 84c818b444..424c2e40be 100755
--- a/test/run-test
+++ b/test/run-test
@@ -119,6 +119,7 @@ build_output="build-output.txt"
cfg_output="graph.cfg"
strace_output="strace-output.txt"
lib="libartd.so"
+testlib="arttestd"
run_args="--quiet"
build_args=""
@@ -164,6 +165,7 @@ while true; do
shift
elif [ "x$1" = "x-O" ]; then
lib="libart.so"
+ testlib="arttest"
shift
elif [ "x$1" = "x--dalvik" ]; then
lib="libdvm.so"
@@ -644,6 +646,10 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
fi
fi
+if [ "$runtime" != "jvm" ]; then
+ run_args="${run_args} --testlib ${testlib}"
+fi
+
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
build_file_size_limit=2048
run_file_size_limit=2048
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 7ada1896e8..728991d0e9 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -130,7 +130,28 @@
description: "Crypto failures",
result: EXEC_FAILED,
names: ["libcore.javax.crypto.CipherTest#testCipher_ShortBlock_Failure",
- "libcore.javax.crypto.CipherTest#testCipher_Success"]
+ "libcore.javax.crypto.CipherTest#testCipher_Success",
+ "libcore.javax.crypto.spec.AlgorithmParametersTestDESede#testAlgorithmParameters",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#testDoFinalbyteArrayintintbyteArrayint",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#testUpdatebyteArrayintintbyteArrayint",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinal$BI",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinal$BII$B",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinalLjava_nio_ByteBufferLjava_nio_ByteBuffer",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getAlgorithm",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getBlockSize",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getInstanceLjava_lang_String",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getOutputSizeI",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithAlgorithmParameterSpec",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKey",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKeyAlgorithmParameterSpecSecureRandom",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithSecureRandom",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_unwrap$BLjava_lang_StringI",
+ "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_updateLjava_nio_ByteBufferLjava_nio_ByteBuffer",
+ "org.apache.harmony.crypto.tests.javax.crypto.func.CipherAesWrapTest#test_AesWrap",
+ "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeISO",
+ "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeNoISO",
+ "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeWrapTest#test_DESedeWrap",
+ "org.apache.harmony.crypto.tests.javax.crypto.func.CipherPBETest#test_PBEWithMD5AndDES"]
},
{
description: "Flake when running with libartd.so or interpreter",