summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/compiled_method.h30
-rw-r--r--compiler/compiled_method_test.cc86
-rw-r--r--compiler/driver/compiler_options.h4
-rw-r--r--compiler/image_writer.cc32
-rw-r--r--compiler/intrinsics_list.h6
-rw-r--r--compiler/jit/jit_compiler.cc5
-rw-r--r--compiler/linker/arm64/relative_patcher_arm64.cc2
-rw-r--r--compiler/oat_writer.cc10
-rw-r--r--compiler/optimizing/code_generator_arm.cc74
-rw-r--r--compiler/optimizing/code_generator_arm.h9
-rw-r--r--compiler/optimizing/code_generator_arm64.cc67
-rw-r--r--compiler/optimizing/code_generator_arm64.h14
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc82
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h12
-rw-r--r--compiler/optimizing/code_generator_mips.cc84
-rw-r--r--compiler/optimizing/code_generator_mips.h15
-rw-r--r--compiler/optimizing/code_generator_mips64.cc82
-rw-r--r--compiler/optimizing/code_generator_mips64.h15
-rw-r--r--compiler/optimizing/code_generator_vector_arm64.cc90
-rw-r--r--compiler/optimizing/code_generator_vector_x86.cc92
-rw-r--r--compiler/optimizing/code_generator_vector_x86_64.cc96
-rw-r--r--compiler/optimizing/code_generator_x86.cc49
-rw-r--r--compiler/optimizing/code_generator_x86.h4
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc27
-rw-r--r--compiler/optimizing/code_generator_x86_64.h4
-rw-r--r--compiler/optimizing/graph_visualizer.cc8
-rw-r--r--compiler/optimizing/intrinsics_arm.cc21
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc22
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc23
-rw-r--r--compiler/optimizing/intrinsics_mips.cc2
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc2
-rw-r--r--compiler/optimizing/intrinsics_x86.cc21
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc21
-rw-r--r--compiler/optimizing/loop_optimization.cc50
-rw-r--r--compiler/optimizing/loop_optimization.h1
-rw-r--r--compiler/optimizing/nodes.cc4
-rw-r--r--compiler/optimizing/nodes.h18
-rw-r--r--compiler/optimizing/nodes_vector.h22
-rw-r--r--compiler/optimizing/pc_relative_fixups_mips.cc2
-rw-r--r--compiler/optimizing/sharpening.cc14
-rw-r--r--compiler/utils/assembler_thumb_test_expected.cc.inc4
-rw-r--r--compiler/utils/x86/assembler_x86.cc133
-rw-r--r--compiler/utils/x86/assembler_x86.h19
-rw-r--r--compiler/utils/x86/assembler_x86_test.cc64
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc150
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h19
-rw-r--r--compiler/utils/x86_64/assembler_x86_64_test.cc64
-rw-r--r--disassembler/disassembler_x86.cc66
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/arch/context.h1
-rw-r--r--runtime/class_linker.cc2
-rw-r--r--runtime/common_dex_operations.h4
-rw-r--r--runtime/dex_instruction.cc83
-rw-r--r--runtime/dex_instruction.h97
-rw-r--r--runtime/dex_instruction_list.h514
-rw-r--r--runtime/dex_instruction_visitor.h72
-rw-r--r--runtime/dex_instruction_visitor_test.cc69
-rw-r--r--runtime/fault_handler.cc16
-rw-r--r--runtime/gc/collector/concurrent_copying.cc2
-rw-r--r--runtime/gc/collector/semi_space.cc1
-rw-r--r--runtime/gc/gc_cause.cc2
-rw-r--r--runtime/gc/gc_cause.h8
-rw-r--r--runtime/gc/heap.cc23
-rw-r--r--runtime/gc/heap.h14
-rw-r--r--runtime/generated/asm_support_gen.h2
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/interpreter/interpreter.cc6
-rw-r--r--runtime/interpreter/interpreter_common.cc28
-rw-r--r--runtime/interpreter/interpreter_common.h3
-rw-r--r--runtime/interpreter/interpreter_intrinsics.cc1
-rw-r--r--runtime/jit/jit_code_cache.cc12
-rw-r--r--runtime/monitor.cc9
-rw-r--r--runtime/native_bridge_art_interface.cc10
-rw-r--r--runtime/runtime.cc22
-rw-r--r--runtime/runtime.h3
-rw-r--r--runtime/thread.cc18
-rw-r--r--runtime/thread.h26
-rw-r--r--runtime/verifier/method_verifier.cc14
-rw-r--r--runtime/verifier/method_verifier.h4
-rw-r--r--runtime/verifier/reg_type-inl.h103
-rw-r--r--runtime/verifier/reg_type.h140
-rw-r--r--sigchainlib/sigchain.cc72
-rw-r--r--sigchainlib/sigchain.h15
-rw-r--r--sigchainlib/sigchain_dummy.cc4
-rw-r--r--test/115-native-bridge/expected.txt13
-rw-r--r--test/115-native-bridge/nativebridge.cc177
-rw-r--r--test/115-native-bridge/src/NativeBridgeMain.java3
-rw-r--r--test/651-checker-byte-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-byte-simd-minmax/info.txt1
-rw-r--r--test/651-checker-byte-simd-minmax/src/Main.java94
-rw-r--r--test/651-checker-char-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-char-simd-minmax/info.txt1
-rw-r--r--test/651-checker-char-simd-minmax/src/Main.java102
-rw-r--r--test/651-checker-double-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-double-simd-minmax/info.txt1
-rw-r--r--test/651-checker-double-simd-minmax/src/Main.java126
-rw-r--r--test/651-checker-float-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-float-simd-minmax/info.txt1
-rw-r--r--test/651-checker-float-simd-minmax/src/Main.java126
-rw-r--r--test/651-checker-int-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-int-simd-minmax/info.txt1
-rw-r--r--test/651-checker-int-simd-minmax/src/Main.java106
-rw-r--r--test/651-checker-long-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-long-simd-minmax/info.txt1
-rw-r--r--test/651-checker-long-simd-minmax/src/Main.java108
-rw-r--r--test/651-checker-short-simd-minmax/expected.txt1
-rw-r--r--test/651-checker-short-simd-minmax/info.txt1
-rw-r--r--test/651-checker-short-simd-minmax/src/Main.java108
-rw-r--r--tools/dexfuzz/src/dexfuzz/DexFuzz.java9
-rw-r--r--tools/dexfuzz/src/dexfuzz/Options.java4
-rwxr-xr-xtools/jfuzz/run_dex_fuzz_test.py11
-rwxr-xr-xtools/jfuzz/run_jfuzz_test.py16
-rwxr-xr-xtools/jfuzz/run_jfuzz_test_nightly.py10
113 files changed, 2883 insertions, 1288 deletions
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index d0f66e2d8e..912c96468d 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -122,10 +122,8 @@ class LinkerPatch {
kMethod,
kCall,
kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
- kType,
kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
kTypeBssEntry, // NOTE: Actual patching is instruction_set-dependent.
- kString,
kStringRelative, // NOTE: Actual patching is instruction_set-dependent.
kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent.
kDexCacheArray, // NOTE: Actual patching is instruction_set-dependent.
@@ -156,14 +154,6 @@ class LinkerPatch {
return patch;
}
- static LinkerPatch TypePatch(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t target_type_idx) {
- LinkerPatch patch(literal_offset, Type::kType, target_dex_file);
- patch.type_idx_ = target_type_idx;
- return patch;
- }
-
static LinkerPatch RelativeTypePatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t pc_insn_offset,
@@ -184,14 +174,6 @@ class LinkerPatch {
return patch;
}
- static LinkerPatch StringPatch(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t target_string_idx) {
- LinkerPatch patch(literal_offset, Type::kString, target_dex_file);
- patch.string_idx_ = target_string_idx;
- return patch;
- }
-
static LinkerPatch RelativeStringPatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t pc_insn_offset,
@@ -265,29 +247,25 @@ class LinkerPatch {
}
const DexFile* TargetTypeDexFile() const {
- DCHECK(patch_type_ == Type::kType ||
- patch_type_ == Type::kTypeRelative ||
+ DCHECK(patch_type_ == Type::kTypeRelative ||
patch_type_ == Type::kTypeBssEntry);
return target_dex_file_;
}
dex::TypeIndex TargetTypeIndex() const {
- DCHECK(patch_type_ == Type::kType ||
- patch_type_ == Type::kTypeRelative ||
+ DCHECK(patch_type_ == Type::kTypeRelative ||
patch_type_ == Type::kTypeBssEntry);
return dex::TypeIndex(type_idx_);
}
const DexFile* TargetStringDexFile() const {
- DCHECK(patch_type_ == Type::kString ||
- patch_type_ == Type::kStringRelative ||
+ DCHECK(patch_type_ == Type::kStringRelative ||
patch_type_ == Type::kStringBssEntry);
return target_dex_file_;
}
dex::StringIndex TargetStringIndex() const {
- DCHECK(patch_type_ == Type::kString ||
- patch_type_ == Type::kStringRelative ||
+ DCHECK(patch_type_ == Type::kStringRelative ||
patch_type_ == Type::kStringBssEntry);
return dex::StringIndex(string_idx_);
}
diff --git a/compiler/compiled_method_test.cc b/compiler/compiled_method_test.cc
index 99ee875da2..81a2cde794 100644
--- a/compiler/compiled_method_test.cc
+++ b/compiler/compiled_method_test.cc
@@ -62,10 +62,38 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u),
LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u),
LinkerPatch::RelativeCodePatch(16u, dex_file2, 1001u),
- LinkerPatch::TypePatch(16u, dex_file1, 1000u),
- LinkerPatch::TypePatch(16u, dex_file1, 1001u),
- LinkerPatch::TypePatch(16u, dex_file2, 1000u),
- LinkerPatch::TypePatch(16u, dex_file2, 1001u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file1, 3000u, 1000u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file1, 3001u, 1000u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file1, 3000u, 1001u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file1, 3001u, 1001u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file2, 3000u, 1000u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file2, 3001u, 1000u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file2, 3000u, 1001u),
+ LinkerPatch::RelativeTypePatch(16u, dex_file2, 3001u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3000u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3001u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3000u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3001u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3000u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3001u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3000u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3001u, 1001u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file1, 3000u, 1000u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file1, 3001u, 1000u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file1, 3000u, 1001u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file1, 3001u, 1001u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file2, 3000u, 1000u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file2, 3001u, 1000u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file2, 3000u, 1001u),
+ LinkerPatch::RelativeStringPatch(16u, dex_file2, 3001u, 1001u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3000u, 1000u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3001u, 1000u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3000u, 1001u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3001u, 1001u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1000u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1000u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1001u),
+ LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1001u),
LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2000u),
LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2000u),
LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2001u),
@@ -74,10 +102,15 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2000u),
LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2001u),
LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2001u),
+ LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 0u),
+ LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 1u),
+ LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 0u),
+ LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 1u),
+
LinkerPatch::MethodPatch(32u, dex_file1, 1000u),
LinkerPatch::MethodPatch(32u, dex_file1, 1001u),
LinkerPatch::MethodPatch(32u, dex_file2, 1000u),
- LinkerPatch::MethodPatch(32u, dex_file2, 1001u),
+ LinkerPatch::MethodPatch(32u, dex_file2, 1001u), // Index 3.
LinkerPatch::CodePatch(32u, dex_file1, 1000u),
LinkerPatch::CodePatch(32u, dex_file1, 1001u),
LinkerPatch::CodePatch(32u, dex_file2, 1000u),
@@ -86,10 +119,38 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u),
LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u),
LinkerPatch::RelativeCodePatch(32u, dex_file2, 1001u),
- LinkerPatch::TypePatch(32u, dex_file1, 1000u),
- LinkerPatch::TypePatch(32u, dex_file1, 1001u),
- LinkerPatch::TypePatch(32u, dex_file2, 1000u),
- LinkerPatch::TypePatch(32u, dex_file2, 1001u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file1, 3000u, 1000u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file1, 3001u, 1000u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file1, 3000u, 1001u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file1, 3001u, 1001u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file2, 3000u, 1000u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file2, 3001u, 1000u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file2, 3000u, 1001u),
+ LinkerPatch::RelativeTypePatch(32u, dex_file2, 3001u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3000u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3001u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3000u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3001u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3000u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3001u, 1000u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3000u, 1001u),
+ LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3001u, 1001u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file1, 3000u, 1000u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file1, 3001u, 1000u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file1, 3000u, 1001u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file1, 3001u, 1001u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file2, 3000u, 1000u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file2, 3001u, 1000u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file2, 3000u, 1001u),
+ LinkerPatch::RelativeStringPatch(32u, dex_file2, 3001u, 1001u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3000u, 1000u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3001u, 1000u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3000u, 1001u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3001u, 1001u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1000u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1000u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1001u),
+ LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1001u),
LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2000u),
LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2000u),
LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2001u),
@@ -98,7 +159,12 @@ TEST(CompiledMethod, LinkerPatchOperators) {
LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2000u),
LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2001u),
LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2001u),
- LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // identical with patch as index 3.
+ LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 0u),
+ LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 1u),
+ LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 0u),
+ LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 1u),
+
+ LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // identical with patch at index 3.
};
constexpr size_t last_index = arraysize(patches) - 1u;
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 957ea99c49..5a8202159e 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -237,6 +237,10 @@ class CompilerOptions FINAL {
bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
+ void SetNonPic() {
+ compile_pic_ = false;
+ }
+
const std::string& GetDumpCfgFileName() const {
return dump_cfg_file_name_;
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 952a7c637a..4d6db4745f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -48,6 +48,7 @@
#include "image.h"
#include "imt_conflict_table.h"
#include "intern_table.h"
+#include "jni_internal.h"
#include "linear_alloc.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
@@ -114,15 +115,19 @@ bool ImageWriter::IsInBootOatFile(const void* ptr) const {
return false;
}
-static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
+static void ClearDexFileCookieCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
Class* klass = obj->GetClass();
- CHECK_NE(Class::PrettyClass(klass), "com.android.dex.Dex");
+ if (klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile)) {
+ ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ // Null out the cookie to enable determinism. b/34090128
+ field->SetObject</*kTransactionActive*/false>(obj, nullptr);
+ }
}
-static void CheckNoDexObjects() {
- ScopedObjectAccess soa(Thread::Current());
- Runtime::Current()->GetHeap()->VisitObjects(CheckNoDexObjectsCallback, nullptr);
+static void ClearDexFileCookies() REQUIRES_SHARED(Locks::mutator_lock_) {
+ Runtime::Current()->GetHeap()->VisitObjects(ClearDexFileCookieCallback, nullptr);
}
bool ImageWriter::PrepareImageAddressSpace() {
@@ -131,23 +136,18 @@ bool ImageWriter::PrepareImageAddressSpace() {
{
ScopedObjectAccess soa(Thread::Current());
PruneNonImageClasses(); // Remove junk
- if (!compile_app_image_) {
+ if (compile_app_image_) {
+ // Clear dex file cookies for app images to enable app image determinism. This is required
+ // since the cookie field contains long pointers to DexFiles which are not deterministic.
+ // b/34090128
+ ClearDexFileCookies();
+ } else {
// Avoid for app image since this may increase RAM and image size.
ComputeLazyFieldsForImageClasses(); // Add useful information
}
}
heap->CollectGarbage(false); // Remove garbage.
- // Dex caches must not have their dex fields set in the image. These are memory buffers of mapped
- // dex files.
- //
- // We may open them in the unstarted-runtime code for class metadata. Their fields should all be
- // reset in PruneNonImageClasses and the objects reclaimed in the GC. Make sure that's actually
- // true.
- if (kIsDebugBuild) {
- CheckNoDexObjects();
- }
-
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
CheckNonImageClassesRemoved();
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index 63c23cb074..c8a0119667 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -28,6 +28,9 @@
// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf
// (kNoSideEffects), and it is also OK to remove it if it's unused.
+// Note: Thread.interrupted is marked with kAllSideEffects due to the lack of finer grain
+// side effects representation.
+
#define INTRINSICS_LIST(V) \
V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \
V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \
@@ -154,7 +157,8 @@
V(UnsafeStoreFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "storeFence", "()V") \
V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \
V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
- V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;")
+ V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;") \
+ V(ThreadInterrupted, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/Thread;", "interrupted", "()Z")
#endif // ART_COMPILER_INTRINSICS_LIST_H_
#undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index ad951bcc3f..fed1f48d65 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -105,7 +105,7 @@ JitCompiler::JitCompiler() {
/* implicit_null_checks */ true,
/* implicit_so_checks */ true,
/* implicit_suspend_checks */ false,
- /* pic */ true, // TODO: Support non-PIC in optimizing.
+ /* pic */ false,
/* verbose_methods */ nullptr,
/* init_failure_output */ nullptr,
/* abort_on_hard_verifier_failure */ false,
@@ -117,6 +117,9 @@ JitCompiler::JitCompiler() {
for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
compiler_options_->ParseCompilerOption(argument, Usage);
}
+ // JIT is never PIC, no matter what the runtime compiler options specify.
+ compiler_options_->SetNonPic();
+
const InstructionSet instruction_set = kRuntimeISA;
for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
VLOG(compiler) << "JIT compiler option " << option;
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index e99687a54f..c033c2d043 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -57,8 +57,6 @@ inline bool IsAdrpPatch(const LinkerPatch& patch) {
case LinkerPatch::Type::kMethod:
case LinkerPatch::Type::kCall:
case LinkerPatch::Type::kCallRelative:
- case LinkerPatch::Type::kType:
- case LinkerPatch::Type::kString:
case LinkerPatch::Type::kBakerReadBarrierBranch:
return false;
case LinkerPatch::Type::kTypeRelative:
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 9b22334ead..5091c0b35d 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1336,16 +1336,6 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
PatchMethodAddress(&patched_code_, literal_offset, method);
break;
}
- case LinkerPatch::Type::kString: {
- mirror::String* string = GetTargetString(patch);
- PatchObjectAddress(&patched_code_, literal_offset, string);
- break;
- }
- case LinkerPatch::Type::kType: {
- mirror::Class* type = GetTargetType(patch);
- PatchObjectAddress(&patched_code_, literal_offset, type);
- break;
- }
case LinkerPatch::Type::kBakerReadBarrierBranch: {
writer_->relative_patcher_->PatchBakerReadBarrierBranch(&patched_code_,
patch,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ab3d499235..713d370c87 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2009,11 +2009,7 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -6753,20 +6749,14 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- break;
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -6855,13 +6845,6 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
- cls->GetTypeIndex()));
- break;
- }
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
@@ -6961,20 +6944,14 @@ void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- break;
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
}
@@ -7020,12 +6997,6 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_S
HLoadString::LoadKind load_kind = load->GetLoadKind();
switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ LoadLiteral(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
- return; // No dex cache slow path.
- }
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARM::PcRelativePatchInfo* labels =
@@ -8740,20 +8711,6 @@ Label* CodeGeneratorARM::NewBakerReadBarrierPatch(uint32_t custom_data) {
return &baker_read_barrier_patches_.back().label;
}
-Literal* CodeGeneratorARM::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- return boot_image_string_patches_.GetOrCreate(
- StringReference(&dex_file, string_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
-Literal* CodeGeneratorARM::DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
- dex::TypeIndex type_index) {
- return boot_image_type_patches_.GetOrCreate(
- TypeReference(&dex_file, type_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
Literal* CodeGeneratorARM::DeduplicateBootImageAddressLiteral(uint32_t address) {
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
@@ -8802,24 +8759,13 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
DCHECK(linker_patches->empty());
size_t size =
/* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
- boot_image_string_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
- boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- for (const auto& entry : boot_image_string_patches_) {
- const StringReference& target_string = entry.first;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = literal->GetLabel()->Position();
- linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
- target_string.dex_file,
- target_string.string_index.index_));
- }
if (!GetCompilerOptions().IsBootImage()) {
DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
@@ -8832,15 +8778,6 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
- for (const auto& entry : boot_image_type_patches_) {
- const TypeReference& target_type = entry.first;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = literal->GetLabel()->Position();
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
- target_type.dex_file,
- target_type.type_index.index_));
- }
for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
linker_patches->push_back(LinkerPatch::BakerReadBarrierBranchPatch(info.label.Position(),
info.custom_data));
@@ -8854,13 +8791,6 @@ Literal* CodeGeneratorARM::DeduplicateUint32Literal(uint32_t value, Uint32ToLite
[this, value]() { return __ NewLiteral<uint32_t>(value); });
}
-Literal* CodeGeneratorARM::DeduplicateMethodLiteral(MethodReference target_method,
- MethodToLiteralMap* map) {
- return map->GetOrCreate(
- target_method,
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
void LocationsBuilderARM::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b94ee20d9d..47e6be59bd 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -493,9 +493,6 @@ class CodeGeneratorARM : public CodeGenerator {
// before the BNE instruction.
Label* NewBakerReadBarrierPatch(uint32_t custom_data);
- Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
- Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
@@ -630,7 +627,6 @@ class CodeGeneratorARM : public CodeGenerator {
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
- using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
using StringToLiteralMap = ArenaSafeMap<StringReference,
Literal*,
StringReferenceValueComparator>;
@@ -646,7 +642,6 @@ class CodeGeneratorARM : public CodeGenerator {
};
Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
- Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
uint32_t offset_or_index,
ArenaDeque<PcRelativePatchInfo>* patches);
@@ -667,12 +662,8 @@ class CodeGeneratorARM : public CodeGenerator {
Uint32ToLiteralMap uint32_literals_;
// PC-relative patch info for each HArmDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
- StringToLiteralMap boot_image_string_patches_;
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
- // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- TypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fa39b79e39..7ff100d870 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1449,11 +1449,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
uint64_literals_(std::less<uint64_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -4685,20 +4681,6 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
return label;
}
-vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
- const DexFile& dex_file, dex::StringIndex string_index) {
- return boot_image_string_patches_.GetOrCreate(
- StringReference(&dex_file, string_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
-}
-
-vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageTypeLiteral(
- const DexFile& dex_file, dex::TypeIndex type_index) {
- return boot_image_type_patches_.GetOrCreate(
- TypeReference(&dex_file, type_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
-}
-
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(
uint64_t address) {
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
@@ -4765,9 +4747,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
DCHECK(linker_patches->empty());
size_t size =
pc_relative_dex_cache_patches_.size() +
- boot_image_string_patches_.size() +
pc_relative_string_patches_.size() +
- boot_image_type_patches_.size() +
pc_relative_type_patches_.size() +
type_bss_entry_patches_.size() +
baker_read_barrier_patches_.size();
@@ -4778,13 +4758,6 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
- for (const auto& entry : boot_image_string_patches_) {
- const StringReference& target_string = entry.first;
- vixl::aarch64::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::StringPatch(literal->GetOffset(),
- target_string.dex_file,
- target_string.string_index.index_));
- }
if (!GetCompilerOptions().IsBootImage()) {
DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
@@ -4797,13 +4770,6 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
- for (const auto& entry : boot_image_type_patches_) {
- const TypeReference& target_type = entry.first;
- vixl::aarch64::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::TypePatch(literal->GetOffset(),
- target_type.dex_file,
- target_type.type_index.index_));
- }
for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
linker_patches->push_back(LinkerPatch::BakerReadBarrierBranchPatch(info.label.GetLocation(),
info.custom_data));
@@ -4824,14 +4790,6 @@ vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(u
[this, value]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(value); });
}
-vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
- MethodReference target_method,
- MethodToLiteralMap* map) {
- return map->GetOrCreate(
- target_method,
- [this]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(/* placeholder */ 0u); });
-}
-
void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
// Explicit clinit checks triggered by static invokes must have been pruned by
// art::PrepareForRegisterAllocation.
@@ -4871,20 +4829,14 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- break;
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -4967,11 +4919,6 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
- cls->GetTypeIndex()));
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
// Add ADRP with its PC-relative type patch.
@@ -5073,20 +5020,14 @@ void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear A
HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- break;
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
}
@@ -5126,10 +5067,6 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
Location out_loc = load->GetLocations()->Out();
switch (load->GetLoadKind()) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- __ Ldr(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
- return; // No dex cache slow path.
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f16f625b6c..56444dc0dc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -586,11 +586,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
// before the CBNZ instruction.
vixl::aarch64::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
- vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(
- const DexFile& dex_file,
- dex::StringIndex string_index);
- vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
- dex::TypeIndex type_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
@@ -731,9 +726,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
private:
using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>;
- using MethodToLiteralMap = ArenaSafeMap<MethodReference,
- vixl::aarch64::Literal<uint64_t>*,
- MethodReferenceComparator>;
using StringToLiteralMap = ArenaSafeMap<StringReference,
vixl::aarch64::Literal<uint32_t>*,
StringReferenceValueComparator>;
@@ -744,8 +736,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value,
Uint32ToLiteralMap* map);
vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
- vixl::aarch64::Literal<uint64_t>* DeduplicateMethodLiteral(MethodReference target_method,
- MethodToLiteralMap* map);
// The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
// and boot image strings/types. The only difference is the interpretation of the
@@ -797,12 +787,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
Uint64ToLiteralMap uint64_literals_;
// PC-relative DexCache access info.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
- StringToLiteralMap boot_image_string_patches_;
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
- // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- TypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 1759c68125..015e6ddea3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2093,11 +2093,7 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -6849,20 +6845,14 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- break;
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -6951,13 +6941,6 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
- cls->GetTypeIndex()));
- break;
- }
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
@@ -7054,20 +7037,14 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- break;
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
}
@@ -7113,11 +7090,6 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
HLoadString::LoadKind load_kind = load->GetLoadKind();
switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
- __ Ldr(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
- return; // No dex cache slow path.
- }
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
@@ -8909,26 +8881,6 @@ vixl::aarch32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t cu
return &baker_read_barrier_patches_.back().label;
}
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageStringLiteral(
- const DexFile& dex_file,
- dex::StringIndex string_index) {
- return boot_image_string_patches_.GetOrCreate(
- StringReference(&dex_file, string_index),
- [this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
- });
-}
-
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageTypeLiteral(
- const DexFile& dex_file,
- dex::TypeIndex type_index) {
- return boot_image_type_patches_.GetOrCreate(
- TypeReference(&dex_file, type_index),
- [this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
- });
-}
-
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) {
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
@@ -8982,24 +8934,13 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
DCHECK(linker_patches->empty());
size_t size =
/* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
- boot_image_string_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
- boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- for (const auto& entry : boot_image_string_patches_) {
- const StringReference& target_string = entry.first;
- VIXLUInt32Literal* literal = entry.second;
- DCHECK(literal->IsBound());
- uint32_t literal_offset = literal->GetLocation();
- linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
- target_string.dex_file,
- target_string.string_index.index_));
- }
if (!GetCompilerOptions().IsBootImage()) {
DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
@@ -9012,15 +8953,6 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
- for (const auto& entry : boot_image_type_patches_) {
- const TypeReference& target_type = entry.first;
- VIXLUInt32Literal* literal = entry.second;
- DCHECK(literal->IsBound());
- uint32_t literal_offset = literal->GetLocation();
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
- target_type.dex_file,
- target_type.type_index.index_));
- }
for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
linker_patches->push_back(LinkerPatch::BakerReadBarrierBranchPatch(info.label.GetLocation(),
info.custom_data));
@@ -9038,16 +8970,6 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
});
}
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateMethodLiteral(
- MethodReference target_method,
- MethodToLiteralMap* map) {
- return map->GetOrCreate(
- target_method,
- [this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
- });
-}
-
void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 657d3c134f..daba9bf060 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -577,10 +577,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// before the BNE instruction.
vixl::aarch32::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
- VIXLUInt32Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
- VIXLUInt32Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
- dex::TypeIndex type_index);
VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
@@ -725,8 +721,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl::aarch32::Register temp);
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, VIXLUInt32Literal*>;
- using MethodToLiteralMap =
- ArenaSafeMap<MethodReference, VIXLUInt32Literal*, MethodReferenceComparator>;
using StringToLiteralMap = ArenaSafeMap<StringReference,
VIXLUInt32Literal*,
StringReferenceValueComparator>;
@@ -742,8 +736,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
};
VIXLUInt32Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
- VIXLUInt32Literal* DeduplicateMethodLiteral(MethodReference target_method,
- MethodToLiteralMap* map);
PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
uint32_t offset_or_index,
ArenaDeque<PcRelativePatchInfo>* patches);
@@ -768,12 +760,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
Uint32ToLiteralMap uint32_literals_;
// PC-relative patch info for each HArmDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
- StringToLiteralMap boot_image_string_patches_;
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
- // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- TypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 503026e399..95be3d7fd2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1061,11 +1061,7 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
uint32_literals_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1608,9 +1604,7 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
- type_bss_entry_patches_.size() +
- boot_image_string_patches_.size() +
- boot_image_type_patches_.size();
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -1626,24 +1620,6 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
- for (const auto& entry : boot_image_string_patches_) {
- const StringReference& target_string = entry.first;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
- linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
- target_string.dex_file,
- target_string.string_index.index_));
- }
- for (const auto& entry : boot_image_type_patches_) {
- const TypeReference& target_type = entry.first;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
- target_type.dex_file,
- target_type.type_index.index_));
- }
DCHECK_EQ(size, linker_patches->size());
}
@@ -1679,27 +1655,6 @@ Literal* CodeGeneratorMIPS::DeduplicateUint32Literal(uint32_t value, Uint32ToLit
[this, value]() { return __ NewLiteral<uint32_t>(value); });
}
-Literal* CodeGeneratorMIPS::DeduplicateMethodLiteral(MethodReference target_method,
- MethodToLiteralMap* map) {
- return map->GetOrCreate(
- target_method,
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
-Literal* CodeGeneratorMIPS::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- return boot_image_string_patches_.GetOrCreate(
- StringReference(&dex_file, string_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
-Literal* CodeGeneratorMIPS::DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
- dex::TypeIndex type_index) {
- return boot_image_type_patches_.GetOrCreate(
- TypeReference(&dex_file, type_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
@@ -7038,17 +6993,12 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
bool is_r6 = GetInstructionSetFeatures().IsR6();
bool fallback_load = has_irreducible_loops && !is_r6;
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- break;
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
+ break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = false;
@@ -7077,17 +7027,12 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kReferrersClass:
fallback_load = false;
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- break;
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = false;
@@ -7326,7 +7271,6 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
}
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBssEntry:
@@ -7376,7 +7320,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBssEntry:
@@ -7407,14 +7350,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ LoadLiteral(out,
- base_or_current_method_reg,
- codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
- cls->GetTypeIndex()));
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
@@ -7521,7 +7456,6 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBssEntry:
@@ -7571,7 +7505,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBssEntry:
@@ -7583,13 +7516,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
}
switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ LoadLiteral(out,
- base_or_current_method_reg,
- codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
- return; // No dex cache slow path.
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5ad1f12f8a..449cb4c62b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -588,9 +588,6 @@ class CodeGeneratorMIPS : public CodeGenerator {
PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
- Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
- Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);
@@ -624,16 +621,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
- using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
- using BootStringToLiteralMap = ArenaSafeMap<StringReference,
- Literal*,
- StringReferenceValueComparator>;
- using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
- Literal*,
- TypeReferenceValueComparator>;
Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
- Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
uint32_t offset_or_index,
ArenaDeque<PcRelativePatchInfo>* patches);
@@ -655,12 +644,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
Uint32ToLiteralMap uint32_literals_;
// PC-relative patch info for each HMipsDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
- BootStringToLiteralMap boot_image_string_patches_;
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
- // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- BootTypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e0dba21d71..5cdff5a7bc 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -958,11 +958,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
uint64_literals_(std::less<uint64_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -1446,9 +1442,7 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
- type_bss_entry_patches_.size() +
- boot_image_string_patches_.size() +
- boot_image_type_patches_.size();
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -1464,24 +1458,6 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
- for (const auto& entry : boot_image_string_patches_) {
- const StringReference& target_string = entry.first;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
- linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
- target_string.dex_file,
- target_string.string_index.index_));
- }
- for (const auto& entry : boot_image_type_patches_) {
- const TypeReference& target_type = entry.first;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
- target_type.dex_file,
- target_type.type_index.index_));
- }
DCHECK_EQ(size, linker_patches->size());
}
@@ -1523,27 +1499,6 @@ Literal* CodeGeneratorMIPS64::DeduplicateUint64Literal(uint64_t value) {
[this, value]() { return __ NewLiteral<uint64_t>(value); });
}
-Literal* CodeGeneratorMIPS64::DeduplicateMethodLiteral(MethodReference target_method,
- MethodToLiteralMap* map) {
- return map->GetOrCreate(
- target_method,
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- return boot_image_string_patches_.GetOrCreate(
- StringReference(&dex_file, string_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
- dex::TypeIndex type_index) {
- return boot_image_type_patches_.GetOrCreate(
- TypeReference(&dex_file, type_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
Literal* CodeGeneratorMIPS64::DeduplicateBootImageAddressLiteral(uint64_t address) {
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
}
@@ -4898,22 +4853,16 @@ HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
bool fallback_load = false;
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- break;
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
if (fallback_load) {
desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
@@ -4930,20 +4879,14 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- break;
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -5172,14 +5115,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
ArtMethod::DeclaringClassOffset().Int32Value(),
read_barrier_option);
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ LoadLiteral(out,
- kLoadUnsignedWord,
- codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
- cls->GetTypeIndex()));
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
@@ -5292,13 +5227,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
GpuRegister out = out_loc.AsRegister<GpuRegister>();
switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ LoadLiteral(out,
- kLoadUnsignedWord,
- codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
- return; // No dex cache slow path.
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 4c8376623f..1f34ced687 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -553,9 +553,6 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
uint32_t element_offset);
PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
uint32_t method_index);
- Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
- Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint64_t address);
void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);
@@ -574,23 +571,15 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
private:
using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
- using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
using StringToLiteralMap = ArenaSafeMap<StringReference,
Literal*,
StringReferenceValueComparator>;
using TypeToLiteralMap = ArenaSafeMap<TypeReference,
Literal*,
TypeReferenceValueComparator>;
- using BootStringToLiteralMap = ArenaSafeMap<StringReference,
- Literal*,
- StringReferenceValueComparator>;
- using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
- Literal*,
- TypeReferenceValueComparator>;
Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
Literal* DeduplicateUint64Literal(uint64_t value);
- Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
uint32_t offset_or_index,
@@ -616,12 +605,8 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
Uint64ToLiteralMap uint64_literals_;
// PC-relative patch info.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
- BootStringToLiteralMap boot_image_string_patches_;
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
- // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- BootTypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// PC-relative type patch info for kBssEntry.
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 478bd24388..a41adca02c 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -468,7 +468,50 @@ void LocationsBuilderARM64::VisitVecMin(HVecMin* instruction) {
}
void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
- LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VRegister lhs = VRegisterFrom(locations->InAt(0));
+ VRegister rhs = VRegisterFrom(locations->InAt(1));
+ VRegister dst = VRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Umin(dst.V16B(), lhs.V16B(), rhs.V16B());
+ } else {
+ __ Smin(dst.V16B(), lhs.V16B(), rhs.V16B());
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Umin(dst.V8H(), lhs.V8H(), rhs.V8H());
+ } else {
+ __ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ } else {
+ __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ }
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) {
@@ -476,7 +519,50 @@ void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) {
}
void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
- LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ VRegister lhs = VRegisterFrom(locations->InAt(0));
+ VRegister rhs = VRegisterFrom(locations->InAt(1));
+ VRegister dst = VRegisterFrom(locations->Out());
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Umax(dst.V16B(), lhs.V16B(), rhs.V16B());
+ } else {
+ __ Smax(dst.V16B(), lhs.V16B(), rhs.V16B());
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Umax(dst.V8H(), lhs.V8H(), rhs.V8H());
+ } else {
+ __ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ } else {
+ __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ }
+ break;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D());
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 5bb19c193c..14782d70a1 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -483,7 +483,51 @@ void LocationsBuilderX86::VisitVecMin(HVecMin* instruction) {
}
void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pminub(dst, src);
+ } else {
+ __ pminsb(dst, src);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pminuw(dst, src);
+ } else {
+ __ pminsw(dst, src);
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pminud(dst, src);
+ } else {
+ __ pminsd(dst, src);
+ }
+ break;
+ // Next cases are sloppy wrt 0.0 vs -0.0.
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ minps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ minpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) {
@@ -491,7 +535,51 @@ void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) {
}
void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pmaxub(dst, src);
+ } else {
+ __ pmaxsb(dst, src);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pmaxuw(dst, src);
+ } else {
+ __ pmaxsw(dst, src);
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pmaxud(dst, src);
+ } else {
+ __ pmaxsd(dst, src);
+ }
+ break;
+ // Next cases are sloppy wrt 0.0 vs -0.0.
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ maxps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ maxpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 6d4aae86e6..246044ebb8 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -353,6 +353,10 @@ void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruct
DCHECK(locations->InAt(0).Equals(locations->Out()));
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+
+ DCHECK(instruction->IsRounded());
+ DCHECK(instruction->IsUnsigned());
+
switch (instruction->GetPackedType()) {
case Primitive::kPrimByte:
DCHECK_EQ(16u, instruction->GetVectorLength());
@@ -472,7 +476,51 @@ void LocationsBuilderX86_64::VisitVecMin(HVecMin* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pminub(dst, src);
+ } else {
+ __ pminsb(dst, src);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pminuw(dst, src);
+ } else {
+ __ pminsw(dst, src);
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pminud(dst, src);
+ } else {
+ __ pminsd(dst, src);
+ }
+ break;
+ // Next cases are sloppy wrt 0.0 vs -0.0.
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ minps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ minpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) {
@@ -480,7 +528,51 @@ void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+ XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+ switch (instruction->GetPackedType()) {
+ case Primitive::kPrimByte:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pmaxub(dst, src);
+ } else {
+ __ pmaxsb(dst, src);
+ }
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pmaxuw(dst, src);
+ } else {
+ __ pmaxsw(dst, src);
+ }
+ break;
+ case Primitive::kPrimInt:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ if (instruction->IsUnsigned()) {
+ __ pmaxud(dst, src);
+ } else {
+ __ pmaxsd(dst, src);
+ }
+ break;
+ // Next cases are sloppy wrt 0.0 vs -0.0.
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ maxps(dst, src);
+ break;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(!instruction->IsUnsigned());
+ __ maxpd(dst, src);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type";
+ UNREACHABLE();
+ }
}
void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index bd9a5d2564..aceeefb216 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4709,23 +4709,13 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(boot_image_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
- } else if (GetCompilerOptions().GetCompilePic()) {
+ if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
- for (const PatchInfo<Label>& info : boot_image_type_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
- }
- for (const PatchInfo<Label>& info : string_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(
- LinkerPatch::StringPatch(literal_offset, &info.dex_file, info.index));
- }
+ DCHECK(boot_image_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
@@ -6054,20 +6044,14 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- FALLTHROUGH_INTENDED;
case HLoadClass::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -6158,13 +6142,6 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
read_barrier_option);
break;
}
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ movl(out, Immediate(/* placeholder */ 0));
- codegen_->RecordBootTypePatch(cls);
- break;
- }
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
@@ -6252,20 +6229,14 @@ void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- break;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- FALLTHROUGH_INTENDED;
case HLoadString::LoadKind::kBssEntry:
- DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
- break;
- case HLoadString::LoadKind::kBootImageAddress:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
}
@@ -6317,12 +6288,6 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
Register out = out_loc.AsRegister<Register>();
switch (load->GetLoadKind()) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
- DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Immediate(/* placeholder */ 0));
- codegen_->RecordBootStringPatch(load);
- return; // No dex cache slow path.
- }
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
Register method_address = locations->InAt(0).AsRegister<Register>();
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ca3a9eadd2..f08d642f5e 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -633,9 +633,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// PC-relative DexCache access info.
ArenaDeque<X86PcRelativePatchInfo> pc_relative_dex_cache_patches_;
- // String patch locations; type depends on configuration (app .bss or boot image PIC/non-PIC).
+ // String patch locations; type depends on configuration (app .bss or boot image).
ArenaDeque<X86PcRelativePatchInfo> string_patches_;
- // Type patch locations for boot image; type depends on configuration (boot image PIC/non-PIC).
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<X86PcRelativePatchInfo> boot_image_type_patches_;
// Type patch locations for kBssEntry.
ArenaDeque<X86PcRelativePatchInfo> type_bss_entry_patches_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6b0e001ad8..ac0f37b717 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1134,14 +1134,13 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
- if (!GetCompilerOptions().IsBootImage()) {
- DCHECK(boot_image_type_patches_.empty());
- EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
- } else {
- // These are always PC-relative, see GetSupportedLoadClassKind()/GetSupportedLoadStringKind().
+ if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
+ } else {
+ DCHECK(boot_image_type_patches_.empty());
+ EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
}
EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
@@ -5458,21 +5457,14 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
UNREACHABLE();
case HLoadClass::LoadKind::kReferrersClass:
break;
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- // We prefer the always-available RIP-relative address for the x86-64 boot image.
- return HLoadClass::LoadKind::kBootImageLinkTimePcRelative;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadClass::LoadKind::kBootImageAddress:
- break;
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -5635,21 +5627,14 @@ void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
HLoadString::LoadKind desired_string_load_kind) {
switch (desired_string_load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!GetCompilerOptions().GetCompilePic());
- // We prefer the always-available RIP-relative address for the x86-64 boot image.
- return HLoadString::LoadKind::kBootImageLinkTimePcRelative;
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- DCHECK(GetCompilerOptions().GetCompilePic());
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- break;
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kDexCacheViaMethod:
break;
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c8336dabd9..d8005cc410 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -603,9 +603,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// PC-relative DexCache access info.
ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
- // String patch locations; type depends on configuration (app .bss or boot image PIC).
+ // String patch locations; type depends on configuration (app .bss or boot image).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations for boot image (always PIC).
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
// Type patch locations for kBssEntry.
ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index e5d94c3504..02816cf7ce 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -514,6 +514,14 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
+ void VisitVecMin(HVecMin* min) OVERRIDE {
+ StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
+ }
+
+ void VisitVecMax(HVecMax* max) OVERRIDE {
+ StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
+ }
+
void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
StartAttributeStream("kind") << instruction->GetOpKind();
}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 69cf9a126f..1df884e551 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2753,6 +2753,27 @@ void IntrinsicCodeGeneratorARM::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+void IntrinsicLocationsBuilderARM::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitThreadInterrupted(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+ int32_t offset = Thread::InterruptedOffset<kArmPointerSize>().Int32Value();
+ __ LoadFromOffset(kLoadWord, out, TR, offset);
+ Label done;
+ __ CompareAndBranchIfZero(out, &done);
+ __ dmb(ISH);
+ __ LoadImmediate(IP, 0);
+ __ StoreToOffset(kStoreWord, IP, TR, offset);
+ __ dmb(ISH);
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 65a82229e9..b511c5a18d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3033,6 +3033,28 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitThreadInterrupted(HInvoke* invoke) {
+ MacroAssembler* masm = GetVIXLAssembler();
+ Register out = RegisterFrom(invoke->GetLocations()->Out(), Primitive::kPrimInt);
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+
+ __ Add(temp, tr, Thread::InterruptedOffset<kArm64PointerSize>().Int32Value());
+ __ Ldar(out.W(), MemOperand(temp));
+
+ vixl::aarch64::Label done;
+ __ Cbz(out.W(), &done);
+ __ Stlr(wzr, MemOperand(temp));
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 356d5bcb0c..2d9781ade8 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3157,6 +3157,29 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
+ ArmVIXLAssembler* assembler = GetAssembler();
+ vixl32::Register out = RegisterFrom(invoke->GetLocations()->Out());
+ int32_t offset = Thread::InterruptedOffset<kArmPointerSize>().Int32Value();
+ __ Ldr(out, MemOperand(tr, offset));
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ vixl32::Label done;
+ __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
+ __ Dmb(vixl32::ISH);
+ __ Mov(temp, 0);
+ assembler->StoreToOffset(kStoreWord, temp, tr, offset);
+ __ Dmb(vixl32::ISH);
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index eb28742672..4731da1ea9 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -3257,6 +3257,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, ThreadInterrupted)
+
UNREACHABLE_INTRINSICS(MIPS)
#undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index a476b2bc25..00afbcd8f2 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2621,6 +2621,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS64, ThreadInterrupted)
+
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8e4574774f..57adcc3c2f 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3407,6 +3407,27 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+void IntrinsicLocationsBuilderX86::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86::VisitThreadInterrupted(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+ Address address = Address::Absolute(Thread::InterruptedOffset<kX86PointerSize>().Int32Value());
+ NearLabel done;
+ __ fs()->movl(out, address);
+ __ testl(out, out);
+ __ j(kEqual, &done);
+ __ fs()->movl(address, Immediate(0));
+ codegen_->MemoryFence();
+ __ Bind(&done);
+}
+
+
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index af0b193b03..773383ef1b 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3085,6 +3085,27 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
}
}
+void IntrinsicLocationsBuilderX86_64::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
+  Address address = Address::Absolute(
+      Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+ NearLabel done;
+ __ gs()->movl(out, address);
+ __ testl(out, out);
+ __ j(kEqual, &done);
+ __ gs()->movl(address, Immediate(0));
+ codegen_->MemoryFence();
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 881802d714..4067aa3468 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -869,6 +869,32 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
return false;
}
+ case Intrinsics::kMathMinIntInt:
+ case Intrinsics::kMathMinLongLong:
+ case Intrinsics::kMathMinFloatFloat:
+ case Intrinsics::kMathMinDoubleDouble:
+ case Intrinsics::kMathMaxIntInt:
+ case Intrinsics::kMathMaxLongLong:
+ case Intrinsics::kMathMaxFloatFloat:
+ case Intrinsics::kMathMaxDoubleDouble: {
+ // Deal with vector restrictions.
+ if (HasVectorRestrictions(restrictions, kNoMinMax) ||
+ HasVectorRestrictions(restrictions, kNoHiBits)) {
+ // TODO: we can do better for some hibits cases.
+ return false;
+ }
+ // Accept MIN/MAX(x, y) for vectorizable operands.
+ HInstruction* opa = instruction->InputAt(0);
+ HInstruction* opb = instruction->InputAt(1);
+ if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
+ VectorizeUse(node, opb, generate_code, type, restrictions)) {
+ if (generate_code) {
+ GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
+ }
+ return true;
+ }
+ return false;
+ }
default:
return false;
} // switch
@@ -898,7 +924,7 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric
*restrictions |= kNoDiv;
return TrySetVectorLength(4);
case Primitive::kPrimLong:
- *restrictions |= kNoDiv | kNoMul;
+ *restrictions |= kNoDiv | kNoMul | kNoMinMax;
return TrySetVectorLength(2);
case Primitive::kPrimFloat:
return TrySetVectorLength(4);
@@ -924,11 +950,13 @@ bool HLoopOptimization::TrySetVectorType(Primitive::Type type, uint64_t* restric
*restrictions |= kNoDiv;
return TrySetVectorLength(4);
case Primitive::kPrimLong:
- *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs;
+ *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs | kNoMinMax;
return TrySetVectorLength(2);
case Primitive::kPrimFloat:
+ *restrictions |= kNoMinMax; // -0.0 vs +0.0
return TrySetVectorLength(4);
case Primitive::kPrimDouble:
+ *restrictions |= kNoMinMax; // -0.0 vs +0.0
return TrySetVectorLength(2);
default:
break;
@@ -1108,6 +1136,24 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
DCHECK(opb == nullptr);
vector = new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_);
break;
+ case Intrinsics::kMathMinIntInt:
+ case Intrinsics::kMathMinLongLong:
+ case Intrinsics::kMathMinFloatFloat:
+ case Intrinsics::kMathMinDoubleDouble: {
+ bool is_unsigned = false; // TODO: detect unsigned versions
+ vector = new (global_allocator_)
+ HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
+ break;
+ }
+ case Intrinsics::kMathMaxIntInt:
+ case Intrinsics::kMathMaxLongLong:
+ case Intrinsics::kMathMaxFloatFloat:
+ case Intrinsics::kMathMaxDoubleDouble: {
+ bool is_unsigned = false; // TODO: detect unsigned versions
+ vector = new (global_allocator_)
+ HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
+ break;
+ }
default:
LOG(FATAL) << "Unsupported SIMD intrinsic";
UNREACHABLE();
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 4a7da86e32..6d5978d337 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -71,6 +71,7 @@ class HLoopOptimization : public HOptimization {
kNoSignedHAdd = 32, // no signed halving add
kNoUnroundedHAdd = 64, // no unrounded halving add
kNoAbs = 128, // no absolute value
+ kNoMinMax = 256, // no min/max
};
/*
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index a8bfe610de..8cb2d38d7d 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2690,8 +2690,6 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
switch (rhs) {
case HLoadClass::LoadKind::kReferrersClass:
return os << "ReferrersClass";
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- return os << "BootImageLinkTimeAddress";
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
return os << "BootImageLinkTimePcRelative";
case HLoadClass::LoadKind::kBootImageAddress:
@@ -2744,8 +2742,6 @@ void HLoadString::SetLoadKind(LoadKind load_kind) {
std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs) {
switch (rhs) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- return os << "BootImageLinkTimeAddress";
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
return os << "BootImageLinkTimePcRelative";
case HLoadString::LoadKind::kBootImageAddress:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 522962485b..9bf8aebfcd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5664,12 +5664,8 @@ class HLoadClass FINAL : public HInstruction {
// Use the Class* from the method's own ArtMethod*.
kReferrersClass,
- // Use boot image Class* address that will be known at link time.
- // Used for boot image classes referenced by boot image code in non-PIC mode.
- kBootImageLinkTimeAddress,
-
// Use PC-relative boot image Class* address that will be known at link time.
- // Used for boot image classes referenced by boot image code in PIC mode.
+ // Used for boot image classes referenced by boot image code.
kBootImageLinkTimePcRelative,
// Use a known boot image Class* address, embedded in the code by the codegen.
@@ -5821,7 +5817,6 @@ class HLoadClass FINAL : public HInstruction {
static bool HasTypeReference(LoadKind load_kind) {
return load_kind == LoadKind::kReferrersClass ||
- load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBssEntry ||
load_kind == LoadKind::kDexCacheViaMethod;
@@ -5855,7 +5850,6 @@ inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
GetLoadKind() == LoadKind::kBootImageAddress ||
GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
@@ -5867,12 +5861,8 @@ class HLoadString FINAL : public HInstruction {
public:
// Determines how to load the String.
enum class LoadKind {
- // Use boot image String* address that will be known at link time.
- // Used for boot image strings referenced by boot image code in non-PIC mode.
- kBootImageLinkTimeAddress,
-
// Use PC-relative boot image String* address that will be known at link time.
- // Used for boot image strings referenced by boot image code in PIC mode.
+ // Used for boot image strings referenced by boot image code.
kBootImageLinkTimePcRelative,
// Use a known boot image String* address, embedded in the code by the codegen.
@@ -5937,8 +5927,7 @@ class HLoadString FINAL : public HInstruction {
// the dex cache and the string is not guaranteed to be there yet.
bool NeedsEnvironment() const OVERRIDE {
LoadKind load_kind = GetLoadKind();
- if (load_kind == LoadKind::kBootImageLinkTimeAddress ||
- load_kind == LoadKind::kBootImageLinkTimePcRelative ||
+ if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBootImageAddress ||
load_kind == LoadKind::kJitTableAddress) {
return false;
@@ -6001,7 +5990,6 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
GetLoadKind() == LoadKind::kBssEntry ||
- GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
// HLoadString::GetInputRecords() returns an empty array at this point,
// so use the GetInputRecords() from the base class to set the input record.
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 92fe9bfa7d..5dbe29b4fa 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -456,13 +456,24 @@ class HVecMin FINAL : public HVecBinaryOperation {
HInstruction* right,
Primitive::Type packed_type,
size_t vector_length,
+ bool is_unsigned,
uint32_t dex_pc = kNoDexPc)
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
+ SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
}
+
+ bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
+
DECLARE_INSTRUCTION(VecMin);
+
private:
+ // Additional packed bits.
+ static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
+ static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
+ static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+
DISALLOW_COPY_AND_ASSIGN(HVecMin);
};
@@ -475,13 +486,24 @@ class HVecMax FINAL : public HVecBinaryOperation {
HInstruction* right,
Primitive::Type packed_type,
size_t vector_length,
+ bool is_unsigned,
uint32_t dex_pc = kNoDexPc)
: HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
+ SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
}
+
+ bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
+
DECLARE_INSTRUCTION(VecMax);
+
private:
+ // Additional packed bits.
+ static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
+ static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
+ static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+
DISALLOW_COPY_AND_ASSIGN(HVecMax);
};
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index a0fdde169d..ef2c432086 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -61,7 +61,6 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
- case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBssEntry:
@@ -77,7 +76,6 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
void VisitLoadString(HLoadString* load_string) OVERRIDE {
HLoadString::LoadKind load_kind = load_string->GetLoadKind();
switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBssEntry:
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 98ded24257..9a0316330d 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -170,9 +170,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
} else if ((klass != nullptr) && compiler_driver->IsImageClass(
dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
is_in_boot_image = true;
- desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
- ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
- : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+ desired_load_kind = HLoadClass::LoadKind::kBootImageLinkTimePcRelative;
} else {
// Not a boot image class.
DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
@@ -182,8 +180,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(HLoadClass* load_class,
is_in_boot_image = (klass != nullptr) &&
runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
if (is_in_boot_image) {
// TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
@@ -249,16 +246,13 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
CHECK(string != nullptr);
if (compiler_driver_->GetSupportBootImageFixup()) {
DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
- ? HLoadString::LoadKind::kBootImageLinkTimePcRelative
- : HLoadString::LoadKind::kBootImageLinkTimeAddress;
+ desired_load_kind = HLoadString::LoadKind::kBootImageLinkTimePcRelative;
} else {
// compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else if (runtime->UseJitCompilation()) {
- // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
- // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+ DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
string = class_linker->LookupString(dex_file, string_index, dex_cache.Get());
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index f8c4008b45..eaaf81518a 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5535,7 +5535,7 @@ const char* const VixlJniHelpersResults[] = {
" f0: f1bc 0f00 cmp.w ip, #0\n",
" f4: bf18 it ne\n",
" f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
- " fa: f8d9 c084 ldr.w ip, [r9, #132] ; 0x84\n",
+ " fa: f8d9 c08c ldr.w ip, [r9, #140] ; 0x8c\n",
" fe: f1bc 0f00 cmp.w ip, #0\n",
" 102: d171 bne.n 1e8 <VixlJniHelpers+0x1e8>\n",
" 104: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
@@ -5610,7 +5610,7 @@ const char* const VixlJniHelpersResults[] = {
" 214: ecbd 8a10 vpop {s16-s31}\n",
" 218: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
" 21c: 4660 mov r0, ip\n",
- " 21e: f8d9 c2b8 ldr.w ip, [r9, #696] ; 0x2b8\n",
+ " 21e: f8d9 c2c0 ldr.w ip, [r9, #704] ; 0x2c0\n",
" 222: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 1736618363..bef32f8254 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1238,6 +1238,139 @@ void X86Assembler::pavgw(XmmRegister dst, XmmRegister src) {
EmitXmmRegisterOperand(dst, src);
}
+void X86Assembler::pminsb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x38);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pmaxsb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pminsw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xEA);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pmaxsw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xEE);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pminsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x39);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pmaxsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pminub(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xDA);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pmaxub(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0xDE);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pminuw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3A);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pmaxuw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pminud(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3B);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::pmaxud(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3F);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::minps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::maxps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5F);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::minpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x5D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+void X86Assembler::maxpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x5F);
+ EmitXmmRegisterOperand(dst, src);
+}
void X86Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a747cda7bd..c4bb9ee18a 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -498,6 +498,25 @@ class X86Assembler FINAL : public Assembler {
void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void pavgw(XmmRegister dst, XmmRegister src);
+ void pminsb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pmaxsb(XmmRegister dst, XmmRegister src);
+ void pminsw(XmmRegister dst, XmmRegister src);
+ void pmaxsw(XmmRegister dst, XmmRegister src);
+ void pminsd(XmmRegister dst, XmmRegister src);
+ void pmaxsd(XmmRegister dst, XmmRegister src);
+
+ void pminub(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pmaxub(XmmRegister dst, XmmRegister src);
+ void pminuw(XmmRegister dst, XmmRegister src);
+ void pmaxuw(XmmRegister dst, XmmRegister src);
+ void pminud(XmmRegister dst, XmmRegister src);
+ void pmaxud(XmmRegister dst, XmmRegister src);
+
+ void minps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void maxps(XmmRegister dst, XmmRegister src);
+ void minpd(XmmRegister dst, XmmRegister src);
+ void maxpd(XmmRegister dst, XmmRegister src);
+
void pcmpeqb(XmmRegister dst, XmmRegister src);
void pcmpeqw(XmmRegister dst, XmmRegister src);
void pcmpeqd(XmmRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index f75f972265..34f2a47c27 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -613,6 +613,70 @@ TEST_F(AssemblerX86Test, PAvgW) {
DriverStr(RepeatFF(&x86::X86Assembler::pavgw, "pavgw %{reg2}, %{reg1}"), "pavgw");
}
+TEST_F(AssemblerX86Test, PMinSB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pminsb, "pminsb %{reg2}, %{reg1}"), "pminsb");
+}
+
+TEST_F(AssemblerX86Test, PMaxSB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmaxsb, "pmaxsb %{reg2}, %{reg1}"), "pmaxsb");
+}
+
+TEST_F(AssemblerX86Test, PMinSW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pminsw, "pminsw %{reg2}, %{reg1}"), "pminsw");
+}
+
+TEST_F(AssemblerX86Test, PMaxSW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmaxsw, "pmaxsw %{reg2}, %{reg1}"), "pmaxsw");
+}
+
+TEST_F(AssemblerX86Test, PMinSD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pminsd, "pminsd %{reg2}, %{reg1}"), "pminsd");
+}
+
+TEST_F(AssemblerX86Test, PMaxSD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmaxsd, "pmaxsd %{reg2}, %{reg1}"), "pmaxsd");
+}
+
+TEST_F(AssemblerX86Test, PMinUB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pminub, "pminub %{reg2}, %{reg1}"), "pminub");
+}
+
+TEST_F(AssemblerX86Test, PMaxUB) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmaxub, "pmaxub %{reg2}, %{reg1}"), "pmaxub");
+}
+
+TEST_F(AssemblerX86Test, PMinUW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pminuw, "pminuw %{reg2}, %{reg1}"), "pminuw");
+}
+
+TEST_F(AssemblerX86Test, PMaxUW) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmaxuw, "pmaxuw %{reg2}, %{reg1}"), "pmaxuw");
+}
+
+TEST_F(AssemblerX86Test, PMinUD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pminud, "pminud %{reg2}, %{reg1}"), "pminud");
+}
+
+TEST_F(AssemblerX86Test, PMaxUD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::pmaxud, "pmaxud %{reg2}, %{reg1}"), "pmaxud");
+}
+
+TEST_F(AssemblerX86Test, MinPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::minps, "minps %{reg2}, %{reg1}"), "minps");
+}
+
+TEST_F(AssemblerX86Test, MaxPS) {
+ DriverStr(RepeatFF(&x86::X86Assembler::maxps, "maxps %{reg2}, %{reg1}"), "maxps");
+}
+
+TEST_F(AssemblerX86Test, MinPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::minpd, "minpd %{reg2}, %{reg1}"), "minpd");
+}
+
+TEST_F(AssemblerX86Test, MaxPD) {
+ DriverStr(RepeatFF(&x86::X86Assembler::maxpd, "maxpd %{reg2}, %{reg1}"), "maxpd");
+}
+
TEST_F(AssemblerX86Test, PCmpeqB) {
DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "cmpeqb");
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 1b7a4850db..82d1174a25 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1445,6 +1445,156 @@ void X86_64Assembler::pavgw(XmmRegister dst, XmmRegister src) {
EmitXmmRegisterOperand(dst.LowBits(), src);
}
+void X86_64Assembler::pminsb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x38);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pmaxsb(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3C);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pminsw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xEA);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pmaxsw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xEE);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pminsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x39);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pmaxsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3D);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pminub(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xDA);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pmaxub(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0xDE);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pminuw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3A);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pmaxuw(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3E);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pminud(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3B);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::pmaxud(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x38);
+ EmitUint8(0x3F);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::minps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5D);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::maxps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5F);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::minpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5D);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+void X86_64Assembler::maxpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5F);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
void X86_64Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 0ddc46ca44..6e584fece1 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -526,6 +526,25 @@ class X86_64Assembler FINAL : public Assembler {
void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void pavgw(XmmRegister dst, XmmRegister src);
+ void pminsb(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pmaxsb(XmmRegister dst, XmmRegister src);
+ void pminsw(XmmRegister dst, XmmRegister src);
+ void pmaxsw(XmmRegister dst, XmmRegister src);
+ void pminsd(XmmRegister dst, XmmRegister src);
+ void pmaxsd(XmmRegister dst, XmmRegister src);
+
+ void pminub(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void pmaxub(XmmRegister dst, XmmRegister src);
+ void pminuw(XmmRegister dst, XmmRegister src);
+ void pmaxuw(XmmRegister dst, XmmRegister src);
+ void pminud(XmmRegister dst, XmmRegister src);
+ void pmaxud(XmmRegister dst, XmmRegister src);
+
+ void minps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void maxps(XmmRegister dst, XmmRegister src);
+ void minpd(XmmRegister dst, XmmRegister src);
+ void maxpd(XmmRegister dst, XmmRegister src);
+
void pcmpeqb(XmmRegister dst, XmmRegister src);
void pcmpeqw(XmmRegister dst, XmmRegister src);
void pcmpeqd(XmmRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index e7d8401e29..b57400334c 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1301,6 +1301,70 @@ TEST_F(AssemblerX86_64Test, Pavgw) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::pavgw, "pavgw %{reg2}, %{reg1}"), "pavgw");
}
+TEST_F(AssemblerX86_64Test, Pminsb) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pminsb, "pminsb %{reg2}, %{reg1}"), "pminsb");
+}
+
+TEST_F(AssemblerX86_64Test, Pmaxsb) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaxsb, "pmaxsb %{reg2}, %{reg1}"), "pmaxsb");
+}
+
+TEST_F(AssemblerX86_64Test, Pminsw) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pminsw, "pminsw %{reg2}, %{reg1}"), "pminsw");
+}
+
+TEST_F(AssemblerX86_64Test, Pmaxsw) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaxsw, "pmaxsw %{reg2}, %{reg1}"), "pmaxsw");
+}
+
+TEST_F(AssemblerX86_64Test, Pminsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pminsd, "pminsd %{reg2}, %{reg1}"), "pminsd");
+}
+
+TEST_F(AssemblerX86_64Test, Pmaxsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaxsd, "pmaxsd %{reg2}, %{reg1}"), "pmaxsd");
+}
+
+TEST_F(AssemblerX86_64Test, Pminub) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pminub, "pminub %{reg2}, %{reg1}"), "pminub");
+}
+
+TEST_F(AssemblerX86_64Test, Pmaxub) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaxub, "pmaxub %{reg2}, %{reg1}"), "pmaxub");
+}
+
+TEST_F(AssemblerX86_64Test, Pminuw) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pminuw, "pminuw %{reg2}, %{reg1}"), "pminuw");
+}
+
+TEST_F(AssemblerX86_64Test, Pmaxuw) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaxuw, "pmaxuw %{reg2}, %{reg1}"), "pmaxuw");
+}
+
+TEST_F(AssemblerX86_64Test, Pminud) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pminud, "pminud %{reg2}, %{reg1}"), "pminud");
+}
+
+TEST_F(AssemblerX86_64Test, Pmaxud) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaxud, "pmaxud %{reg2}, %{reg1}"), "pmaxud");
+}
+
+TEST_F(AssemblerX86_64Test, Minps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::minps, "minps %{reg2}, %{reg1}"), "minps");
+}
+
+TEST_F(AssemblerX86_64Test, Maxps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::maxps, "maxps %{reg2}, %{reg1}"), "maxps");
+}
+
+TEST_F(AssemblerX86_64Test, Minpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::minpd, "minpd %{reg2}, %{reg1}"), "minpd");
+}
+
+TEST_F(AssemblerX86_64Test, Maxpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::maxpd, "maxpd %{reg2}, %{reg1}"), "maxpd");
+}
+
TEST_F(AssemblerX86_64Test, PCmpeqb) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "pcmpeqb");
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index e12bcec776..4824f70a28 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -581,13 +581,69 @@ DISASSEMBLER_ENTRY(cmp,
load = true;
src_reg_file = dst_reg_file = SSE;
break;
- case 0x39:
+ case 0x37:
opcode1 = "pcmpgtq";
prefix[2] = 0;
has_modrm = true;
load = true;
src_reg_file = dst_reg_file = SSE;
break;
+ case 0x38:
+ opcode1 = "pminsb";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x39:
+ opcode1 = "pminsd";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x3A:
+ opcode1 = "pminuw";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x3B:
+ opcode1 = "pminud";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x3C:
+ opcode1 = "pmaxsb";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x3D:
+ opcode1 = "pmaxsd";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x3E:
+ opcode1 = "pmaxuw";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
+ case 0x3F:
+ opcode1 = "pmaxud";
+ prefix[2] = 0;
+ has_modrm = true;
+ load = true;
+ src_reg_file = dst_reg_file = SSE;
+ break;
case 0x40:
opcode1 = "pmulld";
prefix[2] = 0;
@@ -1133,8 +1189,12 @@ DISASSEMBLER_ENTRY(cmp,
opcode1 = opcode_tmp.c_str();
}
break;
+ case 0xDA:
+ case 0xDE:
case 0xE0:
case 0xE3:
+ case 0xEA:
+ case 0xEE:
if (prefix[2] == 0x66) {
src_reg_file = dst_reg_file = SSE;
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
@@ -1142,8 +1202,12 @@ DISASSEMBLER_ENTRY(cmp,
src_reg_file = dst_reg_file = MMX;
}
switch (*instr) {
+ case 0xDA: opcode1 = "pminub"; break;
+ case 0xDE: opcode1 = "pmaxub"; break;
case 0xE0: opcode1 = "pavgb"; break;
case 0xE3: opcode1 = "pavgw"; break;
+ case 0xEA: opcode1 = "pminsw"; break;
+ case 0xEE: opcode1 = "pmaxsw"; break;
}
prefix[2] = 0;
has_modrm = true;
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 186996894e..8ee5498115 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -539,7 +539,6 @@ art_cc_test {
"dex_file_test.cc",
"dex_file_verifier_test.cc",
"dex_instruction_test.cc",
- "dex_instruction_visitor_test.cc",
"dex_method_iterator_test.cc",
"entrypoints/math_entrypoints_test.cc",
"entrypoints/quick/quick_trampoline_entrypoints_test.cc",
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index a50064851b..d067f667cb 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -92,7 +92,6 @@ class Context {
// Switches execution of the executing context to this context
NO_RETURN virtual void DoLongJump() = 0;
- protected:
enum {
kBadGprBase = 0xebad6070,
kBadFprBase = 0xebad8070,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3ecf59520d..694c113eb6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -8705,6 +8705,8 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self,
DCHECK(h_long_array != nullptr);
h_long_array->Set(kDexFileIndexStart, reinterpret_cast<intptr_t>(dex_file));
+ // Note that this creates a finalizable dalvik.system.DexFile object and a corresponding
+ // FinalizerReference which will never get cleaned up without a started runtime.
Handle<mirror::Object> h_dex_file = hs2.NewHandle(
cookie_field->GetDeclaringClass()->AllocObject(self));
DCHECK(h_dex_file != nullptr);
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 6693eefa5a..8776061433 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -36,8 +36,8 @@ namespace interpreter {
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
- const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
+ uint16_t arg_offset,
JValue* result);
} // namespace interpreter
@@ -56,7 +56,7 @@ inline void PerformCall(Thread* self,
interpreter::ArtInterpreterToInterpreterBridge(self, code_item, callee_frame, result);
} else {
interpreter::ArtInterpreterToCompiledCodeBridge(
- self, caller_method, code_item, callee_frame, result);
+ self, caller_method, callee_frame, first_dest_reg, result);
}
} else {
interpreter::UnstartedRuntime::Invoke(self, code_item, callee_frame, result, first_dest_reg);
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 9f34c12d9a..b2267e5573 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -31,57 +31,46 @@ namespace art {
using android::base::StringPrintf;
const char* const Instruction::kInstructionNames[] = {
-#define INSTRUCTION_NAME(o, c, pname, f, i, a, v) pname,
+#define INSTRUCTION_NAME(o, c, pname, f, i, a, e, v) pname,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_NAME)
#undef DEX_INSTRUCTION_LIST
#undef INSTRUCTION_NAME
};
-Instruction::Format const Instruction::kInstructionFormats[] = {
-#define INSTRUCTION_FORMAT(o, c, p, format, i, a, v) format,
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_FORMAT)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_FORMAT
-};
-
-Instruction::IndexType const Instruction::kInstructionIndexTypes[] = {
-#define INSTRUCTION_INDEX_TYPE(o, c, p, f, index, a, v) index,
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_INDEX_TYPE)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_FLAGS
-};
-
-int const Instruction::kInstructionFlags[] = {
-#define INSTRUCTION_FLAGS(o, c, p, f, i, flags, v) flags,
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_FLAGS)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_FLAGS
-};
+static_assert(sizeof(Instruction::InstructionDescriptor) == 8u, "Unexpected descriptor size");
-int const Instruction::kInstructionVerifyFlags[] = {
-#define INSTRUCTION_VERIFY_FLAGS(o, c, p, f, i, a, vflags) vflags,
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_VERIFY_FLAGS)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_VERIFY_FLAGS
-};
+static constexpr int8_t InstructionSizeInCodeUnitsByOpcode(Instruction::Code opcode,
+ Instruction::Format format) {
+ if (opcode == Instruction::Code::NOP) {
+ return -1;
+ } else if ((format >= Instruction::Format::k10x) && (format <= Instruction::Format::k10t)) {
+ return 1;
+ } else if ((format >= Instruction::Format::k20t) && (format <= Instruction::Format::k22c)) {
+ return 2;
+ } else if ((format >= Instruction::Format::k32x) && (format <= Instruction::Format::k3rc)) {
+ return 3;
+ } else if ((format >= Instruction::Format::k45cc) && (format <= Instruction::Format::k4rcc)) {
+ return 4;
+ } else if (format == Instruction::Format::k51l) {
+ return 5;
+ } else {
+ return -1;
+ }
+}
-int const Instruction::kInstructionSizeInCodeUnits[] = {
-#define INSTRUCTION_SIZE(opcode, c, p, format, i, a, v) \
- (((opcode) == NOP) ? -1 : \
- (((format) >= k10x) && ((format) <= k10t)) ? 1 : \
- (((format) >= k20t) && ((format) <= k22c)) ? 2 : \
- (((format) >= k32x) && ((format) <= k3rc)) ? 3 : \
- (((format) >= k45cc) && ((format) <= k4rcc)) ? 4 : \
- ((format) == k51l) ? 5 : -1),
+Instruction::InstructionDescriptor const Instruction::kInstructionDescriptors[] = {
+#define INSTRUCTION_DESCR(opcode, c, p, format, index, flags, eflags, vflags) \
+ { vflags, \
+ format, \
+ index, \
+ flags, \
+ InstructionSizeInCodeUnitsByOpcode((c), (format)), \
+ },
#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_SIZE)
+ DEX_INSTRUCTION_LIST(INSTRUCTION_DESCR)
#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_SIZE
+#undef INSTRUCTION_DESCR
};
int32_t Instruction::GetTargetOffset() const {
@@ -520,7 +509,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
struct InstructionStaticAsserts : private Instruction {
#define IMPLIES(a, b) (!(a) || (b))
- #define VAR_ARGS_CHECK(o, c, pname, f, i, a, v) \
+ #define VAR_ARGS_CHECK(o, c, pname, f, i, a, e, v) \
static_assert(IMPLIES((f) == k35c || (f) == k45cc, \
((v) & (kVerifyVarArg | kVerifyVarArgNonZero)) != 0), \
"Missing var-arg verification");
@@ -529,7 +518,7 @@ struct InstructionStaticAsserts : private Instruction {
#undef DEX_INSTRUCTION_LIST
#undef VAR_ARGS_CHECK
- #define VAR_ARGS_RANGE_CHECK(o, c, pname, f, i, a, v) \
+ #define VAR_ARGS_RANGE_CHECK(o, c, pname, f, i, a, e, v) \
static_assert(IMPLIES((f) == k3rc || (f) == k4rcc, \
((v) & (kVerifyVarArgRange | kVerifyVarArgRangeNonZero)) != 0), \
"Missing var-arg verification");
@@ -537,6 +526,14 @@ struct InstructionStaticAsserts : private Instruction {
DEX_INSTRUCTION_LIST(VAR_ARGS_RANGE_CHECK)
#undef DEX_INSTRUCTION_LIST
#undef VAR_ARGS_RANGE_CHECK
+
+ #define EXPERIMENTAL_CHECK(o, c, pname, f, i, a, e, v) \
+ static_assert(kHaveExperimentalInstructions || (((a) & kExperimental) == 0), \
+ "Unexpected experimental instruction.");
+ #include "dex_instruction_list.h"
+ DEX_INSTRUCTION_LIST(EXPERIMENTAL_CHECK)
+ #undef DEX_INSTRUCTION_LIST
+ #undef EXPERIMENTAL_CHECK
};
std::ostream& operator<<(std::ostream& os, const Instruction::Code& code) {
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index d269110570..9a1757601c 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -80,7 +80,7 @@ class Instruction {
};
enum Code { // private marker to avoid generate-operator-out.py from processing.
-#define INSTRUCTION_ENUM(opcode, cname, p, f, i, a, v) cname = (opcode),
+#define INSTRUCTION_ENUM(opcode, cname, p, f, i, a, e, v) cname = (opcode),
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
#undef DEX_INSTRUCTION_LIST
@@ -88,7 +88,7 @@ class Instruction {
RSUB_INT_LIT16 = RSUB_INT,
};
- enum Format {
+ enum Format : uint8_t {
k10x, // op
k12x, // op vA, vB
k11n, // op vA, #+B
@@ -124,7 +124,7 @@ class Instruction {
k51l, // op vAA, #+BBBBBBBBBBBBBBBB
};
- enum IndexType {
+ enum IndexType : uint8_t {
kIndexUnknown = 0,
kIndexNone, // has no index
kIndexTypeRef, // type reference index
@@ -137,14 +137,19 @@ class Instruction {
kIndexCallSiteRef, // call site reference index
};
- enum Flags {
- kBranch = 0x0000001, // conditional or unconditional branch
- kContinue = 0x0000002, // flow can continue to next statement
- kSwitch = 0x0000004, // switch statement
- kThrow = 0x0000008, // could cause an exception to be thrown
- kReturn = 0x0000010, // returns, no additional statements
- kInvoke = 0x0000020, // a flavor of invoke
- kUnconditional = 0x0000040, // unconditional branch
+ enum Flags : uint8_t {
+ kBranch = 0x01, // conditional or unconditional branch
+ kContinue = 0x02, // flow can continue to next statement
+ kSwitch = 0x04, // switch statement
+ kThrow = 0x08, // could cause an exception to be thrown
+ kReturn = 0x10, // returns, no additional statements
+ kInvoke = 0x20, // a flavor of invoke
+ kUnconditional = 0x40, // unconditional branch
+ kExperimental = 0x80, // is an experimental opcode
+ };
+
+ // Old flags. Keeping them around in case we might need them again some day.
+ enum ExtendedFlags : uint32_t {
kAdd = 0x0000080, // addition
kSubtract = 0x0000100, // subtract
kMultiply = 0x0000200, // multiply
@@ -162,10 +167,9 @@ class Instruction {
kClobber = 0x0200000, // clobbers memory in a big way (not just a write)
kRegCFieldOrConstant = 0x0400000, // is the third virtual register a field or literal constant (vC)
kRegBFieldOrConstant = 0x0800000, // is the second virtual register a field or literal constant (vB)
- kExperimental = 0x1000000, // is an experimental opcode
};
- enum VerifyFlag {
+ enum VerifyFlag : uint32_t {
kVerifyNone = 0x0000000,
kVerifyRegA = 0x0000001,
kVerifyRegAWide = 0x0000002,
@@ -194,11 +198,22 @@ class Instruction {
kVerifyRegBCallSite = 0x1000000
};
+ // Collect the enums in a struct for better locality.
+ struct InstructionDescriptor {
+ uint32_t verify_flags; // Set of VerifyFlag.
+ Format format;
+ IndexType index_type;
+ uint8_t flags; // Set of Flags.
+ int8_t size_in_code_units;
+ };
+
static constexpr uint32_t kMaxVarArgRegs = 5;
+ static constexpr bool kHaveExperimentalInstructions = false;
+
// Returns the size (in 2 byte code units) of this instruction.
size_t SizeInCodeUnits() const {
- int result = kInstructionSizeInCodeUnits[Opcode()];
+ int8_t result = kInstructionDescriptors[Opcode()].size_in_code_units;
if (UNLIKELY(result < 0)) {
return SizeInCodeUnitsComplexOpcode();
} else {
@@ -497,32 +512,32 @@ class Instruction {
// Returns the format of the given opcode.
static Format FormatOf(Code opcode) {
- return kInstructionFormats[opcode];
+ return kInstructionDescriptors[opcode].format;
}
// Returns the index type of the given opcode.
static IndexType IndexTypeOf(Code opcode) {
- return kInstructionIndexTypes[opcode];
+ return kInstructionDescriptors[opcode].index_type;
}
// Returns the flags for the given opcode.
- static int FlagsOf(Code opcode) {
- return kInstructionFlags[opcode];
+ static uint8_t FlagsOf(Code opcode) {
+ return kInstructionDescriptors[opcode].flags;
}
// Return the verify flags for the given opcode.
- static int VerifyFlagsOf(Code opcode) {
- return kInstructionVerifyFlags[opcode];
+ static uint32_t VerifyFlagsOf(Code opcode) {
+ return kInstructionDescriptors[opcode].verify_flags;
}
// Returns true if this instruction is a branch.
bool IsBranch() const {
- return (kInstructionFlags[Opcode()] & kBranch) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kBranch) != 0;
}
// Returns true if this instruction is a unconditional branch.
bool IsUnconditional() const {
- return (kInstructionFlags[Opcode()] & kUnconditional) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kUnconditional) != 0;
}
// Returns the branch offset if this instruction is a branch.
@@ -533,23 +548,23 @@ class Instruction {
// Returns true if the instruction is a quickened instruction.
bool IsQuickened() const {
- return (kInstructionIndexTypes[Opcode()] == kIndexFieldOffset) ||
- (kInstructionIndexTypes[Opcode()] == kIndexVtableOffset);
+ return (kInstructionDescriptors[Opcode()].index_type == kIndexFieldOffset) ||
+ (kInstructionDescriptors[Opcode()].index_type == kIndexVtableOffset);
}
// Returns true if this instruction is a switch.
bool IsSwitch() const {
- return (kInstructionFlags[Opcode()] & kSwitch) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kSwitch) != 0;
}
// Returns true if this instruction can throw.
bool IsThrow() const {
- return (kInstructionFlags[Opcode()] & kThrow) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kThrow) != 0;
}
// Determine if the instruction is any of 'return' instructions.
bool IsReturn() const {
- return (kInstructionFlags[Opcode()] & kReturn) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kReturn) != 0;
}
// Determine if this instruction ends execution of its basic block.
@@ -559,41 +574,41 @@ class Instruction {
// Determine if this instruction is an invoke.
bool IsInvoke() const {
- return (kInstructionFlags[Opcode()] & kInvoke) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kInvoke) != 0;
}
// Determine if this instruction is experimental.
bool IsExperimental() const {
- return (kInstructionFlags[Opcode()] & kExperimental) != 0;
+ return (kInstructionDescriptors[Opcode()].flags & kExperimental) != 0;
}
int GetVerifyTypeArgumentA() const {
- return (kInstructionVerifyFlags[Opcode()] & (kVerifyRegA | kVerifyRegAWide));
+ return (kInstructionDescriptors[Opcode()].verify_flags & (kVerifyRegA | kVerifyRegAWide));
}
int GetVerifyTypeArgumentB() const {
- return (kInstructionVerifyFlags[Opcode()] & (kVerifyRegB | kVerifyRegBField |
+ return (kInstructionDescriptors[Opcode()].verify_flags & (kVerifyRegB | kVerifyRegBField |
kVerifyRegBMethod | kVerifyRegBNewInstance | kVerifyRegBString | kVerifyRegBType |
kVerifyRegBWide));
}
int GetVerifyTypeArgumentC() const {
- return (kInstructionVerifyFlags[Opcode()] & (kVerifyRegC | kVerifyRegCField |
+ return (kInstructionDescriptors[Opcode()].verify_flags & (kVerifyRegC | kVerifyRegCField |
kVerifyRegCNewArray | kVerifyRegCType | kVerifyRegCWide));
}
int GetVerifyTypeArgumentH() const {
- return (kInstructionVerifyFlags[Opcode()] & kVerifyRegHPrototype);
+ return (kInstructionDescriptors[Opcode()].verify_flags & kVerifyRegHPrototype);
}
int GetVerifyExtraFlags() const {
- return (kInstructionVerifyFlags[Opcode()] & (kVerifyArrayData | kVerifyBranchTarget |
- kVerifySwitchTargets | kVerifyVarArg | kVerifyVarArgNonZero | kVerifyVarArgRange |
- kVerifyVarArgRangeNonZero | kVerifyError));
+ return (kInstructionDescriptors[Opcode()].verify_flags & (kVerifyArrayData |
+ kVerifyBranchTarget | kVerifySwitchTargets | kVerifyVarArg | kVerifyVarArgNonZero |
+ kVerifyVarArgRange | kVerifyVarArgRangeNonZero | kVerifyError));
}
bool GetVerifyIsRuntimeOnly() const {
- return (kInstructionVerifyFlags[Opcode()] & kVerifyRuntimeOnly) != 0;
+ return (kInstructionDescriptors[Opcode()].verify_flags & kVerifyRuntimeOnly) != 0;
}
// Get the dex PC of this instruction as a offset in code units from the beginning of insns.
@@ -651,11 +666,9 @@ class Instruction {
}
static const char* const kInstructionNames[];
- static Format const kInstructionFormats[];
- static IndexType const kInstructionIndexTypes[];
- static int const kInstructionFlags[];
- static int const kInstructionVerifyFlags[];
- static int const kInstructionSizeInCodeUnits[];
+
+ static const InstructionDescriptor kInstructionDescriptors[];
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
std::ostream& operator<<(std::ostream& os, const Instruction::Code& code);
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 11dc7e2c9f..d0a4ae51a7 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -17,264 +17,264 @@
#ifndef ART_RUNTIME_DEX_INSTRUCTION_LIST_H_
#define ART_RUNTIME_DEX_INSTRUCTION_LIST_H_
-// V(opcode, instruction_code, name, format, index, flags, verifier_flags);
+// V(opcode, instruction_code, name, format, index, flags, extended_flags, verifier_flags);
#define DEX_INSTRUCTION_LIST(V) \
- V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, kVerifyNone) \
- V(0x01, MOVE, "move", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x02, MOVE_FROM16, "move/from16", k22x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x03, MOVE_16, "move/16", k32x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x04, MOVE_WIDE, "move-wide", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x05, MOVE_WIDE_FROM16, "move-wide/from16", k22x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x06, MOVE_WIDE_16, "move-wide/16", k32x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x07, MOVE_OBJECT, "move-object", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x08, MOVE_OBJECT_FROM16, "move-object/from16", k22x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x09, MOVE_OBJECT_16, "move-object/16", k32x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x0A, MOVE_RESULT, "move-result", k11x, kIndexNone, kContinue, kVerifyRegA) \
- V(0x0B, MOVE_RESULT_WIDE, "move-result-wide", k11x, kIndexNone, kContinue, kVerifyRegAWide) \
- V(0x0C, MOVE_RESULT_OBJECT, "move-result-object", k11x, kIndexNone, kContinue, kVerifyRegA) \
- V(0x0D, MOVE_EXCEPTION, "move-exception", k11x, kIndexNone, kContinue, kVerifyRegA) \
- V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, kVerifyNone) \
- V(0x0F, RETURN, "return", k11x, kIndexNone, kReturn, kVerifyRegA) \
- V(0x10, RETURN_WIDE, "return-wide", k11x, kIndexNone, kReturn, kVerifyRegAWide) \
- V(0x11, RETURN_OBJECT, "return-object", k11x, kIndexNone, kReturn, kVerifyRegA) \
- V(0x12, CONST_4, "const/4", k11n, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x13, CONST_16, "const/16", k21s, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x14, CONST, "const", k31i, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x15, CONST_HIGH16, "const/high16", k21h, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x16, CONST_WIDE_16, "const-wide/16", k21s, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x17, CONST_WIDE_32, "const-wide/32", k31i, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x18, CONST_WIDE, "const-wide", k51l, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x19, CONST_WIDE_HIGH16, "const-wide/high16", k21h, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x1A, CONST_STRING, "const-string", k21c, kIndexStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
- V(0x1B, CONST_STRING_JUMBO, "const-string/jumbo", k31c, kIndexStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
- V(0x1C, CONST_CLASS, "const-class", k21c, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
- V(0x1D, MONITOR_ENTER, "monitor-enter", k11x, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA) \
- V(0x1E, MONITOR_EXIT, "monitor-exit", k11x, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA) \
- V(0x1F, CHECK_CAST, "check-cast", k21c, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
- V(0x20, INSTANCE_OF, "instance-of", k22c, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
- V(0x21, ARRAY_LENGTH, "array-length", k12x, kIndexNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
- V(0x22, NEW_INSTANCE, "new-instance", k21c, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegBNewInstance) \
- V(0x23, NEW_ARRAY, "new-array", k22c, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegB | kVerifyRegCNewArray) \
- V(0x24, FILLED_NEW_ARRAY, "filled-new-array", k35c, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArg) \
- V(0x25, FILLED_NEW_ARRAY_RANGE, "filled-new-array/range", k3rc, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArgRange) \
- V(0x26, FILL_ARRAY_DATA, "fill-array-data", k31t, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyArrayData) \
- V(0x27, THROW, "throw", k11x, kIndexNone, kThrow, kVerifyRegA) \
- V(0x28, GOTO, "goto", k10t, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
- V(0x29, GOTO_16, "goto/16", k20t, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
- V(0x2A, GOTO_32, "goto/32", k30t, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
- V(0x2B, PACKED_SWITCH, "packed-switch", k31t, kIndexNone, kContinue | kSwitch, kVerifyRegA | kVerifySwitchTargets) \
- V(0x2C, SPARSE_SWITCH, "sparse-switch", k31t, kIndexNone, kContinue | kSwitch, kVerifyRegA | kVerifySwitchTargets) \
- V(0x2D, CMPL_FLOAT, "cmpl-float", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x2E, CMPG_FLOAT, "cmpg-float", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x2F, CMPL_DOUBLE, "cmpl-double", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x30, CMPG_DOUBLE, "cmpg-double", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x31, CMP_LONG, "cmp-long", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x32, IF_EQ, "if-eq", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x33, IF_NE, "if-ne", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x34, IF_LT, "if-lt", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x35, IF_GE, "if-ge", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x36, IF_GT, "if-gt", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x37, IF_LE, "if-le", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x38, IF_EQZ, "if-eqz", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x39, IF_NEZ, "if-nez", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3A, IF_LTZ, "if-ltz", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3B, IF_GEZ, "if-gez", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3C, IF_GTZ, "if-gtz", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3D, IF_LEZ, "if-lez", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3E, UNUSED_3E, "unused-3e", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x3F, UNUSED_3F, "unused-3f", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x40, UNUSED_40, "unused-40", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x41, UNUSED_41, "unused-41", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x42, UNUSED_42, "unused-42", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x43, UNUSED_43, "unused-43", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x44, AGET, "aget", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x45, AGET_WIDE, "aget-wide", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
- V(0x46, AGET_OBJECT, "aget-object", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x47, AGET_BOOLEAN, "aget-boolean", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x48, AGET_BYTE, "aget-byte", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x49, AGET_CHAR, "aget-char", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4A, AGET_SHORT, "aget-short", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4B, APUT, "aput", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4C, APUT_WIDE, "aput-wide", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
- V(0x4D, APUT_OBJECT, "aput-object", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4E, APUT_BOOLEAN, "aput-boolean", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4F, APUT_BYTE, "aput-byte", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x50, APUT_CHAR, "aput-char", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x51, APUT_SHORT, "aput-short", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x52, IGET, "iget", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x53, IGET_WIDE, "iget-wide", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
- V(0x54, IGET_OBJECT, "iget-object", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x55, IGET_BOOLEAN, "iget-boolean", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x56, IGET_BYTE, "iget-byte", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x57, IGET_CHAR, "iget-char", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x58, IGET_SHORT, "iget-short", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x59, IPUT, "iput", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5A, IPUT_WIDE, "iput-wide", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
- V(0x5B, IPUT_OBJECT, "iput-object", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5C, IPUT_BOOLEAN, "iput-boolean", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5D, IPUT_BYTE, "iput-byte", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5E, IPUT_CHAR, "iput-char", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5F, IPUT_SHORT, "iput-short", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x60, SGET, "sget", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x61, SGET_WIDE, "sget-wide", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
- V(0x62, SGET_OBJECT, "sget-object", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x63, SGET_BOOLEAN, "sget-boolean", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x64, SGET_BYTE, "sget-byte", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x65, SGET_CHAR, "sget-char", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x66, SGET_SHORT, "sget-short", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x67, SPUT, "sput", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x68, SPUT_WIDE, "sput-wide", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
- V(0x69, SPUT_OBJECT, "sput-object", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6B, SPUT_BYTE, "sput-byte", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6C, SPUT_CHAR, "sput-char", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6D, SPUT_SHORT, "sput-short", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x6F, INVOKE_SUPER, "invoke-super", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x71, INVOKE_STATIC, "invoke-static", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
- V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, kIndexNone, kReturn, kVerifyNone) \
- V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x77, INVOKE_STATIC_RANGE, "invoke-static/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
- V(0x78, INVOKE_INTERFACE_RANGE, "invoke-interface/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x79, UNUSED_79, "unused-79", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x7A, UNUSED_7A, "unused-7a", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0x7B, NEG_INT, "neg-int", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x7C, NOT_INT, "not-int", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x7D, NEG_LONG, "neg-long", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x7E, NOT_LONG, "not-long", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x7F, NEG_FLOAT, "neg-float", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x80, NEG_DOUBLE, "neg-double", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x81, INT_TO_LONG, "int-to-long", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x82, INT_TO_FLOAT, "int-to-float", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x83, INT_TO_DOUBLE, "int-to-double", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x84, LONG_TO_INT, "long-to-int", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x85, LONG_TO_FLOAT, "long-to-float", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x86, LONG_TO_DOUBLE, "long-to-double", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x87, FLOAT_TO_INT, "float-to-int", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x88, FLOAT_TO_LONG, "float-to-long", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x89, FLOAT_TO_DOUBLE, "float-to-double", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x8A, DOUBLE_TO_INT, "double-to-int", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x8B, DOUBLE_TO_LONG, "double-to-long", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x8C, DOUBLE_TO_FLOAT, "double-to-float", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x8D, INT_TO_BYTE, "int-to-byte", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x8E, INT_TO_CHAR, "int-to-char", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x8F, INT_TO_SHORT, "int-to-short", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x90, ADD_INT, "add-int", k23x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x91, SUB_INT, "sub-int", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x92, MUL_INT, "mul-int", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x93, DIV_INT, "div-int", k23x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x94, REM_INT, "rem-int", k23x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x95, AND_INT, "and-int", k23x, kIndexNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x96, OR_INT, "or-int", k23x, kIndexNone, kContinue | kOr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x97, XOR_INT, "xor-int", k23x, kIndexNone, kContinue | kXor, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x98, SHL_INT, "shl-int", k23x, kIndexNone, kContinue | kShl, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x99, SHR_INT, "shr-int", k23x, kIndexNone, kContinue | kShr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x9A, USHR_INT, "ushr-int", k23x, kIndexNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x9B, ADD_LONG, "add-long", k23x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9C, SUB_LONG, "sub-long", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9D, MUL_LONG, "mul-long", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9E, DIV_LONG, "div-long", k23x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9F, REM_LONG, "rem-long", k23x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA0, AND_LONG, "and-long", k23x, kIndexNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA1, OR_LONG, "or-long", k23x, kIndexNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA2, XOR_LONG, "xor-long", k23x, kIndexNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA3, SHL_LONG, "shl-long", k23x, kIndexNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
- V(0xA4, SHR_LONG, "shr-long", k23x, kIndexNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
- V(0xA5, USHR_LONG, "ushr-long", k23x, kIndexNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
- V(0xA6, ADD_FLOAT, "add-float", k23x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xA7, SUB_FLOAT, "sub-float", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xA8, MUL_FLOAT, "mul-float", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xA9, DIV_FLOAT, "div-float", k23x, kIndexNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xAA, REM_FLOAT, "rem-float", k23x, kIndexNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xAB, ADD_DOUBLE, "add-double", k23x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAC, SUB_DOUBLE, "sub-double", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAD, MUL_DOUBLE, "mul-double", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAE, DIV_DOUBLE, "div-double", k23x, kIndexNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAF, REM_DOUBLE, "rem-double", k23x, kIndexNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xB0, ADD_INT_2ADDR, "add-int/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
- V(0xB1, SUB_INT_2ADDR, "sub-int/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
- V(0xB2, MUL_INT_2ADDR, "mul-int/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
- V(0xB3, DIV_INT_2ADDR, "div-int/2addr", k12x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB) \
- V(0xB4, REM_INT_2ADDR, "rem-int/2addr", k12x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB) \
- V(0xB5, AND_INT_2ADDR, "and-int/2addr", k12x, kIndexNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB) \
- V(0xB6, OR_INT_2ADDR, "or-int/2addr", k12x, kIndexNone, kContinue | kOr, kVerifyRegA | kVerifyRegB) \
- V(0xB7, XOR_INT_2ADDR, "xor-int/2addr", k12x, kIndexNone, kContinue | kXor, kVerifyRegA | kVerifyRegB) \
- V(0xB8, SHL_INT_2ADDR, "shl-int/2addr", k12x, kIndexNone, kContinue | kShl, kVerifyRegA | kVerifyRegB) \
- V(0xB9, SHR_INT_2ADDR, "shr-int/2addr", k12x, kIndexNone, kContinue | kShr, kVerifyRegA | kVerifyRegB) \
- V(0xBA, USHR_INT_2ADDR, "ushr-int/2addr", k12x, kIndexNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB) \
- V(0xBB, ADD_LONG_2ADDR, "add-long/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBC, SUB_LONG_2ADDR, "sub-long/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBD, MUL_LONG_2ADDR, "mul-long/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBE, DIV_LONG_2ADDR, "div-long/2addr", k12x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBF, REM_LONG_2ADDR, "rem-long/2addr", k12x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC0, AND_LONG_2ADDR, "and-long/2addr", k12x, kIndexNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC1, OR_LONG_2ADDR, "or-long/2addr", k12x, kIndexNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC2, XOR_LONG_2ADDR, "xor-long/2addr", k12x, kIndexNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC3, SHL_LONG_2ADDR, "shl-long/2addr", k12x, kIndexNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegB) \
- V(0xC4, SHR_LONG_2ADDR, "shr-long/2addr", k12x, kIndexNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegB) \
- V(0xC5, USHR_LONG_2ADDR, "ushr-long/2addr", k12x, kIndexNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegB) \
- V(0xC6, ADD_FLOAT_2ADDR, "add-float/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
- V(0xC7, SUB_FLOAT_2ADDR, "sub-float/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
- V(0xC8, MUL_FLOAT_2ADDR, "mul-float/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
- V(0xC9, DIV_FLOAT_2ADDR, "div-float/2addr", k12x, kIndexNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB) \
- V(0xCA, REM_FLOAT_2ADDR, "rem-float/2addr", k12x, kIndexNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB) \
- V(0xCB, ADD_DOUBLE_2ADDR, "add-double/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCC, SUB_DOUBLE_2ADDR, "sub-double/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCD, MUL_DOUBLE_2ADDR, "mul-double/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCE, DIV_DOUBLE_2ADDR, "div-double/2addr", k12x, kIndexNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCF, REM_DOUBLE_2ADDR, "rem-double/2addr", k12x, kIndexNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xD0, ADD_INT_LIT16, "add-int/lit16", k22s, kIndexNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD1, RSUB_INT, "rsub-int", k22s, kIndexNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD2, MUL_INT_LIT16, "mul-int/lit16", k22s, kIndexNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD3, DIV_INT_LIT16, "div-int/lit16", k22s, kIndexNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD4, REM_INT_LIT16, "rem-int/lit16", k22s, kIndexNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD5, AND_INT_LIT16, "and-int/lit16", k22s, kIndexNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD6, OR_INT_LIT16, "or-int/lit16", k22s, kIndexNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD7, XOR_INT_LIT16, "xor-int/lit16", k22s, kIndexNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD8, ADD_INT_LIT8, "add-int/lit8", k22b, kIndexNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD9, RSUB_INT_LIT8, "rsub-int/lit8", k22b, kIndexNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDA, MUL_INT_LIT8, "mul-int/lit8", k22b, kIndexNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDB, DIV_INT_LIT8, "div-int/lit8", k22b, kIndexNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDC, REM_INT_LIT8, "rem-int/lit8", k22b, kIndexNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDD, AND_INT_LIT8, "and-int/lit8", k22b, kIndexNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDE, OR_INT_LIT8, "or-int/lit8", k22b, kIndexNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDF, XOR_INT_LIT8, "xor-int/lit8", k22b, kIndexNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE0, SHL_INT_LIT8, "shl-int/lit8", k22b, kIndexNone, kContinue | kShl | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE1, SHR_INT_LIT8, "shr-int/lit8", k22b, kIndexNone, kContinue | kShr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE2, USHR_INT_LIT8, "ushr-int/lit8", k22b, kIndexNone, kContinue | kUshr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE3, IGET_QUICK, "iget-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE6, IPUT_QUICK, "iput-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, kIndexVtableOffset, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
- V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, kIndexVtableOffset, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
- V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEF, IGET_BOOLEAN_QUICK, "iget-boolean-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF3, UNUSED_F3, "unused-f3", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF4, UNUSED_F4, "unused-f4", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF5, UNUSED_F5, "unused-f5", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF6, UNUSED_F6, "unused-f6", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF7, UNUSED_F7, "unused-f7", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF8, UNUSED_F8, "unused-f8", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xF9, UNUSED_F9, "unused-f9", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero | kVerifyRegHPrototype) \
- V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kVerifyRegHPrototype) \
- V(0xFC, INVOKE_CUSTOM, "invoke-custom", k35c, kIndexCallSiteRef, kContinue | kThrow, kVerifyRegBCallSite | kVerifyVarArg) \
- V(0xFD, INVOKE_CUSTOM_RANGE, "invoke-custom/range", k3rc, kIndexCallSiteRef, kContinue | kThrow, kVerifyRegBCallSite | kVerifyVarArgRange) \
- V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, kVerifyError) \
- V(0xFF, UNUSED_FF, "unused-ff", k10x, kIndexUnknown, 0, kVerifyError)
+ V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, 0, kVerifyNone) \
+ V(0x01, MOVE, "move", k12x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x02, MOVE_FROM16, "move/from16", k22x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x03, MOVE_16, "move/16", k32x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x04, MOVE_WIDE, "move-wide", k12x, kIndexNone, kContinue, 0, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x05, MOVE_WIDE_FROM16, "move-wide/from16", k22x, kIndexNone, kContinue, 0, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x06, MOVE_WIDE_16, "move-wide/16", k32x, kIndexNone, kContinue, 0, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x07, MOVE_OBJECT, "move-object", k12x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x08, MOVE_OBJECT_FROM16, "move-object/from16", k22x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x09, MOVE_OBJECT_16, "move-object/16", k32x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x0A, MOVE_RESULT, "move-result", k11x, kIndexNone, kContinue, 0, kVerifyRegA) \
+ V(0x0B, MOVE_RESULT_WIDE, "move-result-wide", k11x, kIndexNone, kContinue, 0, kVerifyRegAWide) \
+ V(0x0C, MOVE_RESULT_OBJECT, "move-result-object", k11x, kIndexNone, kContinue, 0, kVerifyRegA) \
+ V(0x0D, MOVE_EXCEPTION, "move-exception", k11x, kIndexNone, kContinue, 0, kVerifyRegA) \
+ V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, 0, kVerifyNone) \
+ V(0x0F, RETURN, "return", k11x, kIndexNone, kReturn, 0, kVerifyRegA) \
+ V(0x10, RETURN_WIDE, "return-wide", k11x, kIndexNone, kReturn, 0, kVerifyRegAWide) \
+ V(0x11, RETURN_OBJECT, "return-object", k11x, kIndexNone, kReturn, 0, kVerifyRegA) \
+ V(0x12, CONST_4, "const/4", k11n, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x13, CONST_16, "const/16", k21s, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x14, CONST, "const", k31i, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x15, CONST_HIGH16, "const/high16", k21h, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x16, CONST_WIDE_16, "const-wide/16", k21s, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x17, CONST_WIDE_32, "const-wide/32", k31i, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x18, CONST_WIDE, "const-wide", k51l, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x19, CONST_WIDE_HIGH16, "const-wide/high16", k21h, kIndexNone, kContinue, kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x1A, CONST_STRING, "const-string", k21c, kIndexStringRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBString) \
+ V(0x1B, CONST_STRING_JUMBO, "const-string/jumbo", k31c, kIndexStringRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBString) \
+ V(0x1C, CONST_CLASS, "const-class", k21c, kIndexTypeRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBType) \
+ V(0x1D, MONITOR_ENTER, "monitor-enter", k11x, kIndexNone, kContinue | kThrow, kClobber, kVerifyRegA) \
+ V(0x1E, MONITOR_EXIT, "monitor-exit", k11x, kIndexNone, kContinue | kThrow, kClobber, kVerifyRegA) \
+ V(0x1F, CHECK_CAST, "check-cast", k21c, kIndexTypeRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBType) \
+ V(0x20, INSTANCE_OF, "instance-of", k22c, kIndexTypeRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
+ V(0x21, ARRAY_LENGTH, "array-length", k12x, kIndexNone, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x22, NEW_INSTANCE, "new-instance", k21c, kIndexTypeRef, kContinue | kThrow, kClobber, kVerifyRegA | kVerifyRegBNewInstance) \
+ V(0x23, NEW_ARRAY, "new-array", k22c, kIndexTypeRef, kContinue | kThrow, kClobber, kVerifyRegA | kVerifyRegB | kVerifyRegCNewArray) \
+ V(0x24, FILLED_NEW_ARRAY, "filled-new-array", k35c, kIndexTypeRef, kContinue | kThrow, kClobber, kVerifyRegBType | kVerifyVarArg) \
+ V(0x25, FILLED_NEW_ARRAY_RANGE, "filled-new-array/range", k3rc, kIndexTypeRef, kContinue | kThrow, kClobber, kVerifyRegBType | kVerifyVarArgRange) \
+ V(0x26, FILL_ARRAY_DATA, "fill-array-data", k31t, kIndexNone, kContinue | kThrow, kClobber, kVerifyRegA | kVerifyArrayData) \
+ V(0x27, THROW, "throw", k11x, kIndexNone, kThrow, 0, kVerifyRegA) \
+ V(0x28, GOTO, "goto", k10t, kIndexNone, kBranch | kUnconditional, 0, kVerifyBranchTarget) \
+ V(0x29, GOTO_16, "goto/16", k20t, kIndexNone, kBranch | kUnconditional, 0, kVerifyBranchTarget) \
+ V(0x2A, GOTO_32, "goto/32", k30t, kIndexNone, kBranch | kUnconditional, 0, kVerifyBranchTarget) \
+ V(0x2B, PACKED_SWITCH, "packed-switch", k31t, kIndexNone, kContinue | kSwitch, 0, kVerifyRegA | kVerifySwitchTargets) \
+ V(0x2C, SPARSE_SWITCH, "sparse-switch", k31t, kIndexNone, kContinue | kSwitch, 0, kVerifyRegA | kVerifySwitchTargets) \
+ V(0x2D, CMPL_FLOAT, "cmpl-float", k23x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x2E, CMPG_FLOAT, "cmpg-float", k23x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x2F, CMPL_DOUBLE, "cmpl-double", k23x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x30, CMPG_DOUBLE, "cmpg-double", k23x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x31, CMP_LONG, "cmp-long", k23x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x32, IF_EQ, "if-eq", k22t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x33, IF_NE, "if-ne", k22t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x34, IF_LT, "if-lt", k22t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x35, IF_GE, "if-ge", k22t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x36, IF_GT, "if-gt", k22t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x37, IF_LE, "if-le", k22t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x38, IF_EQZ, "if-eqz", k21t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x39, IF_NEZ, "if-nez", k21t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3A, IF_LTZ, "if-ltz", k21t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3B, IF_GEZ, "if-gez", k21t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3C, IF_GTZ, "if-gtz", k21t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3D, IF_LEZ, "if-lez", k21t, kIndexNone, kContinue | kBranch, 0, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3E, UNUSED_3E, "unused-3e", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x3F, UNUSED_3F, "unused-3f", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x40, UNUSED_40, "unused-40", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x41, UNUSED_41, "unused-41", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x42, UNUSED_42, "unused-42", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x43, UNUSED_43, "unused-43", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x44, AGET, "aget", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x45, AGET_WIDE, "aget-wide", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
+ V(0x46, AGET_OBJECT, "aget-object", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x47, AGET_BOOLEAN, "aget-boolean", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x48, AGET_BYTE, "aget-byte", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x49, AGET_CHAR, "aget-char", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4A, AGET_SHORT, "aget-short", k23x, kIndexNone, kContinue | kThrow, kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4B, APUT, "aput", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4C, APUT_WIDE, "aput-wide", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
+ V(0x4D, APUT_OBJECT, "aput-object", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4E, APUT_BOOLEAN, "aput-boolean", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4F, APUT_BYTE, "aput-byte", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x50, APUT_CHAR, "aput-char", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x51, APUT_SHORT, "aput-short", k23x, kIndexNone, kContinue | kThrow, kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x52, IGET, "iget", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x53, IGET_WIDE, "iget-wide", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
+ V(0x54, IGET_OBJECT, "iget-object", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x55, IGET_BOOLEAN, "iget-boolean", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x56, IGET_BYTE, "iget-byte", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x57, IGET_CHAR, "iget-char", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x58, IGET_SHORT, "iget-short", k22c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x59, IPUT, "iput", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5A, IPUT_WIDE, "iput-wide", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
+ V(0x5B, IPUT_OBJECT, "iput-object", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5C, IPUT_BOOLEAN, "iput-boolean", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5D, IPUT_BYTE, "iput-byte", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5E, IPUT_CHAR, "iput-char", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5F, IPUT_SHORT, "iput-short", k22c, kIndexFieldRef, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x60, SGET, "sget", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x61, SGET_WIDE, "sget-wide", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
+ V(0x62, SGET_OBJECT, "sget-object", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x63, SGET_BOOLEAN, "sget-boolean", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x64, SGET_BYTE, "sget-byte", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x65, SGET_CHAR, "sget-char", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x66, SGET_SHORT, "sget-short", k21c, kIndexFieldRef, kContinue | kThrow, kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x67, SPUT, "sput", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x68, SPUT_WIDE, "sput-wide", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
+ V(0x69, SPUT_OBJECT, "sput-object", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6B, SPUT_BYTE, "sput-byte", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6C, SPUT_CHAR, "sput-char", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6D, SPUT_SHORT, "sput-short", k21c, kIndexFieldRef, kContinue | kThrow, kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x6F, INVOKE_SUPER, "invoke-super", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x71, INVOKE_STATIC, "invoke-static", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArg) \
+ V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, kIndexNone, kReturn, 0, kVerifyNone) \
+ V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x77, INVOKE_STATIC_RANGE, "invoke-static/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRange) \
+ V(0x78, INVOKE_INTERFACE_RANGE, "invoke-interface/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x79, UNUSED_79, "unused-79", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x7A, UNUSED_7A, "unused-7a", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0x7B, NEG_INT, "neg-int", k12x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x7C, NOT_INT, "not-int", k12x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x7D, NEG_LONG, "neg-long", k12x, kIndexNone, kContinue, 0, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x7E, NOT_LONG, "not-long", k12x, kIndexNone, kContinue, 0, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x7F, NEG_FLOAT, "neg-float", k12x, kIndexNone, kContinue, 0, kVerifyRegA | kVerifyRegB) \
+ V(0x80, NEG_DOUBLE, "neg-double", k12x, kIndexNone, kContinue, 0, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x81, INT_TO_LONG, "int-to-long", k12x, kIndexNone, kContinue, kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x82, INT_TO_FLOAT, "int-to-float", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x83, INT_TO_DOUBLE, "int-to-double", k12x, kIndexNone, kContinue, kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x84, LONG_TO_INT, "long-to-int", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x85, LONG_TO_FLOAT, "long-to-float", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x86, LONG_TO_DOUBLE, "long-to-double", k12x, kIndexNone, kContinue, kCast, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x87, FLOAT_TO_INT, "float-to-int", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x88, FLOAT_TO_LONG, "float-to-long", k12x, kIndexNone, kContinue, kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x89, FLOAT_TO_DOUBLE, "float-to-double", k12x, kIndexNone, kContinue, kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x8A, DOUBLE_TO_INT, "double-to-int", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x8B, DOUBLE_TO_LONG, "double-to-long", k12x, kIndexNone, kContinue, kCast, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x8C, DOUBLE_TO_FLOAT, "double-to-float", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x8D, INT_TO_BYTE, "int-to-byte", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x8E, INT_TO_CHAR, "int-to-char", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x8F, INT_TO_SHORT, "int-to-short", k12x, kIndexNone, kContinue, kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x90, ADD_INT, "add-int", k23x, kIndexNone, kContinue, kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x91, SUB_INT, "sub-int", k23x, kIndexNone, kContinue, kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x92, MUL_INT, "mul-int", k23x, kIndexNone, kContinue, kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x93, DIV_INT, "div-int", k23x, kIndexNone, kContinue | kThrow, kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x94, REM_INT, "rem-int", k23x, kIndexNone, kContinue | kThrow, kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x95, AND_INT, "and-int", k23x, kIndexNone, kContinue, kAnd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x96, OR_INT, "or-int", k23x, kIndexNone, kContinue, kOr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x97, XOR_INT, "xor-int", k23x, kIndexNone, kContinue, kXor, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x98, SHL_INT, "shl-int", k23x, kIndexNone, kContinue, kShl, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x99, SHR_INT, "shr-int", k23x, kIndexNone, kContinue, kShr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x9A, USHR_INT, "ushr-int", k23x, kIndexNone, kContinue, kUshr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x9B, ADD_LONG, "add-long", k23x, kIndexNone, kContinue, kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9C, SUB_LONG, "sub-long", k23x, kIndexNone, kContinue, kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9D, MUL_LONG, "mul-long", k23x, kIndexNone, kContinue, kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9E, DIV_LONG, "div-long", k23x, kIndexNone, kContinue | kThrow, kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9F, REM_LONG, "rem-long", k23x, kIndexNone, kContinue | kThrow, kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA0, AND_LONG, "and-long", k23x, kIndexNone, kContinue, kAnd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA1, OR_LONG, "or-long", k23x, kIndexNone, kContinue, kOr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA2, XOR_LONG, "xor-long", k23x, kIndexNone, kContinue, kXor, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA3, SHL_LONG, "shl-long", k23x, kIndexNone, kContinue, kShl, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+ V(0xA4, SHR_LONG, "shr-long", k23x, kIndexNone, kContinue, kShr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+ V(0xA5, USHR_LONG, "ushr-long", k23x, kIndexNone, kContinue, kUshr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+ V(0xA6, ADD_FLOAT, "add-float", k23x, kIndexNone, kContinue, kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xA7, SUB_FLOAT, "sub-float", k23x, kIndexNone, kContinue, kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xA8, MUL_FLOAT, "mul-float", k23x, kIndexNone, kContinue, kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xA9, DIV_FLOAT, "div-float", k23x, kIndexNone, kContinue, kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xAA, REM_FLOAT, "rem-float", k23x, kIndexNone, kContinue, kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xAB, ADD_DOUBLE, "add-double", k23x, kIndexNone, kContinue, kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAC, SUB_DOUBLE, "sub-double", k23x, kIndexNone, kContinue, kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAD, MUL_DOUBLE, "mul-double", k23x, kIndexNone, kContinue, kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAE, DIV_DOUBLE, "div-double", k23x, kIndexNone, kContinue, kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAF, REM_DOUBLE, "rem-double", k23x, kIndexNone, kContinue, kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xB0, ADD_INT_2ADDR, "add-int/2addr", k12x, kIndexNone, kContinue, kAdd, kVerifyRegA | kVerifyRegB) \
+ V(0xB1, SUB_INT_2ADDR, "sub-int/2addr", k12x, kIndexNone, kContinue, kSubtract, kVerifyRegA | kVerifyRegB) \
+ V(0xB2, MUL_INT_2ADDR, "mul-int/2addr", k12x, kIndexNone, kContinue, kMultiply, kVerifyRegA | kVerifyRegB) \
+ V(0xB3, DIV_INT_2ADDR, "div-int/2addr", k12x, kIndexNone, kContinue | kThrow, kDivide, kVerifyRegA | kVerifyRegB) \
+ V(0xB4, REM_INT_2ADDR, "rem-int/2addr", k12x, kIndexNone, kContinue | kThrow, kRemainder, kVerifyRegA | kVerifyRegB) \
+ V(0xB5, AND_INT_2ADDR, "and-int/2addr", k12x, kIndexNone, kContinue, kAnd, kVerifyRegA | kVerifyRegB) \
+ V(0xB6, OR_INT_2ADDR, "or-int/2addr", k12x, kIndexNone, kContinue, kOr, kVerifyRegA | kVerifyRegB) \
+ V(0xB7, XOR_INT_2ADDR, "xor-int/2addr", k12x, kIndexNone, kContinue, kXor, kVerifyRegA | kVerifyRegB) \
+ V(0xB8, SHL_INT_2ADDR, "shl-int/2addr", k12x, kIndexNone, kContinue, kShl, kVerifyRegA | kVerifyRegB) \
+ V(0xB9, SHR_INT_2ADDR, "shr-int/2addr", k12x, kIndexNone, kContinue, kShr, kVerifyRegA | kVerifyRegB) \
+ V(0xBA, USHR_INT_2ADDR, "ushr-int/2addr", k12x, kIndexNone, kContinue, kUshr, kVerifyRegA | kVerifyRegB) \
+ V(0xBB, ADD_LONG_2ADDR, "add-long/2addr", k12x, kIndexNone, kContinue, kAdd, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBC, SUB_LONG_2ADDR, "sub-long/2addr", k12x, kIndexNone, kContinue, kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBD, MUL_LONG_2ADDR, "mul-long/2addr", k12x, kIndexNone, kContinue, kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBE, DIV_LONG_2ADDR, "div-long/2addr", k12x, kIndexNone, kContinue | kThrow, kDivide, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBF, REM_LONG_2ADDR, "rem-long/2addr", k12x, kIndexNone, kContinue | kThrow, kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC0, AND_LONG_2ADDR, "and-long/2addr", k12x, kIndexNone, kContinue, kAnd, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC1, OR_LONG_2ADDR, "or-long/2addr", k12x, kIndexNone, kContinue, kOr, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC2, XOR_LONG_2ADDR, "xor-long/2addr", k12x, kIndexNone, kContinue, kXor, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC3, SHL_LONG_2ADDR, "shl-long/2addr", k12x, kIndexNone, kContinue, kShl, kVerifyRegAWide | kVerifyRegB) \
+ V(0xC4, SHR_LONG_2ADDR, "shr-long/2addr", k12x, kIndexNone, kContinue, kShr, kVerifyRegAWide | kVerifyRegB) \
+ V(0xC5, USHR_LONG_2ADDR, "ushr-long/2addr", k12x, kIndexNone, kContinue, kUshr, kVerifyRegAWide | kVerifyRegB) \
+ V(0xC6, ADD_FLOAT_2ADDR, "add-float/2addr", k12x, kIndexNone, kContinue, kAdd, kVerifyRegA | kVerifyRegB) \
+ V(0xC7, SUB_FLOAT_2ADDR, "sub-float/2addr", k12x, kIndexNone, kContinue, kSubtract, kVerifyRegA | kVerifyRegB) \
+ V(0xC8, MUL_FLOAT_2ADDR, "mul-float/2addr", k12x, kIndexNone, kContinue, kMultiply, kVerifyRegA | kVerifyRegB) \
+ V(0xC9, DIV_FLOAT_2ADDR, "div-float/2addr", k12x, kIndexNone, kContinue, kDivide, kVerifyRegA | kVerifyRegB) \
+ V(0xCA, REM_FLOAT_2ADDR, "rem-float/2addr", k12x, kIndexNone, kContinue, kRemainder, kVerifyRegA | kVerifyRegB) \
+ V(0xCB, ADD_DOUBLE_2ADDR, "add-double/2addr", k12x, kIndexNone, kContinue, kAdd, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCC, SUB_DOUBLE_2ADDR, "sub-double/2addr", k12x, kIndexNone, kContinue, kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCD, MUL_DOUBLE_2ADDR, "mul-double/2addr", k12x, kIndexNone, kContinue, kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCE, DIV_DOUBLE_2ADDR, "div-double/2addr", k12x, kIndexNone, kContinue, kDivide, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCF, REM_DOUBLE_2ADDR, "rem-double/2addr", k12x, kIndexNone, kContinue, kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xD0, ADD_INT_LIT16, "add-int/lit16", k22s, kIndexNone, kContinue, kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD1, RSUB_INT, "rsub-int", k22s, kIndexNone, kContinue, kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD2, MUL_INT_LIT16, "mul-int/lit16", k22s, kIndexNone, kContinue, kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD3, DIV_INT_LIT16, "div-int/lit16", k22s, kIndexNone, kContinue | kThrow, kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD4, REM_INT_LIT16, "rem-int/lit16", k22s, kIndexNone, kContinue | kThrow, kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD5, AND_INT_LIT16, "and-int/lit16", k22s, kIndexNone, kContinue, kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD6, OR_INT_LIT16, "or-int/lit16", k22s, kIndexNone, kContinue, kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD7, XOR_INT_LIT16, "xor-int/lit16", k22s, kIndexNone, kContinue, kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD8, ADD_INT_LIT8, "add-int/lit8", k22b, kIndexNone, kContinue, kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD9, RSUB_INT_LIT8, "rsub-int/lit8", k22b, kIndexNone, kContinue, kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDA, MUL_INT_LIT8, "mul-int/lit8", k22b, kIndexNone, kContinue, kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDB, DIV_INT_LIT8, "div-int/lit8", k22b, kIndexNone, kContinue | kThrow, kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDC, REM_INT_LIT8, "rem-int/lit8", k22b, kIndexNone, kContinue | kThrow, kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDD, AND_INT_LIT8, "and-int/lit8", k22b, kIndexNone, kContinue, kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDE, OR_INT_LIT8, "or-int/lit8", k22b, kIndexNone, kContinue, kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDF, XOR_INT_LIT8, "xor-int/lit8", k22b, kIndexNone, kContinue, kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE0, SHL_INT_LIT8, "shl-int/lit8", k22b, kIndexNone, kContinue, kShl | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE1, SHR_INT_LIT8, "shr-int/lit8", k22b, kIndexNone, kContinue, kShr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE2, USHR_INT_LIT8, "ushr-int/lit8", k22b, kIndexNone, kContinue, kUshr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE3, IGET_QUICK, "iget-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE6, IPUT_QUICK, "iput-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, kIndexVtableOffset, kContinue | kThrow | kInvoke, 0, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
+ V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, kIndexVtableOffset, kContinue | kThrow | kInvoke, 0, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
+ V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEF, IGET_BOOLEAN_QUICK, "iget-boolean-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow, kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF3, UNUSED_F3, "unused-f3", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xF4, UNUSED_F4, "unused-f4", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xF5, UNUSED_F5, "unused-f5", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xF6, UNUSED_F6, "unused-f6", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xF7, UNUSED_F7, "unused-f7", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xF8, UNUSED_F8, "unused-f8", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xF9, UNUSED_F9, "unused-f9", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgNonZero | kVerifyRegHPrototype) \
+ V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kVerifyRegHPrototype) \
+ V(0xFC, INVOKE_CUSTOM, "invoke-custom", k35c, kIndexCallSiteRef, kContinue | kThrow, 0, kVerifyRegBCallSite | kVerifyVarArg) \
+ V(0xFD, INVOKE_CUSTOM_RANGE, "invoke-custom/range", k3rc, kIndexCallSiteRef, kContinue | kThrow, 0, kVerifyRegBCallSite | kVerifyVarArgRange) \
+ V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, 0, kVerifyError) \
+ V(0xFF, UNUSED_FF, "unused-ff", k10x, kIndexUnknown, 0, 0, kVerifyError)
#define DEX_INSTRUCTION_FORMAT_LIST(V) \
V(k10x) \
diff --git a/runtime/dex_instruction_visitor.h b/runtime/dex_instruction_visitor.h
deleted file mode 100644
index 42af6a9c4f..0000000000
--- a/runtime/dex_instruction_visitor.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_DEX_INSTRUCTION_VISITOR_H_
-#define ART_RUNTIME_DEX_INSTRUCTION_VISITOR_H_
-
-#include "base/macros.h"
-#include "dex_instruction.h"
-
-namespace art {
-
-template<typename T>
-class DexInstructionVisitor {
- public:
- void Visit(const uint16_t* code, size_t size_in_bytes) {
- T* derived = static_cast<T*>(this);
- size_t size_in_code_units = size_in_bytes / sizeof(uint16_t);
- size_t i = 0;
- while (i < size_in_code_units) {
- const Instruction* inst = Instruction::At(&code[i]);
- switch (inst->Opcode()) {
-#define INSTRUCTION_CASE(o, cname, p, f, i, a, v) \
- case Instruction::cname: { \
- derived->Do_ ## cname(inst); \
- break; \
- }
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_CASE)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_CASE
- default:
- CHECK(false);
- }
- i += inst->SizeInCodeUnits();
- }
- }
-
- private:
- // Specific handlers for each instruction.
-#define INSTRUCTION_VISITOR(o, cname, p, f, i, a, v) \
- void Do_ ## cname(const Instruction* inst) { \
- T* derived = static_cast<T*>(this); \
- derived->Do_Default(inst); \
- }
-#include "dex_instruction_list.h"
- DEX_INSTRUCTION_LIST(INSTRUCTION_VISITOR)
-#undef DEX_INSTRUCTION_LIST
-#undef INSTRUCTION_VISITOR
-
- // The default instruction handler.
- void Do_Default(const Instruction*) {
- return;
- }
-};
-
-
-} // namespace art
-
-#endif // ART_RUNTIME_DEX_INSTRUCTION_VISITOR_H_
diff --git a/runtime/dex_instruction_visitor_test.cc b/runtime/dex_instruction_visitor_test.cc
deleted file mode 100644
index 5273084a9a..0000000000
--- a/runtime/dex_instruction_visitor_test.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex_instruction_visitor.h"
-
-#include <memory>
-
-#include "gtest/gtest.h"
-
-namespace art {
-
-class TestVisitor : public DexInstructionVisitor<TestVisitor> {};
-
-TEST(InstructionTest, Init) {
- std::unique_ptr<TestVisitor> visitor(new TestVisitor);
-}
-
-class CountVisitor : public DexInstructionVisitor<CountVisitor> {
- public:
- int count_;
-
- CountVisitor() : count_(0) {}
-
- void Do_Default(const Instruction*) {
- ++count_;
- }
-};
-
-TEST(InstructionTest, Count) {
- CountVisitor v0;
- const uint16_t c0[] = {};
- v0.Visit(c0, sizeof(c0));
- EXPECT_EQ(0, v0.count_);
-
- CountVisitor v1;
- const uint16_t c1[] = { 0 };
- v1.Visit(c1, sizeof(c1));
- EXPECT_EQ(1, v1.count_);
-
- CountVisitor v2;
- const uint16_t c2[] = { 0, 0 };
- v2.Visit(c2, sizeof(c2));
- EXPECT_EQ(2, v2.count_);
-
- CountVisitor v3;
- const uint16_t c3[] = { 0, 0, 0, };
- v3.Visit(c3, sizeof(c3));
- EXPECT_EQ(3, v3.count_);
-
- CountVisitor v4;
- const uint16_t c4[] = { 0, 0, 0, 0 };
- v4.Visit(c4, sizeof(c4));
- EXPECT_EQ(4, v4.count_);
-}
-
-} // namespace art
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 7f738bf5a9..5594f4dfc7 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -129,7 +129,21 @@ FaultManager::~FaultManager() {
void FaultManager::Init() {
CHECK(!initialized_);
- AddSpecialSignalHandlerFn(SIGSEGV, art_fault_handler);
+ sigset_t mask;
+ sigfillset(&mask);
+ sigdelset(&mask, SIGABRT);
+ sigdelset(&mask, SIGBUS);
+ sigdelset(&mask, SIGFPE);
+ sigdelset(&mask, SIGILL);
+ sigdelset(&mask, SIGSEGV);
+
+ SigchainAction sa = {
+ .sc_sigaction = art_fault_handler,
+ .sc_mask = mask,
+ .sc_flags = 0UL,
+ };
+
+ AddSpecialSignalHandlerFn(SIGSEGV, &sa);
initialized_ = true;
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8b80f54880..b0218b51fa 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -299,7 +299,7 @@ void ConcurrentCopying::InitializePhase() {
objects_moved_.StoreRelaxed(0);
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
if (gc_cause == kGcCauseExplicit ||
- gc_cause == kGcCauseForNativeAlloc ||
+ gc_cause == kGcCauseForNativeAllocBlocking ||
gc_cause == kGcCauseCollectorTransition ||
GetCurrentIteration()->GetClearSoftReferences()) {
force_evacuate_all_ = true;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 41e605104c..d3798924ee 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -193,6 +193,7 @@ void SemiSpace::MarkingPhase() {
if (generational_) {
if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+ GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index c35ec7c341..2bbc86e3e9 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -29,7 +29,7 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
- case kGcCauseForNativeAllocBackground: return "NativeAllocBackground";
+ case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
case kGcCauseCollectorTransition: return "CollectorTransition";
case kGcCauseDisableMovingGc: return "DisableMovingGc";
case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 41c894340c..b8cf3c4295 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -31,10 +31,12 @@ enum GcCause {
kGcCauseBackground,
// An explicit System.gc() call.
kGcCauseExplicit,
- // GC triggered for a native allocation.
+ // GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
+ // (This may be a blocking GC depending on whether we run a non-concurrent collector).
kGcCauseForNativeAlloc,
- // Background GC triggered for a native allocation.
- kGcCauseForNativeAllocBackground,
+ // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
+ // (This is always a blocking GC).
+ kGcCauseForNativeAllocBlocking,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
// Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 298336ae4d..668fb4b7e4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -563,6 +563,7 @@ Heap::Heap(size_t initial_size,
native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
*native_blocking_gc_lock_));
+ native_blocking_gc_is_assigned_ = false;
native_blocking_gc_in_progress_ = false;
native_blocking_gcs_finished_ = 0;
@@ -2695,6 +2696,10 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// old_native_bytes_allocated_ now that GC has been triggered, resetting
// new_native_bytes_allocated_ to zero in the process.
old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+ if (gc_cause == kGcCauseForNativeAllocBlocking) {
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_in_progress_ = true;
+ }
}
DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3526,6 +3531,7 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
// it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
if (cause == kGcCauseForAlloc ||
cause == kGcCauseForNativeAlloc ||
+ cause == kGcCauseForNativeAllocBlocking ||
cause == kGcCauseDisableMovingGc) {
VLOG(gc) << "Starting a blocking GC " << cause;
}
@@ -3927,33 +3933,36 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// finish before addressing the fact that we exceeded the blocking
// watermark again.
do {
+ ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
native_blocking_gc_cond_->Wait(self);
} while (native_blocking_gcs_finished_ == initial_gcs_finished);
initial_gcs_finished++;
}
// It's possible multiple threads have seen that we exceeded the
- // blocking watermark. Ensure that only one of those threads runs the
- // blocking GC. The rest of the threads should instead wait for the
- // blocking GC to complete.
+ // blocking watermark. Ensure that only one of those threads is assigned
+ // to run the blocking GC. The rest of the threads should instead wait
+ // for the blocking GC to complete.
if (native_blocking_gcs_finished_ == initial_gcs_finished) {
- if (native_blocking_gc_in_progress_) {
+ if (native_blocking_gc_is_assigned_) {
do {
+ ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
native_blocking_gc_cond_->Wait(self);
} while (native_blocking_gcs_finished_ == initial_gcs_finished);
} else {
- native_blocking_gc_in_progress_ = true;
+ native_blocking_gc_is_assigned_ = true;
run_gc = true;
}
}
}
if (run_gc) {
- CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
RunFinalization(env, kNativeAllocationFinalizeTimeout);
CHECK(!env->ExceptionCheck());
MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_is_assigned_ = false;
native_blocking_gc_in_progress_ = false;
native_blocking_gcs_finished_++;
native_blocking_gc_cond_->Broadcast(self);
@@ -3962,7 +3971,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAllocBackground, /*force_full*/true);
+ RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index aa123d8736..72871785e5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1237,10 +1237,20 @@ class Heap {
// old_native_bytes_allocated_ and new_native_bytes_allocated_.
Atomic<size_t> old_native_bytes_allocated_;
- // Used for synchronization of blocking GCs triggered by
- // RegisterNativeAllocation.
+ // Used for synchronization when multiple threads call into
+ // RegisterNativeAllocation and require blocking GC.
+ // * If a previous blocking GC is in progress, all threads will wait for
+ // that GC to complete, then wait for one of the threads to complete another
+ // blocking GC.
+ // * If a blocking GC is assigned but not in progress, a thread has been
+ // assigned to run a blocking GC but has not started yet. Threads will wait
+ // for the assigned blocking GC to complete.
+ // * If a blocking GC is not assigned nor in progress, the first thread will
+ // run a blocking GC and signal to other threads that blocking GC has been
+ // assigned.
Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+ bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 06638e712c..2de4f19222 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -40,7 +40,7 @@ DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread:: ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
#define THREAD_IS_GC_MARKING_OFFSET 52
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread:: IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_CARD_TABLE_OFFSET 128
+#define THREAD_CARD_TABLE_OFFSET 136
DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread:: CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
#define CODEITEM_INSNS_OFFSET 16
DEFINE_CHECK_EQ(static_cast<int32_t>(CODEITEM_INSNS_OFFSET), (static_cast<int32_t>(__builtin_offsetof(art::DexFile::CodeItem, insns_))))
diff --git a/runtime/image.cc b/runtime/image.cc
index b2486a1209..489a53b35c 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '3', '\0' }; // hash-based DexCache fields
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '4', '\0' }; // Thread.interrupted
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index bf49e84760..f3ca3383a5 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -264,7 +264,11 @@ static inline JValue Execute(
// Pop the shadow frame before calling into compiled code.
self->PopShadowFrame();
- ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result);
+ // Calculate the offset of the first input reg. The input registers are in the high regs.
+ // The frame may only contain room for the inputs, in which case the arg offset is 0.
+ uint16_t arg_offset = code_item->registers_size_ == shadow_frame.NumberOfVRegs() ?
+ code_item->registers_size_ - code_item->ins_size_ : 0;
+ ArtInterpreterToCompiledCodeBridge(self, nullptr, &shadow_frame, arg_offset, &result);
// Push the shadow frame back as the caller will expect it.
self->PushShadowFrame(&shadow_frame);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ef0ddb30db..d8c17f24e2 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -458,8 +458,8 @@ ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
- const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
+ uint16_t arg_offset,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
@@ -482,9 +482,15 @@ void ArtInterpreterToCompiledCodeBridge(Thread* self,
method = shadow_frame->GetMethod();
}
}
- uint16_t arg_offset = (code_item == nullptr)
- ? 0
- : code_item->registers_size_ - code_item->ins_size_;
+ // Basic checks for the arg_offset. If there's no code item, the arg_offset must be 0. Otherwise,
+ // check that the arg_offset isn't greater than the number of registers. A stronger check is
+ // difficult since the frame may contain space for all the registers in the method, or only enough
+ // space for the arguments.
+ if (method->GetCodeItem() == nullptr) {
+ DCHECK_EQ(0u, arg_offset) << method->PrettyMethod();
+ } else {
+ DCHECK_LE(arg_offset, shadow_frame->NumberOfVRegs());
+ }
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr && caller != nullptr) {
jit->NotifyInterpreterToCompiledCodeTransition(self, caller);
@@ -918,12 +924,20 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Compute method information.
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
-
// Number of registers for the callee's call frame.
uint16_t num_regs;
if (LIKELY(code_item != nullptr)) {
- num_regs = code_item->registers_size_;
- DCHECK_EQ(string_init ? number_of_inputs - 1 : number_of_inputs, code_item->ins_size_);
+ // When transitioning to compiled code, space only needs to be reserved for the input registers.
+ // The rest of the frame gets discarded. This also prevents accessing the called method's code
+ // item, saving memory by keeping code items of compiled code untouched.
+ if (Runtime::Current()->IsStarted() &&
+ !ClassLinker::ShouldUseInterpreterEntrypoint(
+ called_method, called_method->GetEntryPointFromQuickCompiledCode())) {
+ num_regs = number_of_inputs;
+ } else {
+ num_regs = code_item->registers_size_;
+ DCHECK_EQ(string_init ? number_of_inputs - 1 : number_of_inputs, code_item->ins_size_);
+ }
} else {
DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
num_regs = number_of_inputs;
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 2589ad046b..67e072dd96 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -527,10 +527,11 @@ static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFra
}
}
+// The arg_offset is the offset to the first input register in the frame.
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
- const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
+ uint16_t arg_offset,
JValue* result);
// Set string value created from StringFactory.newStringFromXXX() into all aliases of
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 869d43061b..74e6cd297d 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -469,6 +469,7 @@ bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
UNIMPLEMENTED_CASE(UnsafeFullFence /* ()V */)
UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
+ UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
case Intrinsics::kNone:
res = false;
break;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2bd1d646a1..52322529c8 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,6 +18,7 @@
#include <sstream>
+#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/stl_util.h"
@@ -325,7 +326,8 @@ static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = n
// Use a sentinel for marking entries in the JIT table that have been cleared.
// This helps diagnosing in case the compiled code tries to wrongly access such
// entries.
-static mirror::Class* const weak_sentinel = reinterpret_cast<mirror::Class*>(0x1);
+static mirror::Class* const weak_sentinel =
+ reinterpret_cast<mirror::Class*>(Context::kBadGprBase + 0xff);
// Helper for the GC to process a weak class in a JIT root table.
static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
@@ -677,13 +679,13 @@ void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
}
method->SetProfilingInfo(nullptr);
ScopedCodeCacheWrite ccw(code_map_.get());
- for (auto code_iter = method_code_map_.begin();
- code_iter != method_code_map_.end();
- ++code_iter) {
+ for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
if (code_iter->second == method) {
FreeCode(code_iter->first);
- method_code_map_.erase(code_iter);
+ code_iter = method_code_map_.erase(code_iter);
+ continue;
}
+ ++code_iter;
}
auto code_map = osr_code_map_.find(method);
if (code_map != osr_code_map_.end()) {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index e365b42670..bb33047895 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -662,7 +662,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
monitor_lock_.Unlock(self);
// Handle the case where the thread was interrupted before we called wait().
- if (self->IsInterruptedLocked()) {
+ if (self->IsInterrupted()) {
was_interrupted = true;
} else {
// Wait for a notification or a timeout to occur.
@@ -672,7 +672,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
DCHECK(why == kTimedWaiting || why == kSleeping) << why;
self->GetWaitConditionVariable()->TimedWait(self, ms, ns);
}
- was_interrupted = self->IsInterruptedLocked();
+ was_interrupted = self->IsInterrupted();
}
}
@@ -697,10 +697,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
* The doc sayeth: "The interrupted status of the current thread is
* cleared when this exception is thrown."
*/
- {
- MutexLock mu(self, *self->GetWaitMutex());
- self->SetInterruptedLocked(false);
- }
+ self->SetInterrupted(false);
self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
}
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index d77cfa1d35..cd8315cdf9 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -118,7 +118,15 @@ void InitializeNativeBridge(JNIEnv* env, const char* instruction_set) {
for (int signal = 0; signal < _NSIG; ++signal) {
android::NativeBridgeSignalHandlerFn fn = android::NativeBridgeGetSignalHandler(signal);
if (fn != nullptr) {
- AddSpecialSignalHandlerFn(signal, fn);
+ sigset_t mask;
+ sigfillset(&mask);
+ SigchainAction sa = {
+ .sc_sigaction = fn,
+ .sc_mask = mask,
+ // The native bridge signal handler might not return to sigchain's handler.
+ .sc_flags = SIGCHAIN_ALLOW_NORETURN,
+ };
+ AddSpecialSignalHandlerFn(signal, &sa);
}
}
#endif
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b169373612..3697f2176c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -390,6 +390,7 @@ Runtime::~Runtime() {
low_4gb_arena_pool_.reset();
arena_pool_.reset();
jit_arena_pool_.reset();
+ protected_fault_page_.reset();
MemMap::Shutdown();
// TODO: acquire a static mutex on Runtime to avoid racing.
@@ -1400,6 +1401,27 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
}
+ // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
+ // If we cannot reserve it, log a warning.
+ // Note: This is allocated last so that the heap and other things have priority, if necessary.
+ {
+ constexpr uintptr_t kSentinelAddr =
+ RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
+ protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
+ reinterpret_cast<uint8_t*>(kSentinelAddr),
+ kPageSize,
+ PROT_NONE,
+ true,
+ false,
+ &error_msg));
+ if (protected_fault_page_ == nullptr) {
+ LOG(WARNING) << "Could not reserve sentinel fault page: " << error_msg;
+ } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+ LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
+ protected_fault_page_.reset();
+ }
+ }
+
VLOG(startup) << "Runtime::Init exiting";
return true;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4931382e55..2e3b8d7bae 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -81,6 +81,7 @@ class DexFile;
class InternTable;
class JavaVMExt;
class LinearAlloc;
+class MemMap;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
@@ -942,6 +943,8 @@ class Runtime {
std::atomic<uint32_t> deoptimization_counts_[
static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
+ std::unique_ptr<MemMap> protected_fault_page_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 653a9bd1d4..6848686e5d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1964,7 +1964,6 @@ void Thread::Shutdown() {
Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
- interrupted_(false),
custom_tls_(nullptr),
can_call_into_java_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
@@ -1976,6 +1975,7 @@ Thread::Thread(bool daemon)
"art::Thread has a size which is not a multiple of 4.");
tls32_.state_and_flags.as_struct.flags = 0;
tls32_.state_and_flags.as_struct.state = kNative;
+ tls32_.interrupted.StoreRelaxed(false);
memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
std::fill(tlsPtr_.rosalloc_runs,
tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
@@ -2269,24 +2269,26 @@ bool Thread::IsJWeakCleared(jweak obj) const {
// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
- MutexLock mu(Thread::Current(), *wait_mutex_);
- bool interrupted = IsInterruptedLocked();
- SetInterruptedLocked(false);
+ DCHECK_EQ(Thread::Current(), this);
+ // No other thread can concurrently reset the interrupted flag.
+ bool interrupted = tls32_.interrupted.LoadSequentiallyConsistent();
+ if (interrupted) {
+ tls32_.interrupted.StoreSequentiallyConsistent(false);
+ }
return interrupted;
}
// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
- MutexLock mu(Thread::Current(), *wait_mutex_);
- return IsInterruptedLocked();
+ return tls32_.interrupted.LoadSequentiallyConsistent();
}
void Thread::Interrupt(Thread* self) {
MutexLock mu(self, *wait_mutex_);
- if (interrupted_) {
+ if (tls32_.interrupted.LoadSequentiallyConsistent()) {
return;
}
- interrupted_ = true;
+ tls32_.interrupted.StoreSequentiallyConsistent(true);
NotifyLocked(self);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 6abde5b450..a60fd58ca0 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -487,15 +487,12 @@ class Thread {
}
// Implements java.lang.Thread.interrupted.
- bool Interrupted() REQUIRES(!*wait_mutex_);
+ bool Interrupted();
// Implements java.lang.Thread.isInterrupted.
- bool IsInterrupted() REQUIRES(!*wait_mutex_);
- bool IsInterruptedLocked() REQUIRES(wait_mutex_) {
- return interrupted_;
- }
+ bool IsInterrupted();
void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
- void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) {
- interrupted_ = i;
+ void SetInterrupted(bool i) {
+ tls32_.interrupted.StoreSequentiallyConsistent(i);
}
void Notify() REQUIRES(!*wait_mutex_);
@@ -580,6 +577,13 @@ class Thread {
}
template<PointerSize pointer_size>
+ static ThreadOffset<pointer_size> InterruptedOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
+ }
+
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadFlagsOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -1432,6 +1436,9 @@ class Thread {
// GC roots.
bool32_t is_gc_marking;
+ // Thread "interrupted" status; stays raised until queried or thrown.
+ Atomic<bool32_t> interrupted;
+
// True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
// weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
// processing of the CC collector only. This is thread local so that we can enable/disable weak
@@ -1631,7 +1638,7 @@ class Thread {
gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
} tlsPtr_;
- // Guards the 'interrupted_' and 'wait_monitor_' members.
+ // Guards the 'wait_monitor_' member.
Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Condition variable waited upon during a wait.
@@ -1639,9 +1646,6 @@ class Thread {
// Pointer to the monitor lock we're currently waiting on or null if not waiting.
Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
- // Thread "interrupted" status; stays raised until queried or thrown.
- bool interrupted_ GUARDED_BY(wait_mutex_);
-
// Debug disable read barrier count, only is checked for debug builds and only in the runtime.
uint8_t debug_disallow_read_barrier_ = 0;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index cb9c6052a2..81bf293344 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -33,7 +33,6 @@
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex_instruction_utils.h"
-#include "dex_instruction_visitor.h"
#include "experimental_flags.h"
#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
@@ -1111,8 +1110,9 @@ bool MethodVerifier::VerifyInstructions() {
GetInstructionFlags(0).SetCompileTimeInfoPoint();
uint32_t insns_size = code_item_->insns_size_in_code_units_;
+ bool allow_runtime_only_instructions = !Runtime::Current()->IsAotCompiler() || verify_to_dump_;
for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
- if (!VerifyInstruction(inst, dex_pc)) {
+ if (!VerifyInstruction(inst, dex_pc, allow_runtime_only_instructions)) {
DCHECK_NE(failures_.size(), 0U);
return false;
}
@@ -1139,8 +1139,10 @@ bool MethodVerifier::VerifyInstructions() {
return true;
}
-bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_offset) {
- if (UNLIKELY(inst->IsExperimental())) {
+bool MethodVerifier::VerifyInstruction(const Instruction* inst,
+ uint32_t code_offset,
+ bool allow_runtime_only_instructions) {
+ if (Instruction::kHaveExperimentalInstructions && UNLIKELY(inst->IsExperimental())) {
// Experimental instructions don't yet have verifier support implementation.
// While it is possible to use them by themselves, when we try to use stable instructions
// with a virtual register that was created by an experimental instruction,
@@ -1248,7 +1250,7 @@ bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_of
result = false;
break;
}
- if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsAotCompiler() && !verify_to_dump_) {
+ if (!allow_runtime_only_instructions && inst->GetVerifyIsRuntimeOnly()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
result = false;
}
@@ -4345,7 +4347,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs(
}
}
- if (method_type == METHOD_POLYMORPHIC) {
+ if (UNLIKELY(method_type == METHOD_POLYMORPHIC)) {
// Process the signature of the calling site that is invoking the method handle.
DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(inst->VRegH()));
return VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, res_method);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 26dc15e24b..b34a3afd2e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -390,7 +390,9 @@ class MethodVerifier {
* - (earlier) for each exception handler, the handler must start at a valid
* instruction
*/
- bool VerifyInstruction(const Instruction* inst, uint32_t code_offset);
+ bool VerifyInstruction(const Instruction* inst,
+ uint32_t code_offset,
+ bool allow_runtime_only_instructions);
/* Ensure that the register index is valid for this code item. */
bool CheckRegisterIndex(uint32_t idx);
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index aa4a2591d9..9245828ae2 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -71,59 +71,62 @@ inline bool RegType::AssignableFrom(const RegType& lhs,
if (lhs.Equals(rhs)) {
return true;
} else {
- if (lhs.IsBoolean()) {
- return rhs.IsBooleanTypes();
- } else if (lhs.IsByte()) {
- return rhs.IsByteTypes();
- } else if (lhs.IsShort()) {
- return rhs.IsShortTypes();
- } else if (lhs.IsChar()) {
- return rhs.IsCharTypes();
- } else if (lhs.IsInteger()) {
- return rhs.IsIntegralTypes();
- } else if (lhs.IsFloat()) {
- return rhs.IsFloatTypes();
- } else if (lhs.IsLongLo()) {
- return rhs.IsLongTypes();
- } else if (lhs.IsDoubleLo()) {
- return rhs.IsDoubleTypes();
- } else if (lhs.IsConflict()) {
- LOG(WARNING) << "RegType::AssignableFrom lhs is Conflict!";
- return false;
- } else {
- CHECK(lhs.IsReferenceTypes())
- << "Unexpected register type in IsAssignableFrom: '"
- << lhs << "' := '" << rhs << "'";
- if (rhs.IsZero()) {
- return true; // All reference types can be assigned null.
- } else if (!rhs.IsReferenceTypes()) {
- return false; // Expect rhs to be a reference type.
- } else if (lhs.IsUninitializedTypes() || rhs.IsUninitializedTypes()) {
- // Uninitialized types are only allowed to be assigned to themselves.
- // TODO: Once we have a proper "reference" super type, this needs to be extended.
+ switch (lhs.GetAssignmentType()) {
+ case AssignmentType::kBoolean:
+ return rhs.IsBooleanTypes();
+ case AssignmentType::kByte:
+ return rhs.IsByteTypes();
+ case AssignmentType::kShort:
+ return rhs.IsShortTypes();
+ case AssignmentType::kChar:
+ return rhs.IsCharTypes();
+ case AssignmentType::kInteger:
+ return rhs.IsIntegralTypes();
+ case AssignmentType::kFloat:
+ return rhs.IsFloatTypes();
+ case AssignmentType::kLongLo:
+ return rhs.IsLongTypes();
+ case AssignmentType::kDoubleLo:
+ return rhs.IsDoubleTypes();
+ case AssignmentType::kConflict:
+ LOG(WARNING) << "RegType::AssignableFrom lhs is Conflict!";
return false;
- } else if (lhs.IsJavaLangObject()) {
- return true; // All reference types can be assigned to Object.
- } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
- // If we're not strict allow assignment to any interface, see comment in ClassJoin.
- return true;
- } else if (lhs.IsJavaLangObjectArray()) {
- return rhs.IsObjectArrayTypes(); // All reference arrays may be assigned to Object[]
- } else if (lhs.HasClass() && rhs.HasClass()) {
- // Test assignability from the Class point-of-view.
- bool result = lhs.GetClass()->IsAssignableFrom(rhs.GetClass());
- // Record assignability dependency. The `verifier` is null during unit tests and
- // VerifiedMethod::GenerateSafeCastSet.
- if (verifier != nullptr) {
- VerifierDeps::MaybeRecordAssignability(
- verifier->GetDexFile(), lhs.GetClass(), rhs.GetClass(), strict, result);
+ case AssignmentType::kReference:
+ if (rhs.IsZero()) {
+ return true; // All reference types can be assigned null.
+ } else if (!rhs.IsReferenceTypes()) {
+ return false; // Expect rhs to be a reference type.
+ } else if (lhs.IsUninitializedTypes() || rhs.IsUninitializedTypes()) {
+ // Uninitialized types are only allowed to be assigned to themselves.
+ // TODO: Once we have a proper "reference" super type, this needs to be extended.
+ return false;
+ } else if (lhs.IsJavaLangObject()) {
+ return true; // All reference types can be assigned to Object.
+ } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
+ // If we're not strict allow assignment to any interface, see comment in ClassJoin.
+ return true;
+ } else if (lhs.IsJavaLangObjectArray()) {
+ return rhs.IsObjectArrayTypes(); // All reference arrays may be assigned to Object[]
+ } else if (lhs.HasClass() && rhs.HasClass()) {
+ // Test assignability from the Class point-of-view.
+ bool result = lhs.GetClass()->IsAssignableFrom(rhs.GetClass());
+ // Record assignability dependency. The `verifier` is null during unit tests and
+ // VerifiedMethod::GenerateSafeCastSet.
+ if (verifier != nullptr) {
+ VerifierDeps::MaybeRecordAssignability(
+ verifier->GetDexFile(), lhs.GetClass(), rhs.GetClass(), strict, result);
+ }
+ return result;
+ } else {
+ // Unresolved types are only assignable for null and equality.
+ return false;
}
- return result;
- } else {
- // Unresolved types are only assignable for null and equality.
- return false;
- }
+ case AssignmentType::kNotAssignable:
+ break;
}
+ LOG(FATAL) << "Unexpected register type in IsAssignableFrom: '"
+ << lhs << "' := '" << rhs << "'";
+ UNREACHABLE();
}
}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index dedf77f7db..25baac5094 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -268,6 +268,52 @@ class RegType {
static void* operator new(size_t size, ArenaAllocator* arena) = delete;
static void* operator new(size_t size, ScopedArenaAllocator* arena);
+ enum class AssignmentType {
+ kBoolean,
+ kByte,
+ kShort,
+ kChar,
+ kInteger,
+ kFloat,
+ kLongLo,
+ kDoubleLo,
+ kConflict,
+ kReference,
+ kNotAssignable,
+ };
+
+ ALWAYS_INLINE
+ inline AssignmentType GetAssignmentType() const {
+ AssignmentType t = GetAssignmentTypeImpl();
+ if (kIsDebugBuild) {
+ if (IsBoolean()) {
+ CHECK(AssignmentType::kBoolean == t);
+ } else if (IsByte()) {
+ CHECK(AssignmentType::kByte == t);
+ } else if (IsShort()) {
+ CHECK(AssignmentType::kShort == t);
+ } else if (IsChar()) {
+ CHECK(AssignmentType::kChar == t);
+ } else if (IsInteger()) {
+ CHECK(AssignmentType::kInteger == t);
+ } else if (IsFloat()) {
+ CHECK(AssignmentType::kFloat == t);
+ } else if (IsLongLo()) {
+ CHECK(AssignmentType::kLongLo == t);
+ } else if (IsDoubleLo()) {
+ CHECK(AssignmentType::kDoubleLo == t);
+ } else if (IsConflict()) {
+ CHECK(AssignmentType::kConflict == t);
+ } else if (IsReferenceTypes()) {
+ CHECK(AssignmentType::kReference == t);
+ } else {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
+ }
+ return t;
+ }
+
protected:
RegType(mirror::Class* klass,
const StringPiece& descriptor,
@@ -285,6 +331,8 @@ class RegType {
}
}
+ virtual AssignmentType GetAssignmentTypeImpl() const = 0;
+
const StringPiece descriptor_;
mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
const uint16_t cache_id_;
@@ -341,6 +389,10 @@ class ConflictType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kConflict;
+ }
+
private:
ConflictType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -372,6 +424,10 @@ class UndefinedType FINAL : public RegType {
// Destroy the singleton instance.
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
+
private:
UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -407,6 +463,10 @@ class IntegerType FINAL : public Cat1Type {
static const IntegerType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kInteger;
+ }
+
private:
IntegerType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -427,6 +487,10 @@ class BooleanType FINAL : public Cat1Type {
static const BooleanType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kBoolean;
+ }
+
private:
BooleanType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -448,6 +512,10 @@ class ByteType FINAL : public Cat1Type {
static const ByteType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kByte;
+ }
+
private:
ByteType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -468,6 +536,10 @@ class ShortType FINAL : public Cat1Type {
static const ShortType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kShort;
+ }
+
private:
ShortType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -488,6 +560,10 @@ class CharType FINAL : public Cat1Type {
static const CharType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kChar;
+ }
+
private:
CharType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -508,6 +584,10 @@ class FloatType FINAL : public Cat1Type {
static const FloatType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kFloat;
+ }
+
private:
FloatType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -535,6 +615,10 @@ class LongLoType FINAL : public Cat2Type {
static const LongLoType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kLongLo;
+ }
+
private:
LongLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -555,6 +639,10 @@ class LongHiType FINAL : public Cat2Type {
static const LongHiType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
+
private:
LongHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -576,6 +664,10 @@ class DoubleLoType FINAL : public Cat2Type {
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kDoubleLo;
+ }
+
private:
DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -596,6 +688,10 @@ class DoubleHiType FINAL : public Cat2Type {
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
+
private:
DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -658,6 +754,10 @@ class ConstantType : public RegType {
}
virtual bool IsConstantTypes() const OVERRIDE { return true; }
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
+
private:
const uint32_t constant_;
};
@@ -673,6 +773,10 @@ class PreciseConstType FINAL : public ConstantType {
bool IsPreciseConstant() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
};
class PreciseConstLoType FINAL : public ConstantType {
@@ -684,6 +788,10 @@ class PreciseConstLoType FINAL : public ConstantType {
}
bool IsPreciseConstantLo() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
};
class PreciseConstHiType FINAL : public ConstantType {
@@ -695,6 +803,10 @@ class PreciseConstHiType FINAL : public ConstantType {
}
bool IsPreciseConstantHi() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
};
class ImpreciseConstType FINAL : public ConstantType {
@@ -706,6 +818,10 @@ class ImpreciseConstType FINAL : public ConstantType {
}
bool IsImpreciseConstant() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
};
class ImpreciseConstLoType FINAL : public ConstantType {
@@ -717,6 +833,10 @@ class ImpreciseConstLoType FINAL : public ConstantType {
}
bool IsImpreciseConstantLo() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
};
class ImpreciseConstHiType FINAL : public ConstantType {
@@ -728,6 +848,10 @@ class ImpreciseConstHiType FINAL : public ConstantType {
}
bool IsImpreciseConstantHi() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kNotAssignable;
+ }
};
// Common parent of all uninitialized types. Uninitialized types are created by
@@ -747,6 +871,10 @@ class UninitializedType : public RegType {
return allocation_pc_;
}
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kReference;
+ }
+
private:
const uint32_t allocation_pc_;
};
@@ -848,6 +976,10 @@ class ReferenceType FINAL : public RegType {
bool HasClassVirtual() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kReference;
+ }
};
// A type of register holding a reference to an Object of type GetClass and only
@@ -866,6 +998,10 @@ class PreciseReferenceType FINAL : public RegType {
bool HasClassVirtual() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kReference;
+ }
};
// Common parent of unresolved types.
@@ -876,6 +1012,10 @@ class UnresolvedType : public RegType {
: RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kReference;
+ }
};
// Similar to ReferenceType except the Class couldn't be loaded. Assignability
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 13c03e8261..df4372f431 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -190,10 +190,10 @@ class SignalChain {
return action_;
}
- void AddSpecialHandler(SpecialSignalHandlerFn fn) {
- for (SpecialSignalHandlerFn& slot : special_handlers_) {
- if (slot == nullptr) {
- slot = fn;
+ void AddSpecialHandler(SigchainAction* sa) {
+ for (SigchainAction& slot : special_handlers_) {
+ if (slot.sc_sigaction == nullptr) {
+ slot = *sa;
return;
}
}
@@ -201,15 +201,15 @@ class SignalChain {
fatal("too many special signal handlers");
}
- void RemoveSpecialHandler(SpecialSignalHandlerFn fn) {
+ void RemoveSpecialHandler(bool (*fn)(int, siginfo_t*, void*)) {
// This isn't thread safe, but it's unlikely to be a real problem.
size_t len = sizeof(special_handlers_)/sizeof(*special_handlers_);
for (size_t i = 0; i < len; ++i) {
- if (special_handlers_[i] == fn) {
+ if (special_handlers_[i].sc_sigaction == fn) {
for (size_t j = i; j < len - 1; ++j) {
special_handlers_[j] = special_handlers_[j + 1];
}
- special_handlers_[len - 1] = nullptr;
+ special_handlers_[len - 1].sc_sigaction = nullptr;
return;
}
}
@@ -223,47 +223,37 @@ class SignalChain {
private:
bool claimed_;
struct sigaction action_;
- SpecialSignalHandlerFn special_handlers_[2];
+ SigchainAction special_handlers_[2];
};
static SignalChain chains[_NSIG];
-class ScopedSignalUnblocker {
- public:
- explicit ScopedSignalUnblocker(const std::initializer_list<int>& signals) {
- sigset_t new_mask;
- sigemptyset(&new_mask);
- for (int signal : signals) {
- sigaddset(&new_mask, signal);
- }
- if (sigprocmask(SIG_UNBLOCK, &new_mask, &previous_mask_) != 0) {
- fatal("failed to unblock signals: %s", strerror(errno));
- }
- }
-
- ~ScopedSignalUnblocker() {
- if (sigprocmask(SIG_SETMASK, &previous_mask_, nullptr) != 0) {
- fatal("failed to unblock signals: %s", strerror(errno));
- }
- }
-
- private:
- sigset_t previous_mask_;
-};
-
void SignalChain::Handler(int signo, siginfo_t* siginfo, void* ucontext_raw) {
- ScopedHandlingSignal handling_signal;
-
// Try the special handlers first.
// If one of them crashes, we'll reenter this handler and pass that crash onto the user handler.
if (!GetHandlingSignal()) {
- ScopedSignalUnblocker unblocked { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV }; // NOLINT
- SetHandlingSignal(true);
-
for (const auto& handler : chains[signo].special_handlers_) {
- if (handler != nullptr && handler(signo, siginfo, ucontext_raw)) {
+ if (handler.sc_sigaction == nullptr) {
+ break;
+ }
+
+ // The native bridge signal handler might not return.
+ // Avoid setting the thread local flag in this case, since we'll never
+ // get a chance to restore it.
+ bool handler_noreturn = (handler.sc_flags & SIGCHAIN_ALLOW_NORETURN);
+ sigset_t previous_mask;
+ linked_sigprocmask(SIG_SETMASK, &handler.sc_mask, &previous_mask);
+
+ ScopedHandlingSignal restorer;
+ if (!handler_noreturn) {
+ SetHandlingSignal(true);
+ }
+
+ if (handler.sc_sigaction(signo, siginfo, ucontext_raw)) {
return;
}
+
+ linked_sigprocmask(SIG_SETMASK, &previous_mask, nullptr);
}
}
@@ -275,7 +265,7 @@ void SignalChain::Handler(int signo, siginfo_t* siginfo, void* ucontext_raw) {
if ((handler_flags & SA_NODEFER)) {
sigdelset(&mask, signo);
}
- sigprocmask(SIG_SETMASK, &mask, nullptr);
+ linked_sigprocmask(SIG_SETMASK, &mask, nullptr);
if ((handler_flags & SA_SIGINFO)) {
chains[signo].action_.sa_sigaction(signo, siginfo, ucontext_raw);
@@ -386,7 +376,7 @@ extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bi
return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
}
-extern "C" void AddSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) {
+extern "C" void AddSpecialSignalHandlerFn(int signal, SigchainAction* sa) {
InitializeSignalChain();
if (signal <= 0 || signal >= _NSIG) {
@@ -394,11 +384,11 @@ extern "C" void AddSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn)
}
// Set the managed_handler.
- chains[signal].AddSpecialHandler(fn);
+ chains[signal].AddSpecialHandler(sa);
chains[signal].Claim(signal);
}
-extern "C" void RemoveSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) {
+extern "C" void RemoveSpecialSignalHandlerFn(int signal, bool (*fn)(int, siginfo_t*, void*)) {
InitializeSignalChain();
if (signal <= 0 || signal >= _NSIG) {
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
index 0bed117963..23fba030d2 100644
--- a/sigchainlib/sigchain.h
+++ b/sigchainlib/sigchain.h
@@ -18,12 +18,21 @@
#define ART_SIGCHAINLIB_SIGCHAIN_H_
#include <signal.h>
+#include <stdint.h>
namespace art {
-typedef bool (*SpecialSignalHandlerFn)(int, siginfo_t*, void*);
-extern "C" void AddSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn);
-extern "C" void RemoveSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn);
+// Handlers that exit without returning to their caller (e.g. via siglongjmp) must pass this flag.
+static constexpr uint64_t SIGCHAIN_ALLOW_NORETURN = 0x1UL;
+
+struct SigchainAction {
+ bool (*sc_sigaction)(int, siginfo_t*, void*);
+ sigset_t sc_mask;
+ uint64_t sc_flags;
+};
+
+extern "C" void AddSpecialSignalHandlerFn(int signal, SigchainAction* sa);
+extern "C" void RemoveSpecialSignalHandlerFn(int signal, bool (*fn)(int, siginfo_t*, void*));
extern "C" void EnsureFrontOfChain(int signal);
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index 2d7985aad1..edce965e33 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -54,13 +54,13 @@ extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED) {
}
extern "C" void AddSpecialSignalHandlerFn(int signal ATTRIBUTE_UNUSED,
- SpecialSignalHandlerFn fn ATTRIBUTE_UNUSED) {
+ SigchainAction* sa ATTRIBUTE_UNUSED) {
log("SetSpecialSignalHandlerFn is not exported by the main executable.");
abort();
}
extern "C" void RemoveSpecialSignalHandlerFn(int signal ATTRIBUTE_UNUSED,
- SpecialSignalHandlerFn fn ATTRIBUTE_UNUSED) {
+ bool (*fn)(int, siginfo_t*, void*) ATTRIBUTE_UNUSED) {
log("SetSpecialSignalHandlerFn is not exported by the main executable.");
abort();
}
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index 9c64111027..343a76247e 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -3,7 +3,7 @@ Checking for getEnvValues.
Ready for native bridge tests.
Checking for support.
Getting trampoline for JNI_OnLoad with shorty (null).
-Test ART callbacks: all JNI function number is 11.
+Test ART callbacks: all JNI function number is 12.
name:booleanMethod, signature:(ZZZZZZZZZZ)Z, shorty:ZZZZZZZZZZZ.
name:byteMethod, signature:(BBBBBBBBBB)B, shorty:BBBBBBBBBBB.
name:charMethod, signature:(CCCCCCCCCC)C, shorty:CCCCCCCCCCC.
@@ -14,6 +14,7 @@ Test ART callbacks: all JNI function number is 11.
name:testGetMirandaMethodNative, signature:()Ljava/lang/reflect/Method;, shorty:L.
name:testNewStringObject, signature:()V, shorty:V.
name:testSignal, signature:()I, shorty:I.
+ name:testSignalHandlerNotReturn, signature:()V, shorty:V.
name:testZeroLengthByteBuffers, signature:()V, shorty:V.
trampoline_JNI_OnLoad called!
JNI_OnLoad called
@@ -67,3 +68,13 @@ Checking for support.
Was to load 'libinvalid.so', force fail.
getError() in native bridge.
Catch UnsatisfiedLinkError exception as expected.
+Getting trampoline for Java_Main_testSignalHandlerNotReturn with shorty V.
+start testSignalHandlerNotReturn
+raising first SIGSEGV
+NB signal handler with signal 11.
+handling first SIGSEGV, will raise another
+unblock SIGSEGV in handler
+raising second SIGSEGV
+NB signal handler with signal 11.
+handling second SIGSEGV, will jump back to test function
+back to test from signal handler via siglongjmp(), and done!
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index b3b89491bf..307fd9b766 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -26,6 +26,7 @@
#include "stdio.h"
#include "unistd.h"
#include "sys/stat.h"
+#include "setjmp.h"
#include "base/macros.h"
#include "nativebridge/native_bridge.h"
@@ -191,6 +192,19 @@ char *go_away_compiler = nullptr;
abort();
}
+static void raise_sigsegv() {
+#if defined(__arm__) || defined(__i386__) || defined(__aarch64__)
+ *go_away_compiler = 'a';
+#elif defined(__x86_64__)
+ // Cause a SEGV using an instruction known to be 2 bytes long to account for hardcoded jump
+ // in the signal handler
+ asm volatile("movl $0, %%eax;" "movb %%ah, (%%rax);" : : : "%eax");
+#else
+ // On other architectures we simulate SEGV.
+ kill(getpid(), SIGSEGV);
+#endif
+}
+
static jint trampoline_Java_Main_testSignal(JNIEnv*, jclass) {
// Install the sigaction handler above, which should *not* be reached as the native-bridge
// handler should be called first. Note: we won't chain at all, if we ever get here, we'll die.
@@ -203,16 +217,7 @@ static jint trampoline_Java_Main_testSignal(JNIEnv*, jclass) {
// Test segv
sigaction(SIGSEGV, &tmp, nullptr);
-#if defined(__arm__) || defined(__i386__) || defined(__aarch64__)
- *go_away_compiler = 'a';
-#elif defined(__x86_64__)
- // Cause a SEGV using an instruction known to be 2 bytes long to account for hardcoded jump
- // in the signal handler
- asm volatile("movl $0, %%eax;" "movb %%ah, (%%rax);" : : : "%eax");
-#else
- // On other architectures we simulate SEGV.
- kill(getpid(), SIGSEGV);
-#endif
+ raise_sigsegv();
// Test sigill
sigaction(SIGILL, &tmp, nullptr);
@@ -221,6 +226,135 @@ static jint trampoline_Java_Main_testSignal(JNIEnv*, jclass) {
return 1234;
}
+// Status of the tricky control path of testSignalHandlerNotReturn.
+//
+// "kNone" is the default status except testSignalHandlerNotReturn,
+// others are used by testSignalHandlerNotReturn.
+enum class TestStatus {
+ kNone,
+ kRaiseFirst,
+ kHandleFirst,
+ kRaiseSecond,
+ kHandleSecond,
+};
+
+// State transition helper for testSignalHandlerNotReturn.
+class SignalHandlerTestStatus {
+ public:
+ SignalHandlerTestStatus() : state_(TestStatus::kNone) {
+ }
+
+ TestStatus Get() {
+ return state_;
+ }
+
+ void Reset() {
+ Set(TestStatus::kNone);
+ }
+
+ void Set(TestStatus state) {
+ switch (state) {
+ case TestStatus::kNone:
+ AssertState(TestStatus::kHandleSecond);
+ break;
+
+ case TestStatus::kRaiseFirst:
+ AssertState(TestStatus::kNone);
+ break;
+
+ case TestStatus::kHandleFirst:
+ AssertState(TestStatus::kRaiseFirst);
+ break;
+
+ case TestStatus::kRaiseSecond:
+ AssertState(TestStatus::kHandleFirst);
+ break;
+
+ case TestStatus::kHandleSecond:
+ AssertState(TestStatus::kRaiseSecond);
+ break;
+
+ default:
+ printf("ERROR: unknown state\n");
+ abort();
+ }
+
+ state_ = state;
+ }
+
+ private:
+ TestStatus state_;
+
+ void AssertState(TestStatus expected) {
+ if (state_ != expected) {
+ printf("ERROR: unexpected state, was %d, expected %d\n", state_, expected);
+ }
+ }
+};
+
+static SignalHandlerTestStatus gSignalTestStatus;
+// The context is used to jump out from signal handler.
+static sigjmp_buf gSignalTestJmpBuf;
+
+// Test whether NativeBridge can receive future signal when its handler doesn't return.
+//
+// Control path:
+// 1. Raise first SIGSEGV in test function.
+// 2. Raise another SIGSEGV in NativeBridge's signal handler which is handling
+// the first SIGSEGV.
+// 3. Expect that NativeBridge's signal handler invokes again. And jump back
+// to test function in when handling second SIGSEGV.
+// 4. Exit test.
+//
+// NOTE: sigchain should be aware that "special signal handler" may not return.
+// Pay attention if this case fails.
+static void trampoline_Java_Main_testSignalHandlerNotReturn(JNIEnv*, jclass) {
+ if (gSignalTestStatus.Get() != TestStatus::kNone) {
+ printf("ERROR: test already started?\n");
+ return;
+ }
+ printf("start testSignalHandlerNotReturn\n");
+
+ if (sigsetjmp(gSignalTestJmpBuf, 1) == 0) {
+ gSignalTestStatus.Set(TestStatus::kRaiseFirst);
+ printf("raising first SIGSEGV\n");
+ raise_sigsegv();
+ } else {
+ // jump to here from signal handler when handling second SIGSEGV.
+ if (gSignalTestStatus.Get() != TestStatus::kHandleSecond) {
+ printf("ERROR: not jump from second SIGSEGV?\n");
+ return;
+ }
+ gSignalTestStatus.Reset();
+ printf("back to test from signal handler via siglongjmp(), and done!\n");
+ }
+}
+
+// Signal handler for testSignalHandlerNotReturn.
+// This handler won't return.
+static bool NotReturnSignalHandler() {
+ if (gSignalTestStatus.Get() == TestStatus::kRaiseFirst) {
+ // handling first SIGSEGV
+ gSignalTestStatus.Set(TestStatus::kHandleFirst);
+ printf("handling first SIGSEGV, will raise another\n");
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGSEGV);
+ printf("unblock SIGSEGV in handler\n");
+ sigprocmask(SIG_UNBLOCK, &set, nullptr);
+ gSignalTestStatus.Set(TestStatus::kRaiseSecond);
+ printf("raising second SIGSEGV\n");
+ raise_sigsegv(); // raise second SIGSEGV
+ } else if (gSignalTestStatus.Get() == TestStatus::kRaiseSecond) {
+ // handling second SIGSEGV
+ gSignalTestStatus.Set(TestStatus::kHandleSecond);
+ printf("handling second SIGSEGV, will jump back to test function\n");
+ siglongjmp(gSignalTestJmpBuf, 1);
+ }
+ printf("ERROR: should not reach here!\n");
+ return false;
+}
+
NativeBridgeMethod gNativeBridgeMethods[] = {
{ "JNI_OnLoad", "", true, nullptr,
reinterpret_cast<void*>(trampoline_JNI_OnLoad) },
@@ -246,6 +380,8 @@ NativeBridgeMethod gNativeBridgeMethods[] = {
reinterpret_cast<void*>(trampoline_Java_Main_testZeroLengthByteBuffers) },
{ "testSignal", "()I", true, nullptr,
reinterpret_cast<void*>(trampoline_Java_Main_testSignal) },
+ { "testSignalHandlerNotReturn", "()V", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testSignalHandlerNotReturn) },
};
static NativeBridgeMethod* find_native_bridge_method(const char *name) {
@@ -399,10 +535,8 @@ extern "C" bool native_bridge_isCompatibleWith(uint32_t bridge_version ATTRIBUTE
#endif
#endif
-// A dummy special handler, continueing after the faulting location. This code comes from
-// 004-SignalTest.
-static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) {
- printf("NB signal handler with signal %d.\n", sig);
+static bool StandardSignalHandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
if (sig == SIGSEGV) {
#if defined(__arm__)
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
@@ -427,6 +561,21 @@ static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* co
return true;
}
+// A dummy special handler, continueing after the faulting location. This code comes from
+// 004-SignalTest.
+static bool nb_signalhandler(int sig, siginfo_t* info, void* context) {
+ printf("NB signal handler with signal %d.\n", sig);
+
+ if (gSignalTestStatus.Get() == TestStatus::kNone) {
+ return StandardSignalHandler(sig, info, context);
+ } else if (sig == SIGSEGV) {
+ return NotReturnSignalHandler();
+ } else {
+ printf("ERROR: should not reach here!\n");
+ return false;
+ }
+}
+
static ::android::NativeBridgeSignalHandlerFn native_bridge_getSignalHandler(int signal) {
// Test segv for already claimed signal, and sigill for not claimed signal
if ((signal == SIGSEGV) || (signal == SIGILL)) {
diff --git a/test/115-native-bridge/src/NativeBridgeMain.java b/test/115-native-bridge/src/NativeBridgeMain.java
index e8d1e4e326..11eaa84e80 100644
--- a/test/115-native-bridge/src/NativeBridgeMain.java
+++ b/test/115-native-bridge/src/NativeBridgeMain.java
@@ -35,6 +35,7 @@ class Main {
testNewStringObject();
testSignalHandler();
testGetErrorByLoadInvalidLibrary();
+ testSignalHandlerNotReturn();
}
public static native void testFindClassOnAttachedNativeThread();
@@ -199,6 +200,8 @@ class Main {
System.out.println("Catch UnsatisfiedLinkError exception as expected.");
}
}
+
+ private static native void testSignalHandlerNotReturn();
}
public class NativeBridgeMain {
diff --git a/test/651-checker-byte-simd-minmax/expected.txt b/test/651-checker-byte-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-byte-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-byte-simd-minmax/info.txt b/test/651-checker-byte-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-byte-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..8211ace741
--- /dev/null
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
+ private static void doitMin(byte[] x, byte[] y, byte[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (byte) Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:b\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
+ private static void doitMax(byte[] x, byte[] y, byte[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (byte) Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Initialize cross-values for all possible values.
+ int total = 256 * 256;
+ byte[] x = new byte[total];
+ byte[] y = new byte[total];
+ byte[] z = new byte[total];
+ int k = 0;
+ for (int i = 0; i < 256; i++) {
+ for (int j = 0; j < 256; j++) {
+ x[k] = 0;
+ y[k] = (byte) i;
+ z[k] = (byte) j;
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ byte expected = (byte) Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ byte expected = (byte) Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(byte expected, byte result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/651-checker-char-simd-minmax/expected.txt b/test/651-checker-char-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-char-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-char-simd-minmax/info.txt b/test/651-checker-char-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-char-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..5ce7b94bf4
--- /dev/null
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
+ private static void doitMin(char[] x, char[] y, char[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (char) Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(char[], char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
+ private static void doitMax(char[] x, char[] y, char[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (char) Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ char[] interesting = {
+ 0x0000, 0x0001, 0x007f, 0x0080, 0x0081, 0x00ff,
+ 0x0100, 0x0101, 0x017f, 0x0180, 0x0181, 0x01ff,
+ 0x7f00, 0x7f01, 0x7f7f, 0x7f80, 0x7f81, 0x7fff,
+ 0x8000, 0x8001, 0x807f, 0x8080, 0x8081, 0x80ff,
+ 0x8100, 0x8101, 0x817f, 0x8180, 0x8181, 0x81ff,
+ 0xff00, 0xff01, 0xff7f, 0xff80, 0xff81, 0xffff
+ };
+ // Initialize cross-values for the interesting values.
+ int total = interesting.length * interesting.length;
+ char[] x = new char[total];
+ char[] y = new char[total];
+ char[] z = new char[total];
+ int k = 0;
+ for (int i = 0; i < interesting.length; i++) {
+ for (int j = 0; j < interesting.length; j++) {
+ x[k] = 0;
+ y[k] = interesting[i];
+ z[k] = interesting[j];
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ char expected = (char) Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ char expected = (char) Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(char expected, char result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/651-checker-double-simd-minmax/expected.txt b/test/651-checker-double-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-double-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-double-simd-minmax/info.txt b/test/651-checker-double-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-double-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..e1711aef60
--- /dev/null
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(double[], double[], double[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinDoubleDouble loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO x86: 0.0 vs -0.0?
+ //
+ /// CHECK-START-ARM64: void Main.doitMin(double[], double[], double[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ private static void doitMin(double[] x, double[] y, double[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(double[], double[], double[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxDoubleDouble loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO-x86: 0.0 vs -0.0?
+ //
+ /// CHECK-START-ARM64: void Main.doitMax(double[], double[], double[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ private static void doitMax(double[] x, double[] y, double[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ double[] interesting = {
+ -0.0f,
+ +0.0f,
+ -1.0f,
+ +1.0f,
+ -3.14f,
+ +3.14f,
+ -100.0f,
+ +100.0f,
+ -4444.44f,
+ +4444.44f,
+ Double.MIN_NORMAL,
+ Double.MIN_VALUE,
+ Double.MAX_VALUE,
+ Double.NEGATIVE_INFINITY,
+ Double.POSITIVE_INFINITY,
+ Double.NaN
+ };
+ // Initialize cross-values for the interesting values.
+ int total = interesting.length * interesting.length;
+ double[] x = new double[total];
+ double[] y = new double[total];
+ double[] z = new double[total];
+ int k = 0;
+ for (int i = 0; i < interesting.length; i++) {
+ for (int j = 0; j < interesting.length; j++) {
+ x[k] = 0;
+ y[k] = interesting[i];
+ z[k] = interesting[j];
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ double expected = Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ double expected = Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(double expected, double result) {
+ // Tests the bits directly. This distinguishes correctly between +0.0
+ // and -0.0 and returns a canonical representation for all NaN.
+ long expected_bits = Double.doubleToLongBits(expected);
+ long result_bits = Double.doubleToLongBits(result);
+ if (expected_bits != result_bits) {
+ throw new Error("Expected: " + expected +
+ "(0x" + Long.toHexString(expected_bits) + "), found: " + result +
+ "(0x" + Long.toHexString(result_bits) + ")");
+ }
+ }
+}
diff --git a/test/651-checker-float-simd-minmax/expected.txt b/test/651-checker-float-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-float-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-float-simd-minmax/info.txt b/test/651-checker-float-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-float-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-float-simd-minmax/src/Main.java b/test/651-checker-float-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..bd412e02e9
--- /dev/null
+++ b/test/651-checker-float-simd-minmax/src/Main.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(float[], float[], float[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:f\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:f\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:f\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinFloatFloat loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO x86: 0.0 vs -0.0?
+ //
+ /// CHECK-START-ARM64: void Main.doitMin(float[], float[], float[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ private static void doitMin(float[] x, float[] y, float[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(float[], float[], float[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:f\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:f\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:f\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxFloatFloat loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO x86: 0.0 vs -0.0?
+ //
+ /// CHECK-START-ARM64: void Main.doitMax(float[], float[], float[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ private static void doitMax(float[] x, float[] y, float[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ float[] interesting = {
+ -0.0f,
+ +0.0f,
+ -1.0f,
+ +1.0f,
+ -3.14f,
+ +3.14f,
+ -100.0f,
+ +100.0f,
+ -4444.44f,
+ +4444.44f,
+ Float.MIN_NORMAL,
+ Float.MIN_VALUE,
+ Float.MAX_VALUE,
+ Float.NEGATIVE_INFINITY,
+ Float.POSITIVE_INFINITY,
+ Float.NaN
+ };
+ // Initialize cross-values for the interesting values.
+ int total = interesting.length * interesting.length;
+ float[] x = new float[total];
+ float[] y = new float[total];
+ float[] z = new float[total];
+ int k = 0;
+ for (int i = 0; i < interesting.length; i++) {
+ for (int j = 0; j < interesting.length; j++) {
+ x[k] = 0;
+ y[k] = interesting[i];
+ z[k] = interesting[j];
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ float expected = Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ float expected = Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(float expected, float result) {
+ // Tests the bits directly. This distinguishes correctly between +0.0
+ // and -0.0 and returns a canonical representation for all NaN.
+ int expected_bits = Float.floatToIntBits(expected);
+ int result_bits = Float.floatToIntBits(result);
+ if (expected_bits != result_bits) {
+ throw new Error("Expected: " + expected +
+ "(0x" + Integer.toHexString(expected_bits) + "), found: " + result +
+ "(0x" + Integer.toHexString(result_bits) + ")");
+ }
+ }
+}
diff --git a/test/651-checker-int-simd-minmax/expected.txt b/test/651-checker-int-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-int-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-int-simd-minmax/info.txt b/test/651-checker-int-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-int-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..4e05a9ded3
--- /dev/null
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(int[], int[], int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ private static void doitMin(int[] x, int[] y, int[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(int[], int[], int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:i\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ private static void doitMax(int[] x, int[] y, int[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ int[] interesting = {
+ 0x00000000, 0x00000001, 0x00007fff, 0x00008000, 0x00008001, 0x0000ffff,
+ 0x00010000, 0x00010001, 0x00017fff, 0x00018000, 0x00018001, 0x0001ffff,
+ 0x7fff0000, 0x7fff0001, 0x7fff7fff, 0x7fff8000, 0x7fff8001, 0x7fffffff,
+ 0x80000000, 0x80000001, 0x80007fff, 0x80008000, 0x80008001, 0x8000ffff,
+ 0x80010000, 0x80010001, 0x80017fff, 0x80018000, 0x80018001, 0x8001ffff,
+ 0xffff0000, 0xffff0001, 0xffff7fff, 0xffff8000, 0xffff8001, 0xffffffff
+ };
+ // Initialize cross-values for the interesting values.
+ int total = interesting.length * interesting.length;
+ int[] x = new int[total];
+ int[] y = new int[total];
+ int[] z = new int[total];
+ int k = 0;
+ for (int i = 0; i < interesting.length; i++) {
+ for (int j = 0; j < interesting.length; j++) {
+ x[k] = 0;
+ y[k] = interesting[i];
+ z[k] = interesting[j];
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ int expected = Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ int expected = Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/651-checker-long-simd-minmax/expected.txt b/test/651-checker-long-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-long-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-long-simd-minmax/info.txt b/test/651-checker-long-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-long-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-long-simd-minmax/src/Main.java b/test/651-checker-long-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..51cf67ee00
--- /dev/null
+++ b/test/651-checker-long-simd-minmax/src/Main.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(long[], long[], long[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:j\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinLongLong loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
+ //
+ // Not directly supported for longs.
+ //
+ /// CHECK-START: void Main.doitMin(long[], long[], long[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
+ private static void doitMin(long[] x, long[] y, long[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(long[], long[], long[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:j\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:j\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxLongLong loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
+ //
+ // Not directly supported for longs.
+ //
+ /// CHECK-START: void Main.doitMax(long[], long[], long[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
+ private static void doitMax(long[] x, long[] y, long[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ long[] interesting = {
+ 0x0000000000000000L, 0x0000000000000001L, 0x000000007fffffffL,
+ 0x0000000080000000L, 0x0000000080000001L, 0x00000000ffffffffL,
+ 0x0000000100000000L, 0x0000000100000001L, 0x000000017fffffffL,
+ 0x0000000180000000L, 0x0000000180000001L, 0x00000001ffffffffL,
+ 0x7fffffff00000000L, 0x7fffffff00000001L, 0x7fffffff7fffffffL,
+ 0x7fffffff80000000L, 0x7fffffff80000001L, 0x7fffffffffffffffL,
+ 0x8000000000000000L, 0x8000000000000001L, 0x800000007fffffffL,
+ 0x8000000080000000L, 0x8000000080000001L, 0x80000000ffffffffL,
+ 0x8000000100000000L, 0x8000000100000001L, 0x800000017fffffffL,
+ 0x8000000180000000L, 0x8000000180000001L, 0x80000001ffffffffL,
+ 0xffffffff00000000L, 0xffffffff00000001L, 0xffffffff7fffffffL,
+ 0xffffffff80000000L, 0xffffffff80000001L, 0xffffffffffffffffL
+ };
+ // Initialize cross-values for the interesting values.
+ int total = interesting.length * interesting.length;
+ long[] x = new long[total];
+ long[] y = new long[total];
+ long[] z = new long[total];
+ int k = 0;
+ for (int i = 0; i < interesting.length; i++) {
+ for (int j = 0; j < interesting.length; j++) {
+ x[k] = 0;
+ y[k] = interesting[i];
+ z[k] = interesting[j];
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ long expected = Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ long expected = Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/651-checker-short-simd-minmax/expected.txt b/test/651-checker-short-simd-minmax/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/651-checker-short-simd-minmax/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/651-checker-short-simd-minmax/info.txt b/test/651-checker-short-simd-minmax/info.txt
new file mode 100644
index 0000000000..73af1242c0
--- /dev/null
+++ b/test/651-checker-short-simd-minmax/info.txt
@@ -0,0 +1 @@
+Functional tests on min/max SIMD vectorization.
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
new file mode 100644
index 0000000000..f34f5264c1
--- /dev/null
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for MIN/MAX vectorization.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doitMin(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecMin
+ private static void doitMin(short[] x, short[] y, short[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (short) Math.min(y[i], z[i]);
+ }
+ }
+
+ /// CHECK-START: void Main.doitMax(short[], short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:i\d+>> InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>> outer_loop:none
+ //
+ // TODO: narrow type vectorization.
+ /// CHECK-START: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecMax
+ private static void doitMax(short[] x, short[] y, short[] z) {
+ int min = Math.min(x.length, Math.min(y.length, z.length));
+ for (int i = 0; i < min; i++) {
+ x[i] = (short) Math.max(y[i], z[i]);
+ }
+ }
+
+ public static void main(String[] args) {
+ short[] interesting = {
+ (short) 0x0000, (short) 0x0001, (short) 0x007f,
+ (short) 0x0080, (short) 0x0081, (short) 0x00ff,
+ (short) 0x0100, (short) 0x0101, (short) 0x017f,
+ (short) 0x0180, (short) 0x0181, (short) 0x01ff,
+ (short) 0x7f00, (short) 0x7f01, (short) 0x7f7f,
+ (short) 0x7f80, (short) 0x7f81, (short) 0x7fff,
+ (short) 0x8000, (short) 0x8001, (short) 0x807f,
+ (short) 0x8080, (short) 0x8081, (short) 0x80ff,
+ (short) 0x8100, (short) 0x8101, (short) 0x817f,
+ (short) 0x8180, (short) 0x8181, (short) 0x81ff,
+ (short) 0xff00, (short) 0xff01, (short) 0xff7f,
+ (short) 0xff80, (short) 0xff81, (short) 0xffff
+ };
+ // Initialize cross-values for the interesting values.
+ int total = interesting.length * interesting.length;
+ short[] x = new short[total];
+ short[] y = new short[total];
+ short[] z = new short[total];
+ int k = 0;
+ for (int i = 0; i < interesting.length; i++) {
+ for (int j = 0; j < interesting.length; j++) {
+ x[k] = 0;
+ y[k] = interesting[i];
+ z[k] = interesting[j];
+ k++;
+ }
+ }
+
+ // And test.
+ doitMin(x, y, z);
+ for (int i = 0; i < total; i++) {
+ short expected = (short) Math.min(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+ doitMax(x, y, z);
+ for (int i = 0; i < total; i++) {
+ short expected = (short) Math.max(y[i], z[i]);
+ expectEquals(expected, x[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(short expected, short result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/tools/dexfuzz/src/dexfuzz/DexFuzz.java b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
index 18db4c1f1a..3b2875482e 100644
--- a/tools/dexfuzz/src/dexfuzz/DexFuzz.java
+++ b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
@@ -61,11 +61,14 @@ public class DexFuzz {
multipleListener.addListener(statusListener);
if (Options.repeat > 1 && Options.execute) {
- // Add the live updating listener, but only if we're not printing out lots of logs.
- if (!Log.likelyToLog()) {
+ // If executing repeatedly, take care of reporting progress to the user.
+ if (Options.quiet) {
+ // Nothing if quiet is requested.
+ } else if (!Log.likelyToLog()) {
+ // Add the live updating listener if we're not printing out lots of logs.
multipleListener.addListener(new UpdatingConsoleListener());
} else {
- // If we are dumping out lots of logs, then use the ConsoleLogger instead.
+ // If we are dumping out lots of logs, then use the console logger instead.
multipleListener.addListener(new ConsoleLoggerListener());
}
// Add the file logging listener.
diff --git a/tools/dexfuzz/src/dexfuzz/Options.java b/tools/dexfuzz/src/dexfuzz/Options.java
index af8a05cdbc..d1d81723eb 100644
--- a/tools/dexfuzz/src/dexfuzz/Options.java
+++ b/tools/dexfuzz/src/dexfuzz/Options.java
@@ -80,6 +80,7 @@ public class Options {
public static boolean dumpMutations;
public static boolean loadMutations;
public static boolean runBisectionSearch;
+ public static boolean quiet;
/**
* Print out usage information about dexfuzz, and then exit.
@@ -144,6 +145,7 @@ public class Options {
Log.always(" --unique-db=<file> : Use <file> store results about unique programs");
Log.always(" (Default: unique_progs.db)");
Log.always(" --bisection-search : Run bisection search for divergences");
+ Log.always(" --quiet : Disables progress log");
Log.always("");
System.exit(0);
}
@@ -203,6 +205,8 @@ public class Options {
maxMethods = 1;
} else if (flag.equals("bisection-search")) {
runBisectionSearch = true;
+ } else if (flag.equals("quiet")) {
+ quiet = true;
} else if (flag.equals("help")) {
usage();
} else {
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index 34a92f6ea8..c1d2e4fba6 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -91,7 +91,7 @@ class DexFuzzTester(object):
def Run(self):
"""Feeds JFuzz programs into DexFuzz testing."""
print()
- print('**\n**** JFuzz Testing\n**')
+ print('**\n**** J/DexFuzz Testing\n**')
print()
print('#Tests :', self._num_tests)
print('Device :', self._device)
@@ -111,9 +111,11 @@ class DexFuzzTester(object):
for i in range(1, self._num_inputs + 1):
jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.', 'Test.java']
if RunCommand(['jfuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
+ print('Unexpected error while running JFuzz')
raise FatalError('Unexpected error while running JFuzz')
if RunCommand(['jack'] + jack_args, out=None, err='jackerr.txt',
timeout=30) != RetCode.SUCCESS:
+ print('Unexpected error while running Jack')
raise FatalError('Unexpected error while running Jack')
shutil.move('Test.java', '../Test' + str(i) + '.java')
shutil.move('classes.dex', 'classes' + str(i) + '.dex')
@@ -126,8 +128,11 @@ class DexFuzzTester(object):
'--execute',
'--execute-class=Test',
'--repeat=' + str(self._num_tests),
- '--dump-output', '--dump-verify',
- '--interpreter', '--optimizing',
+ '--quiet',
+ '--dump-output',
+ '--dump-verify',
+ '--interpreter',
+ '--optimizing',
'--bisection-search']
if self._device is not None:
dexfuzz_args += ['--device=' + self._device, '--allarm']
diff --git a/tools/jfuzz/run_jfuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
index b5f856f3b7..7e72aa1d92 100755
--- a/tools/jfuzz/run_jfuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -470,12 +470,20 @@ class JFuzzTester(object):
self._num_not_compiled += 1
else:
self._num_not_run += 1
- elif self._true_divergence_only and RetCode.TIMEOUT in (retc1, retc2):
- # When only true divergences are requested, any divergence in return
- # code where one is a time out is treated as a regular time out.
- self._num_timed_out += 1
else:
# Divergence in return code.
+ if self._true_divergence_only:
+ # When only true divergences are requested, any divergence in return
+ # code where one is a timeout is treated as a regular timeout.
+ if RetCode.TIMEOUT in (retc1, retc2):
+ self._num_timed_out += 1
+ return
+ # When only true divergences are requested, a runtime crash in just
+ # the RI is treated as if not run at all.
+ if retc1 == RetCode.ERROR and retc2 == RetCode.SUCCESS:
+ if self._runner1.GetBisectionSearchArgs() is None:
+ self._num_not_run += 1
+ return
self.ReportDivergence(retc1, retc2, is_output_divergence=False)
def GetCurrentDivergenceDir(self):
diff --git a/tools/jfuzz/run_jfuzz_test_nightly.py b/tools/jfuzz/run_jfuzz_test_nightly.py
index a9f8365c8f..e6c216d1f7 100755
--- a/tools/jfuzz/run_jfuzz_test_nightly.py
+++ b/tools/jfuzz/run_jfuzz_test_nightly.py
@@ -26,8 +26,9 @@ from glob import glob
from tempfile import mkdtemp
from tempfile import TemporaryFile
-# run_jfuzz_test.py success string.
+# run_jfuzz_test.py success/failure strings.
SUCCESS_STRING = 'success (no divergences)'
+FAILURE_STRING = 'FAILURE (divergences)'
# Constant returned by string find() method when search fails.
NOT_FOUND = -1
@@ -43,7 +44,10 @@ def main(argv):
(args, unknown_args) = parser.parse_known_args()
# Run processes.
cmd = cmd + unknown_args
- print('\n**** Running ****\n\n', cmd, '\n')
+ print()
+ print('**\n**** Nightly JFuzz Testing\n**')
+ print()
+ print('**** Running ****\n\n', cmd, '\n')
output_files = [TemporaryFile('wb+') for _ in range(args.num_proc)]
processes = []
for i, output_file in enumerate(output_files):
@@ -69,7 +73,7 @@ def main(argv):
if directory_match:
output_dirs.append(directory_match.group(1))
if output_str.find(SUCCESS_STRING) == NOT_FOUND:
- print('Tester', i, output_str)
+ print('Tester', i, FAILURE_STRING)
else:
print('Tester', i, SUCCESS_STRING)
# Gather divergences.